LLVM 23.0.0git
NVPTXISelLowering.cpp
Go to the documentation of this file.
1//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that NVPTX uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "NVPTXISelLowering.h"
16#include "NVPTX.h"
17#include "NVPTXISelDAGToDAG.h"
19#include "NVPTXSubtarget.h"
20#include "NVPTXTargetMachine.h"
22#include "NVPTXUtilities.h"
23#include "NVVMProperties.h"
24#include "llvm/ADT/APFloat.h"
25#include "llvm/ADT/APInt.h"
26#include "llvm/ADT/STLExtras.h"
28#include "llvm/ADT/StringRef.h"
41#include "llvm/IR/Argument.h"
42#include "llvm/IR/Attributes.h"
43#include "llvm/IR/Constants.h"
44#include "llvm/IR/DataLayout.h"
47#include "llvm/IR/FPEnv.h"
48#include "llvm/IR/Function.h"
49#include "llvm/IR/GlobalValue.h"
50#include "llvm/IR/IRBuilder.h"
51#include "llvm/IR/Instruction.h"
53#include "llvm/IR/IntrinsicsNVPTX.h"
54#include "llvm/IR/Module.h"
55#include "llvm/IR/Type.h"
56#include "llvm/IR/Value.h"
68#include <algorithm>
69#include <cassert>
70#include <cmath>
71#include <cstdint>
72#include <iterator>
73#include <optional>
74#include <string>
75#include <tuple>
76#include <utility>
77#include <vector>
78
79#define DEBUG_TYPE "nvptx-lower"
80
81using namespace llvm;
82
84 "nvptx-sched4reg",
85 cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));
86
88 "nvptx-fma-level", cl::Hidden,
89 cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
90 " 1: do it 2: do it aggressively"),
91 cl::init(2));
92
94 "nvptx-prec-divf32", cl::Hidden,
96 "NVPTX Specific: Override the precision of the lowering for f32 fdiv"),
98 clEnumValN(NVPTX::DivPrecisionLevel::Approx, "0", "Use div.approx"),
99 clEnumValN(NVPTX::DivPrecisionLevel::Full, "1", "Use div.full"),
101 "Use IEEE Compliant F32 div.rnd if available (default)"),
103 "Use IEEE Compliant F32 div.rnd if available, no FTZ")),
105
107 "nvptx-prec-sqrtf32", cl::Hidden,
108 cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),
109 cl::init(true));
110
111/// Whereas CUDA's implementation (see libdevice) uses ex2.approx for exp2(), it
112/// does NOT use lg2.approx for log2, so this is disabled by default.
114 "nvptx-approx-log2f32",
115 cl::desc("NVPTX Specific: whether to use lg2.approx for log2"),
116 cl::init(false));
117
120 const SDNode &N) const {
121 // If nvptx-prec-div32=N is used on the command-line, always honor it
122 if (UsePrecDivF32.getNumOccurrences() > 0)
123 return UsePrecDivF32;
124
125 const SDNodeFlags Flags = N.getFlags();
126 if (Flags.hasApproximateFuncs())
128
130}
131
133 // If nvptx-prec-sqrtf32 is used on the command-line, always honor it
134 if (UsePrecSqrtF32.getNumOccurrences() > 0)
135 return UsePrecSqrtF32;
136
137 if (N) {
138 const SDNodeFlags Flags = N->getFlags();
139 if (Flags.hasApproximateFuncs())
140 return false;
141 }
142
143 return true;
144}
145
150
151static bool IsPTXVectorType(MVT VT) {
152 switch (VT.SimpleTy) {
153 default:
154 return false;
155 case MVT::v2i1:
156 case MVT::v4i1:
157 case MVT::v2i8:
158 case MVT::v4i8:
159 case MVT::v8i8: // <2 x i8x4>
160 case MVT::v16i8: // <4 x i8x4>
161 case MVT::v2i16:
162 case MVT::v4i16:
163 case MVT::v8i16: // <4 x i16x2>
164 case MVT::v2i32:
165 case MVT::v4i32:
166 case MVT::v2i64:
167 case MVT::v2f16:
168 case MVT::v4f16:
169 case MVT::v8f16: // <4 x f16x2>
170 case MVT::v2bf16:
171 case MVT::v4bf16:
172 case MVT::v8bf16: // <4 x bf16x2>
173 case MVT::v2f32:
174 case MVT::v4f32:
175 case MVT::v2f64:
176 case MVT::v4i64:
177 case MVT::v4f64:
178 case MVT::v8i32:
179 case MVT::v8f32:
180 case MVT::v16f16: // <8 x f16x2>
181 case MVT::v16bf16: // <8 x bf16x2>
182 case MVT::v16i16: // <8 x i16x2>
183 case MVT::v32i8: // <8 x i8x4>
184 return true;
185 }
186}
187
188// When legalizing vector loads/stores, this function is called, which does two
189// things:
190// 1. Determines Whether the vector is something we want to custom lower,
191// std::nullopt is returned if we do not want to custom lower it.
192// 2. If we do want to handle it, returns two parameters:
193// - unsigned int NumElts - The number of elements in the final vector
194// - EVT EltVT - The type of the elements in the final vector
195static std::optional<std::pair<unsigned int, MVT>>
197 unsigned AddressSpace) {
198 const bool CanLowerTo256Bit = STI.has256BitVectorLoadStore(AddressSpace);
199
200 if (CanLowerTo256Bit && VectorEVT.isScalarInteger() &&
201 VectorEVT.getSizeInBits() == 256)
202 return {{4, MVT::i64}};
203
204 if (!VectorEVT.isSimple())
205 return std::nullopt;
206 const MVT VectorVT = VectorEVT.getSimpleVT();
207
208 if (!VectorVT.isVector()) {
209 if (VectorVT == MVT::i128 || VectorVT == MVT::f128)
210 return {{2, MVT::i64}};
211 return std::nullopt;
212 }
213
214 const MVT EltVT = VectorVT.getVectorElementType();
215 const unsigned NumElts = VectorVT.getVectorNumElements();
216
217 // The size of the PTX virtual register that holds a packed type.
218 unsigned PackRegSize;
219
220 // We only handle "native" vector sizes for now, e.g. <4 x double> is not
221 // legal. We can (and should) split that into 2 stores of <2 x double> here
222 // but I'm leaving that as a TODO for now.
223 switch (VectorVT.SimpleTy) {
224 default:
225 return std::nullopt;
226
227 case MVT::v4i64:
228 case MVT::v4f64:
229 // This is a "native" vector type iff the address space is global and the
230 // target supports 256-bit loads/stores
231 if (!CanLowerTo256Bit)
232 return std::nullopt;
233 [[fallthrough]];
234 case MVT::v2i8:
235 case MVT::v2i64:
236 case MVT::v2f64:
237 // This is a "native" vector type
238 return std::pair(NumElts, EltVT);
239
240 case MVT::v16f16: // <8 x f16x2>
241 case MVT::v16bf16: // <8 x bf16x2>
242 case MVT::v16i16: // <8 x i16x2>
243 case MVT::v32i8: // <8 x i8x4>
244 // This can be upsized into a "native" vector type iff the address space is
245 // global and the target supports 256-bit loads/stores.
246 if (!CanLowerTo256Bit)
247 return std::nullopt;
248 [[fallthrough]];
249 case MVT::v2i16: // <1 x i16x2>
250 case MVT::v2f16: // <1 x f16x2>
251 case MVT::v2bf16: // <1 x bf16x2>
252 case MVT::v4i8: // <1 x i8x4>
253 case MVT::v4i16: // <2 x i16x2>
254 case MVT::v4f16: // <2 x f16x2>
255 case MVT::v4bf16: // <2 x bf16x2>
256 case MVT::v8i8: // <2 x i8x4>
257 case MVT::v8f16: // <4 x f16x2>
258 case MVT::v8bf16: // <4 x bf16x2>
259 case MVT::v8i16: // <4 x i16x2>
260 case MVT::v16i8: // <4 x i8x4>
261 PackRegSize = 32;
262 break;
263
264 case MVT::v8f32: // <4 x f32x2>
265 case MVT::v8i32: // <4 x i32x2>
266 // This is a "native" vector type iff the address space is global and the
267 // target supports 256-bit loads/stores
268 if (!CanLowerTo256Bit)
269 return std::nullopt;
270 [[fallthrough]];
271 case MVT::v2f32: // <1 x f32x2>
272 case MVT::v4f32: // <2 x f32x2>
273 case MVT::v2i32: // <1 x i32x2>
274 case MVT::v4i32: // <2 x i32x2>
275 if (!STI.hasF32x2Instructions())
276 return std::pair(NumElts, EltVT);
277 PackRegSize = 64;
278 break;
279 }
280
281 // If we reach here, then we can pack 2 or more elements into a single 32-bit
282 // or 64-bit PTX register and treat the vector as a new vector containing
283 // packed elements.
284
285 // Number of elements to pack in one word.
286 const unsigned NPerReg = PackRegSize / EltVT.getSizeInBits();
287
288 return std::pair(NumElts / NPerReg, MVT::getVectorVT(EltVT, NPerReg));
289}
290
291/// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
292/// legal-ish MVTs that compose it. Unlike ComputeValueVTs, this will legalize
293/// the types as required by the calling convention (with special handling for
294/// i8s).
295/// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
296/// same number of types as the Ins/Outs arrays in LowerFormalArguments,
297/// LowerCall, and LowerReturn.
298static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
299 LLVMContext &Ctx, CallingConv::ID CallConv,
300 Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
302 uint64_t StartingOffset = 0) {
303 SmallVector<EVT, 16> TempVTs;
304 SmallVector<uint64_t, 16> TempOffsets;
305 ComputeValueVTs(TLI, DL, Ty, TempVTs, /*MemVTs=*/nullptr, &TempOffsets,
306 StartingOffset);
307
308 for (const auto [VT, Off] : zip(TempVTs, TempOffsets)) {
309 MVT RegisterVT = TLI.getRegisterTypeForCallingConv(Ctx, CallConv, VT);
310 unsigned NumRegs = TLI.getNumRegistersForCallingConv(Ctx, CallConv, VT);
311
312 // Since we actually can load/store b8, we need to ensure that we'll use
313 // the original sized type for any i8s or i8 vectors.
314 if (VT.getScalarType() == MVT::i8) {
315 if (RegisterVT == MVT::i16)
316 RegisterVT = MVT::i8;
317 else if (RegisterVT == MVT::v2i16)
318 RegisterVT = MVT::v2i8;
319 else
320 assert(RegisterVT == MVT::v4i8 &&
321 "Expected v4i8, v2i16, or i16 for i8 RegisterVT");
322 }
323
324 // TODO: This is horribly incorrect for cases where the vector elements are
325 // not a multiple of bytes (ex i1) and legal or i8. However, this problem
326 // has existed for as long as NVPTX has and no one has complained, so we'll
327 // leave it for now.
328 for (unsigned I : seq(NumRegs)) {
329 ValueVTs.push_back(RegisterVT);
330 Offsets.push_back(Off + I * RegisterVT.getStoreSize());
331 }
332 }
333}
334
335// We return an EVT that can hold N VTs
336// If the VT is a vector, the resulting EVT is a flat vector with the same
337// element type as VT's element type.
338static EVT getVectorizedVT(EVT VT, unsigned N, LLVMContext &C) {
339 if (N == 1)
340 return VT;
341
342 return VT.isVector() ? EVT::getVectorVT(C, VT.getScalarType(),
343 VT.getVectorNumElements() * N)
344 : EVT::getVectorVT(C, VT, N);
345}
346
348 const SDLoc &dl, SelectionDAG &DAG) {
349 if (V.getValueType() == VT) {
350 assert(I == 0 && "Index must be 0 for scalar value");
351 return V;
352 }
353
354 if (!VT.isVector())
355 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, V,
356 DAG.getVectorIdxConstant(I, dl));
357
358 return DAG.getNode(
359 ISD::EXTRACT_SUBVECTOR, dl, VT, V,
361}
362
363template <typename T>
364static inline SDValue getBuildVectorizedValue(unsigned N, const SDLoc &dl,
365 SelectionDAG &DAG, T GetElement) {
366 if (N == 1)
367 return GetElement(0);
368
370 for (const unsigned I : llvm::seq(N)) {
371 SDValue Val = GetElement(I);
372 if (Val.getValueType().isVector())
373 DAG.ExtractVectorElements(Val, Values);
374 else
375 Values.push_back(Val);
376 }
377
378 EVT VT = EVT::getVectorVT(*DAG.getContext(), Values[0].getValueType(),
379 Values.size());
380 return DAG.getBuildVector(VT, dl, Values);
381}
382
383/// PromoteScalarIntegerPTX
384/// Used to make sure the arguments/returns are suitable for passing
385/// and promote them to a larger size if they're not.
386///
387/// The promoted type is placed in \p PromoteVT if the function returns true.
389 if (VT.isScalarInteger()) {
390 switch (PowerOf2Ceil(VT.getFixedSizeInBits())) {
391 default:
393 "Promotion is not suitable for scalars of size larger than 64-bits");
394 case 1:
395 return MVT::i1;
396 case 2:
397 case 4:
398 case 8:
399 return MVT::i8;
400 case 16:
401 return MVT::i16;
402 case 32:
403 return MVT::i32;
404 case 64:
405 return MVT::i64;
406 }
407 }
408 return VT;
409}
410
411// Check whether we can merge loads/stores of some of the pieces of a
412// flattened function parameter or return value into a single vector
413// load/store.
414//
415// The flattened parameter is represented as a list of EVTs and
416// offsets, and the whole structure is aligned to ParamAlignment. This
417// function determines whether we can load/store pieces of the
418// parameter starting at index Idx using a single vectorized op of
419// size AccessSize. If so, it returns the number of param pieces
420// covered by the vector op. Otherwise, it returns 1.
421template <typename T>
423 unsigned Idx, uint32_t AccessSize, const SmallVectorImpl<EVT> &ValueVTs,
424 const SmallVectorImpl<T> &Offsets, Align ParamAlignment) {
425
426 // Can't vectorize if param alignment is not sufficient.
427 if (ParamAlignment < AccessSize)
428 return 1;
429 // Can't vectorize if offset is not aligned.
430 if (Offsets[Idx] & (AccessSize - 1))
431 return 1;
432
433 EVT EltVT = ValueVTs[Idx];
434 unsigned EltSize = EltVT.getStoreSize();
435
436 // Element is too large to vectorize.
437 if (EltSize >= AccessSize)
438 return 1;
439
440 unsigned NumElts = AccessSize / EltSize;
441 // Can't vectorize if AccessBytes if not a multiple of EltSize.
442 if (AccessSize != EltSize * NumElts)
443 return 1;
444
445 // We don't have enough elements to vectorize.
446 if (Idx + NumElts > ValueVTs.size())
447 return 1;
448
449 // PTX ISA can only deal with 2- and 4-element vector ops.
450 if (NumElts != 4 && NumElts != 2)
451 return 1;
452
453 for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {
454 // Types do not match.
455 if (ValueVTs[j] != EltVT)
456 return 1;
457
458 // Elements are not contiguous.
459 if (Offsets[j] - Offsets[j - 1] != EltSize)
460 return 1;
461 }
462 // OK. We can vectorize ValueVTs[i..i+NumElts)
463 return NumElts;
464}
465
466// Computes whether and how we can vectorize the loads/stores of a
467// flattened function parameter or return value.
468//
469// The flattened parameter is represented as the list of ValueVTs and
470// Offsets, and is aligned to ParamAlignment bytes. We return a vector
471// of the same size as ValueVTs indicating how each piece should be
472// loaded/stored (i.e. as a scalar, or as part of a vector
473// load/store).
474template <typename T>
477 const SmallVectorImpl<T> &Offsets, Align ParamAlignment,
478 bool IsVAArg = false) {
479 // Set vector size to match ValueVTs and mark all elements as
480 // scalars by default.
481
482 if (IsVAArg)
483 return SmallVector<unsigned>(ValueVTs.size(), 1);
484
485 SmallVector<unsigned, 16> VectorInfo;
486
487 const auto GetNumElts = [&](unsigned I) -> unsigned {
488 for (const unsigned AccessSize : {16, 8, 4, 2}) {
489 const unsigned NumElts = canMergeParamLoadStoresStartingAt(
490 I, AccessSize, ValueVTs, Offsets, ParamAlignment);
491 assert((NumElts == 1 || NumElts == 2 || NumElts == 4) &&
492 "Unexpected vectorization size");
493 if (NumElts != 1)
494 return NumElts;
495 }
496 return 1;
497 };
498
499 // Check what we can vectorize using 128/64/32-bit accesses.
500 for (unsigned I = 0, E = ValueVTs.size(); I != E;) {
501 const unsigned NumElts = GetNumElts(I);
502 VectorInfo.push_back(NumElts);
503 I += NumElts;
504 }
505 assert(std::accumulate(VectorInfo.begin(), VectorInfo.end(), 0u) ==
506 ValueVTs.size());
507 return VectorInfo;
508}
509
510// NVPTXTargetLowering Constructor.
512 const NVPTXSubtarget &STI)
513 : TargetLowering(TM, STI), nvTM(&TM), STI(STI), GlobalUniqueCallSite(0) {
514 // always lower memset, memcpy, and memmove intrinsics to load/store
515 // instructions, rather
516 // then generating calls to memset, mempcy or memmove.
520
523
524 // Jump is Expensive. Don't create extra control flow for 'and', 'or'
525 // condition branches.
526 setJumpIsExpensive(true);
527
528 // Wide divides are _very_ slow. Try to reduce the width of the divide if
529 // possible.
530 addBypassSlowDiv(64, 32);
531
532 // By default, use the Source scheduling
533 if (sched4reg)
535 else
537
538 auto setFP16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
539 LegalizeAction NoF16Action) {
540 bool IsOpSupported = STI.allowFP16Math();
541 switch (Op) {
542 // Several FP16 instructions are available on sm_80 only.
543 case ISD::FMINNUM:
544 case ISD::FMAXNUM:
547 case ISD::FMAXIMUM:
548 case ISD::FMINIMUM:
549 case ISD::FMAXIMUMNUM:
550 case ISD::FMINIMUMNUM:
551 IsOpSupported &= STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;
552 break;
553 case ISD::FEXP2:
554 IsOpSupported &= STI.getSmVersion() >= 75 && STI.getPTXVersion() >= 70;
555 break;
556 }
557 setOperationAction(Op, VT, IsOpSupported ? Action : NoF16Action);
558 };
559
560 auto setBF16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
561 LegalizeAction NoBF16Action) {
562 bool IsOpSupported = STI.hasNativeBF16Support(Op);
564 Op, VT, IsOpSupported ? Action : NoBF16Action);
565 };
566
567 auto setI16x2OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
568 LegalizeAction NoI16x2Action) {
569 bool IsOpSupported = false;
570 // instructions are available on sm_90 only
571 switch (Op) {
572 case ISD::ADD:
573 case ISD::SMAX:
574 case ISD::SMIN:
575 case ISD::UMIN:
576 case ISD::UMAX:
577 IsOpSupported = STI.getSmVersion() >= 90 && STI.getPTXVersion() >= 80;
578 break;
579 }
580 setOperationAction(Op, VT, IsOpSupported ? Action : NoI16x2Action);
581 };
582
583 addRegisterClass(MVT::i1, &NVPTX::B1RegClass);
584 addRegisterClass(MVT::i16, &NVPTX::B16RegClass);
585 addRegisterClass(MVT::v2i16, &NVPTX::B32RegClass);
586 addRegisterClass(MVT::v4i8, &NVPTX::B32RegClass);
587 addRegisterClass(MVT::i32, &NVPTX::B32RegClass);
588 addRegisterClass(MVT::i64, &NVPTX::B64RegClass);
589 addRegisterClass(MVT::f32, &NVPTX::B32RegClass);
590 addRegisterClass(MVT::f64, &NVPTX::B64RegClass);
591 addRegisterClass(MVT::f16, &NVPTX::B16RegClass);
592 addRegisterClass(MVT::v2f16, &NVPTX::B32RegClass);
593 addRegisterClass(MVT::bf16, &NVPTX::B16RegClass);
594 addRegisterClass(MVT::v2bf16, &NVPTX::B32RegClass);
595
596 if (STI.hasF32x2Instructions()) {
597 addRegisterClass(MVT::v2f32, &NVPTX::B64RegClass);
598 addRegisterClass(MVT::v2i32, &NVPTX::B64RegClass);
599 }
600
601 // Conversion to/from FP16/FP16x2 is always legal.
606
608 if (STI.getSmVersion() >= 30 && STI.getPTXVersion() > 31)
610
611 setFP16OperationAction(ISD::SETCC, MVT::f16, Legal, Promote);
612 setFP16OperationAction(ISD::SETCC, MVT::v2f16, Legal, Expand);
613
614 // Conversion to/from BFP16/BFP16x2 is always legal.
619
620 setBF16OperationAction(ISD::SETCC, MVT::v2bf16, Legal, Expand);
621 setBF16OperationAction(ISD::SETCC, MVT::bf16, Legal, Promote);
622 if (getOperationAction(ISD::SETCC, MVT::bf16) == Promote)
623 AddPromotedToType(ISD::SETCC, MVT::bf16, MVT::f32);
624
625 // Conversion to/from i16/i16x2 is always legal.
630
635
636 // No support for these operations with v2f32/v2i32
637 setOperationAction(ISD::INSERT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32}, Expand);
638 setOperationAction(ISD::VECTOR_SHUFFLE, {MVT::v2f32, MVT::v2i32}, Expand);
639
642 MVT::v2i32, Expand);
643
644 // Need custom lowering in case the index is dynamic.
645 if (STI.hasF32x2Instructions())
646 setOperationAction(ISD::EXTRACT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32},
647 Custom);
648
649 // Custom conversions to/from v2i8.
651
652 // Only logical ops can be done on v4i8/v2i32 directly, others must be done
653 // elementwise.
670 {MVT::v4i8, MVT::v2i32}, Expand);
671
672 // Operations not directly supported by NVPTX.
673 for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,
674 MVT::v2f32, MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16,
675 MVT::v4i8, MVT::i32, MVT::v2i32, MVT::i64}) {
678 }
679
680 // We don't want ops like FMINIMUM or UMAX to be lowered to SETCC+VSELECT.
681 setOperationAction(ISD::VSELECT, {MVT::v2f32, MVT::v2i32}, Expand);
682
683 // Some SIGN_EXTEND_INREG can be done using cvt instruction.
684 // For others we will expand to a SHL/SRA pair.
690 setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::v2i16, MVT::v2i32}, Expand);
691
698
701
703 {MVT::i8, MVT::i16, MVT::v2i16, MVT::i32, MVT::i64},
704 Expand);
705
706 if (STI.hasHWROT32()) {
709 Custom);
710 }
711
712 setOperationAction(ISD::BR_JT, MVT::Other, STI.hasBrx() ? Legal : Expand);
714
715 // We want to legalize constant related memmove and memcopy
716 // intrinsics.
718
719 // FP extload/truncstore is not legal in PTX. We need to expand all these.
720 for (auto FloatVTs :
722 for (MVT ValVT : FloatVTs) {
723 for (MVT MemVT : FloatVTs) {
724 setLoadExtAction(ISD::EXTLOAD, ValVT, MemVT, Expand);
725 setTruncStoreAction(ValVT, MemVT, Expand);
726 }
727 }
728 }
729
730 // To improve CodeGen we'll legalize any-extend loads to zext loads. This is
731 // how they'll be lowered in ISel anyway, and by doing this a little earlier
732 // we allow for more DAG combine opportunities.
733 for (auto IntVTs :
735 for (MVT ValVT : IntVTs)
736 for (MVT MemVT : IntVTs)
737 if (isTypeLegal(ValVT))
738 setLoadExtAction(ISD::EXTLOAD, ValVT, MemVT, Custom);
739
740 // PTX does not support load / store predicate registers
742 for (MVT VT : MVT::integer_valuetypes()) {
744 Promote);
745 setTruncStoreAction(VT, MVT::i1, Expand);
746 }
747
748 // Disable generations of extload/truncstore for v2i32/v2i16/v2i8. The generic
749 // expansion for these nodes when they are unaligned is incorrect if the
750 // type is a vector.
751 //
752 // TODO: Fix the generic expansion for these nodes found in
753 // TargetLowering::expandUnalignedLoad/Store.
755 MVT::v2i8, Expand);
757 {MVT::v2i8, MVT::v2i16}, Expand);
758 setTruncStoreAction(MVT::v2i16, MVT::v2i8, Expand);
759 setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
760 setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
761
762 // Register custom handling for illegal type loads/stores. We'll try to custom
763 // lower almost all illegal types and logic in the lowering will discard cases
764 // we can't handle.
765 setOperationAction({ISD::LOAD, ISD::STORE}, {MVT::i128, MVT::i256, MVT::f128},
766 Custom);
768 if (!isTypeLegal(VT) && VT.getStoreSizeInBits() <= 256)
770 Custom);
771
772 // Custom legalization for LDU intrinsics.
773 // TODO: The logic to lower these is not very robust and we should rewrite it.
774 // Perhaps LDU should not be represented as an intrinsic at all.
777 if (IsPTXVectorType(VT))
779
783 MVT::i1, Expand);
784
785 // This is legal in NVPTX
790
791 setOperationAction(ISD::DYNAMIC_STACKALLOC, {MVT::i32, MVT::i64}, Custom);
793
794 // TRAP can be lowered to PTX trap
795 setOperationAction(ISD::TRAP, MVT::Other, Legal);
796 // DEBUGTRAP can be lowered to PTX brkpt
798
799 // Support varargs.
804
806 {MVT::i16, MVT::i32, MVT::i64}, Legal);
807
809 Promote);
812
813 setI16x2OperationAction(ISD::ABS, MVT::v2i16, Legal, Custom);
814 setI16x2OperationAction(ISD::SMIN, MVT::v2i16, Legal, Custom);
815 setI16x2OperationAction(ISD::SMAX, MVT::v2i16, Legal, Custom);
816 setI16x2OperationAction(ISD::UMIN, MVT::v2i16, Legal, Custom);
817 setI16x2OperationAction(ISD::UMAX, MVT::v2i16, Legal, Custom);
818 setI16x2OperationAction(ISD::CTPOP, MVT::v2i16, Legal, Expand);
819 setI16x2OperationAction(ISD::CTLZ, MVT::v2i16, Legal, Expand);
820
821 setI16x2OperationAction(ISD::ADD, MVT::v2i16, Legal, Custom);
822 setI16x2OperationAction(ISD::SUB, MVT::v2i16, Legal, Custom);
823 setI16x2OperationAction(ISD::MUL, MVT::v2i16, Legal, Custom);
824 setI16x2OperationAction(ISD::SHL, MVT::v2i16, Legal, Custom);
825 setI16x2OperationAction(ISD::SREM, MVT::v2i16, Legal, Custom);
826 setI16x2OperationAction(ISD::UREM, MVT::v2i16, Legal, Custom);
827
828 // Other arithmetic and logic ops are unsupported.
832 {MVT::v2i16, MVT::v2i32}, Expand);
833
834 // v2i32 is not supported for any arithmetic operations
839 MVT::v2i32, Expand);
840
845 if (STI.getPTXVersion() >= 43) {
850 }
851
853 setOperationAction(ISD::CTTZ, {MVT::v2i16, MVT::v2i32}, Expand);
856
857 // PTX does not directly support SELP of i1, so promote to i32 first
859
860 // PTX cannot multiply two i64s in a single instruction.
863
864 // We have some custom DAG combine patterns for these nodes
866 ISD::AND,
868 ISD::FADD,
875 ISD::MUL,
877 ISD::SHL,
878 ISD::SREM,
879 ISD::UREM,
883 ISD::LOAD,
888
889 // If the vector operands require register coalescing, scalarize instead
890 if (STI.hasF32x2Instructions())
892
893 // setcc for f16x2 and bf16x2 needs special handling to prevent
894 // legalizer's attempt to scalarize it due to v2i1 not being legal.
895 if (STI.allowFP16Math() || STI.hasBF16Math())
897
898 // Vector reduction operations. These may be turned into shuffle or tree
899 // reductions depending on what instructions are available for each type.
901 MVT EltVT = VT.getVectorElementType();
902 if (EltVT == MVT::f32 || EltVT == MVT::f64) {
905 VT, Custom);
906 }
907 }
908
909 // Promote fp16 arithmetic if fp16 hardware isn't available or the
910 // user passed --nvptx-no-fp16-math. The flag is useful because,
911 // although sm_53+ GPUs have some sort of FP16 support in
912 // hardware, only sm_53 and sm_60 have full implementation. Others
913 // only have token amount of hardware and are likely to run faster
914 // by using fp32 units instead.
915 for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB, ISD::FMA}) {
916 setFP16OperationAction(Op, MVT::f16, Legal, Promote);
917 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
918 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
919 // bf16 must be promoted to f32.
920 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
921 if (getOperationAction(Op, MVT::bf16) == Promote)
922 AddPromotedToType(Op, MVT::bf16, MVT::f32);
923 setOperationAction(Op, MVT::v2f32,
924 STI.hasF32x2Instructions() ? Legal : Expand);
925 }
926
927 // On SM80, we select add/mul/sub as fma to avoid promotion to float
928 for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB}) {
929 for (const auto &VT : {MVT::bf16, MVT::v2bf16}) {
930 if (!STI.hasNativeBF16Support(Op) && STI.hasNativeBF16Support(ISD::FMA)) {
932 }
933 }
934 }
935
936 // f16/f16x2 neg was introduced in PTX 60, SM_53.
937 const bool IsFP16FP16x2NegAvailable = STI.getSmVersion() >= 53 &&
938 STI.getPTXVersion() >= 60 &&
939 STI.allowFP16Math();
940 for (const auto &VT : {MVT::f16, MVT::v2f16})
942 IsFP16FP16x2NegAvailable ? Legal : Expand);
943
944 setBF16OperationAction(ISD::FNEG, MVT::bf16, Legal, Expand);
945 setBF16OperationAction(ISD::FNEG, MVT::v2bf16, Legal, Expand);
946 setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
947 // (would be) Library functions.
948
949 // These map to conversion instructions for scalar FP types.
950 for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT,
952 setOperationAction(Op, MVT::f16, Legal);
953 setOperationAction(Op, MVT::f32, Legal);
954 setOperationAction(Op, MVT::f64, Legal);
955 setOperationAction(Op, MVT::v2f16, Expand);
956 setOperationAction(Op, MVT::v2bf16, Expand);
957 setOperationAction(Op, MVT::v2f32, Expand);
958 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
959 if (getOperationAction(Op, MVT::bf16) == Promote)
960 AddPromotedToType(Op, MVT::bf16, MVT::f32);
961 }
962
963 if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71) {
965 }
966 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
967 for (MVT VT : {MVT::bf16, MVT::f32, MVT::f64}) {
970 }
971 }
972
973 // Expand v2f32 = fp_extend
975 // Expand v2[b]f16 = fp_round v2f32
976 setOperationAction(ISD::FP_ROUND, {MVT::v2bf16, MVT::v2f16}, Expand);
977
978 // sm_80 only has conversions between f32 and bf16. Custom lower all other
979 // bf16 conversions.
980 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
981 for (MVT VT : {MVT::i1, MVT::i16, MVT::i32, MVT::i64}) {
984 VT, Custom);
985 }
988 MVT::bf16, Custom);
989 }
990
997 AddPromotedToType(ISD::FROUND, MVT::bf16, MVT::f32);
998
999 // 'Expand' implements FCOPYSIGN without calling an external library.
1006
1007 // These map to corresponding instructions for f32/f64. f16 must be
1008 // promoted to f32. v2f16 is expanded to f16, which is then promoted
1009 // to f32.
1010 for (const auto &Op :
1012 setOperationAction(Op, MVT::f16, Promote);
1013 setOperationAction(Op, MVT::f32, Legal);
1014 // only div/rem/sqrt are legal for f64
1015 if (Op == ISD::FDIV || Op == ISD::FREM || Op == ISD::FSQRT) {
1016 setOperationAction(Op, MVT::f64, Legal);
1017 }
1018 setOperationAction(Op, {MVT::v2f16, MVT::v2bf16, MVT::v2f32}, Expand);
1019 setOperationAction(Op, MVT::bf16, Promote);
1020 AddPromotedToType(Op, MVT::bf16, MVT::f32);
1021 }
1022 setOperationAction(ISD::FREM, {MVT::f32, MVT::f64}, Custom);
1023
1024 setOperationAction(ISD::FABS, {MVT::f32, MVT::f64}, Legal);
1025 setOperationAction(ISD::FABS, MVT::v2f32, Expand);
1026 if (STI.getPTXVersion() >= 65) {
1027 setFP16OperationAction(ISD::FABS, MVT::f16, Legal, Promote);
1028 setFP16OperationAction(ISD::FABS, MVT::v2f16, Legal, Expand);
1029 } else {
1031 setOperationAction(ISD::FABS, MVT::v2f16, Expand);
1032 }
1033 setBF16OperationAction(ISD::FABS, MVT::v2bf16, Legal, Expand);
1034 setBF16OperationAction(ISD::FABS, MVT::bf16, Legal, Promote);
1035 if (getOperationAction(ISD::FABS, MVT::bf16) == Promote)
1036 AddPromotedToType(ISD::FABS, MVT::bf16, MVT::f32);
1037
1038 for (const auto &Op :
1040 setOperationAction(Op, MVT::f32, Legal);
1041 setOperationAction(Op, MVT::f64, Legal);
1042 setFP16OperationAction(Op, MVT::f16, Legal, Promote);
1043 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
1044 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
1045 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
1046 if (getOperationAction(Op, MVT::bf16) == Promote)
1047 AddPromotedToType(Op, MVT::bf16, MVT::f32);
1048 setOperationAction(Op, MVT::v2f32, Expand);
1049 }
1050 bool SupportsF32MinMaxNaN =
1051 STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;
1052 for (const auto &Op : {ISD::FMINIMUM, ISD::FMAXIMUM}) {
1053 setOperationAction(Op, MVT::f32, SupportsF32MinMaxNaN ? Legal : Expand);
1054 setFP16OperationAction(Op, MVT::f16, Legal, Expand);
1055 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
1056 setBF16OperationAction(Op, MVT::bf16, Legal, Expand);
1057 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
1058 setOperationAction(Op, MVT::v2f32, Expand);
1059 }
1060
1061 // Custom lowering for inline asm with 128-bit operands
1064
1065 // FEXP2 support:
1066 // - f32
1067 // - f16/f16x2 (sm_70+, PTX 7.0+)
1068 // - bf16/bf16x2 (sm_90+, PTX 7.8+)
1069 // When f16/bf16 types aren't supported, they are promoted/expanded to f32.
1071 setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
1072 setFP16OperationAction(ISD::FEXP2, MVT::f16, Legal, Promote);
1073 setFP16OperationAction(ISD::FEXP2, MVT::v2f16, Legal, Expand);
1074 setBF16OperationAction(ISD::FEXP2, MVT::bf16, Legal, Promote);
1075 setBF16OperationAction(ISD::FEXP2, MVT::v2bf16, Legal, Expand);
1076
1077 // FLOG2 supports f32 only
1078 // f16/bf16 types aren't supported, but they are promoted/expanded to f32.
1079 if (UseApproxLog2F32) {
1081 setOperationPromotedToType(ISD::FLOG2, MVT::f16, MVT::f32);
1082 setOperationPromotedToType(ISD::FLOG2, MVT::bf16, MVT::f32);
1083 setOperationAction(ISD::FLOG2, {MVT::v2f16, MVT::v2bf16, MVT::v2f32},
1084 Expand);
1085 }
1086
1087 setOperationAction(ISD::ADDRSPACECAST, {MVT::i32, MVT::i64}, Custom);
1088
1089 setOperationAction(ISD::ATOMIC_LOAD_SUB, {MVT::i32, MVT::i64}, Expand);
1090
1091 // atom.b128 is legal in PTX but since we don't represent i128 as a legal
1092 // type, we need to custom lower it.
1094 Custom);
1095
1096 // Now deduce the information based on the above mentioned
1097 // actions
1098 computeRegisterProperties(STI.getRegisterInfo());
1099
1100 // PTX support for 16-bit CAS is emulated. Only use 32+
1101 setMinCmpXchgSizeInBits(STI.getMinCmpXchgSizeInBits());
1102 setMaxAtomicSizeInBitsSupported(STI.hasAtomSwap128() ? 128 : 64);
1104
1105 // Custom lowering for tcgen05.ld vector operands
1107 {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,
1108 MVT::v32i32, MVT::v64i32, MVT::v128i32, MVT::v2f32,
1109 MVT::v4f32, MVT::v8f32, MVT::v16f32, MVT::v32f32,
1110 MVT::v64f32, MVT::v128f32},
1111 Custom);
1112
1113 // Custom lowering for tcgen05.st vector operands
1115 {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,
1116 MVT::v32i32, MVT::v64i32, MVT::v128i32, MVT::Other},
1117 Custom);
1118
1119 // Enable custom lowering for the following:
1120 // * MVT::i128 - clusterlaunchcontrol
1121 // * MVT::i32 - prmt
1122 // * MVT::v4f32 - cvt_rs fp{4/6/8}x4 intrinsics
1123 // * MVT::Other - internal.addrspace.wrap
1125 {MVT::i32, MVT::i128, MVT::v4f32, MVT::Other}, Custom);
1126
1127 // Custom lowering for bswap
1128 setOperationAction(ISD::BSWAP, {MVT::i16, MVT::i32, MVT::i64, MVT::v2i16},
1129 Custom);
1130}
1131
1134 if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
1135 VT.getScalarType() == MVT::i1)
1136 return TypeSplitVector;
1138}
1139
1141 int Enabled, int &ExtraSteps,
1142 bool &UseOneConst,
1143 bool Reciprocal) const {
1146 return SDValue();
1147
1148 if (ExtraSteps == ReciprocalEstimate::Unspecified)
1149 ExtraSteps = 0;
1150
1151 SDLoc DL(Operand);
1152 EVT VT = Operand.getValueType();
1153 bool Ftz = useF32FTZ(DAG.getMachineFunction());
1154
1155 auto MakeIntrinsicCall = [&](Intrinsic::ID IID) {
1156 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
1157 DAG.getConstant(IID, DL, MVT::i32), Operand);
1158 };
1159
1160 // The sqrt and rsqrt refinement processes assume we always start out with an
1161 // approximation of the rsqrt. Therefore, if we're going to do any refinement
1162 // (i.e. ExtraSteps > 0), we must return an rsqrt. But if we're *not* doing
1163 // any refinement, we must return a regular sqrt.
1164 if (Reciprocal || ExtraSteps > 0) {
1165 if (VT == MVT::f32)
1166 return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
1167 : Intrinsic::nvvm_rsqrt_approx_f);
1168 else if (VT == MVT::f64)
1169 return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);
1170 else
1171 return SDValue();
1172 } else {
1173 if (VT == MVT::f32)
1174 return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
1175 : Intrinsic::nvvm_sqrt_approx_f);
1176 else {
1177 // There's no sqrt.approx.f64 instruction, so we emit
1178 // reciprocal(rsqrt(x)). This is faster than
1179 // select(x == 0, 0, x * rsqrt(x)). (In fact, it's faster than plain
1180 // x * rsqrt(x).)
1181 return DAG.getNode(
1183 DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
1184 MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
1185 }
1186 }
1187}
1188
1189static Align getArgumentAlignment(const CallBase *CB, Type *Ty, unsigned Idx,
1190 const DataLayout &DL);
1191
1193 const DataLayout &DL, Type *RetTy, const ArgListTy &Args,
1195 std::optional<unsigned> FirstVAArg, const CallBase &CB,
1196 unsigned UniqueCallSite) const {
1197 auto PtrVT = getPointerTy(DL);
1198
1199 std::string Prototype;
1200 raw_string_ostream O(Prototype);
1201 O << "prototype_" << UniqueCallSite << " : .callprototype ";
1202
1203 if (RetTy->isVoidTy()) {
1204 O << "()";
1205 } else {
1206 O << "(";
1207 if (shouldPassAsArray(RetTy)) {
1208 const Align RetAlign = getArgumentAlignment(&CB, RetTy, 0, DL);
1209 O << ".param .align " << RetAlign.value() << " .b8 _["
1210 << DL.getTypeAllocSize(RetTy) << "]";
1211 } else if (RetTy->isFloatingPointTy() || RetTy->isIntegerTy()) {
1212 unsigned size = 0;
1213 if (auto *ITy = dyn_cast<IntegerType>(RetTy)) {
1214 size = ITy->getBitWidth();
1215 } else {
1216 assert(RetTy->isFloatingPointTy() &&
1217 "Floating point type expected here");
1218 size = RetTy->getPrimitiveSizeInBits();
1219 }
1220 // PTX ABI requires all scalar return values to be at least 32
1221 // bits in size. fp16 normally uses .b16 as its storage type in
1222 // PTX, so its size must be adjusted here, too.
1224
1225 O << ".param .b" << size << " _";
1226 } else if (isa<PointerType>(RetTy)) {
1227 O << ".param .b" << PtrVT.getSizeInBits() << " _";
1228 } else {
1229 llvm_unreachable("Unknown return type");
1230 }
1231 O << ") ";
1232 }
1233 O << "_ (";
1234
1235 bool first = true;
1236
1237 const unsigned NumArgs = FirstVAArg.value_or(Args.size());
1238 auto AllOuts = ArrayRef(Outs);
1239 for (const unsigned I : llvm::seq(NumArgs)) {
1240 const auto ArgOuts =
1241 AllOuts.take_while([I](auto O) { return O.OrigArgIndex == I; });
1242 AllOuts = AllOuts.drop_front(ArgOuts.size());
1243
1244 Type *Ty = Args[I].Ty;
1245 if (!first) {
1246 O << ", ";
1247 }
1248 first = false;
1249
1250 if (ArgOuts[0].Flags.isByVal()) {
1251 // Indirect calls need strict ABI alignment so we disable optimizations by
1252 // not providing a function to optimize.
1253 Type *ETy = Args[I].IndirectType;
1254 Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1255 Align ParamByValAlign =
1256 getFunctionByValParamAlign(/*F=*/nullptr, ETy, InitialAlign, DL);
1257
1258 O << ".param .align " << ParamByValAlign.value() << " .b8 _["
1259 << ArgOuts[0].Flags.getByValSize() << "]";
1260 } else {
1261 if (shouldPassAsArray(Ty)) {
1262 Align ParamAlign =
1263 getArgumentAlignment(&CB, Ty, I + AttributeList::FirstArgIndex, DL);
1264 O << ".param .align " << ParamAlign.value() << " .b8 _["
1265 << DL.getTypeAllocSize(Ty) << "]";
1266 continue;
1267 }
1268 // i8 types in IR will be i16 types in SDAG
1269 assert((getValueType(DL, Ty) == ArgOuts[0].VT ||
1270 (getValueType(DL, Ty) == MVT::i8 && ArgOuts[0].VT == MVT::i16)) &&
1271 "type mismatch between callee prototype and arguments");
1272 // scalar type
1273 unsigned sz = 0;
1274 if (auto *ITy = dyn_cast<IntegerType>(Ty)) {
1275 sz = promoteScalarArgumentSize(ITy->getBitWidth());
1276 } else if (isa<PointerType>(Ty)) {
1277 sz = PtrVT.getSizeInBits();
1278 } else {
1279 sz = Ty->getPrimitiveSizeInBits();
1280 }
1281 O << ".param .b" << sz << " _";
1282 }
1283 }
1284
1285 if (FirstVAArg)
1286 O << (first ? "" : ",") << " .param .align "
1287 << STI.getMaxRequiredAlignment() << " .b8 _[]";
1288 O << ")";
1289 if (shouldEmitPTXNoReturn(&CB, *nvTM))
1290 O << " .noreturn";
1291 O << ";";
1292
1293 return Prototype;
1294}
1295
1296static Align getArgumentAlignment(const CallBase *CB, Type *Ty, unsigned Idx,
1297 const DataLayout &DL) {
1298 if (!CB) {
1299 // CallSite is zero, fallback to ABI type alignment
1300 return DL.getABITypeAlign(Ty);
1301 }
1302
1303 const Function *DirectCallee = CB->getCalledFunction();
1304
1305 if (!DirectCallee) {
1306 // We don't have a direct function symbol, but that may be because of
1307 // constant cast instructions in the call.
1308
1309 // With bitcast'd call targets, the instruction will be the call
1310 if (const auto *CI = dyn_cast<CallInst>(CB)) {
1311 // Check if we have call alignment metadata
1312 if (MaybeAlign StackAlign = getAlign(*CI, Idx))
1313 return StackAlign.value();
1314 }
1315 DirectCallee = getMaybeBitcastedCallee(CB);
1316 }
1317
1318 // Check for function alignment information if we found that the
1319 // ultimate target is a Function
1320 if (DirectCallee)
1321 return getFunctionArgumentAlignment(DirectCallee, Ty, Idx, DL);
1322
1323 // Call is indirect, fall back to the ABI type alignment
1324 return DL.getABITypeAlign(Ty);
1325}
1326
1328 const DataLayout &DL,
1329 const TargetLowering &TL) {
1330 if (Ptr->getOpcode() == ISD::FrameIndex) {
1331 auto Ty = TL.getPointerTy(DL, ADDRESS_SPACE_LOCAL);
1332 Ptr = DAG.getAddrSpaceCast(SDLoc(), Ty, Ptr, ADDRESS_SPACE_GENERIC,
1334
1336 }
1337
1338 // Peel of an addrspacecast to generic and load directly from the specific
1339 // address space.
1340 if (Ptr->getOpcode() == ISD::ADDRSPACECAST) {
1341 const auto *ASC = cast<AddrSpaceCastSDNode>(Ptr);
1342 if (ASC->getDestAddressSpace() == ADDRESS_SPACE_GENERIC) {
1343 Ptr = ASC->getOperand(0);
1344 return MachinePointerInfo(ASC->getSrcAddressSpace());
1345 }
1346 }
1347
1348 return MachinePointerInfo();
1349}
1350
1352 if (Flags.isSExt())
1353 return ISD::SIGN_EXTEND;
1354 if (Flags.isZExt())
1355 return ISD::ZERO_EXTEND;
1356 return ISD::ANY_EXTEND;
1357}
1358
1360 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
1361 SDLoc dl) {
1362 const EVT ActualVT = V.getValueType();
1363 assert((ActualVT == ExpectedVT ||
1364 (ExpectedVT.isInteger() && ActualVT.isInteger())) &&
1365 "Non-integer argument type size mismatch");
1366 if (ExpectedVT.bitsGT(ActualVT))
1367 return DAG.getNode(getExtOpcode(Flags), dl, ExpectedVT, V);
1368 if (ExpectedVT.bitsLT(ActualVT))
1369 return DAG.getNode(ISD::TRUNCATE, dl, ExpectedVT, V);
1370
1371 return V;
1372}
1373
1375 SmallVectorImpl<SDValue> &InVals) const {
1376
1377 if (CLI.IsVarArg && (STI.getPTXVersion() < 60 || STI.getSmVersion() < 30))
1379 "Support for variadic functions (unsized array parameter) introduced "
1380 "in PTX ISA version 6.0 and requires target sm_30.");
1381
1382 SelectionDAG &DAG = CLI.DAG;
1383 SDLoc dl = CLI.DL;
1384 const SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1385 SDValue Callee = CLI.Callee;
1386 ArgListTy &Args = CLI.getArgs();
1387 Type *RetTy = CLI.RetTy;
1388 const CallBase *CB = CLI.CB;
1389 const DataLayout &DL = DAG.getDataLayout();
1390 LLVMContext &Ctx = *DAG.getContext();
1391
1392 const auto GetI32 = [&](const unsigned I) {
1393 return DAG.getConstant(I, dl, MVT::i32);
1394 };
1395
1396 const unsigned UniqueCallSite = GlobalUniqueCallSite++;
1397 const SDValue CallChain = CLI.Chain;
1398 const SDValue StartChain =
1399 DAG.getCALLSEQ_START(CallChain, UniqueCallSite, 0, dl);
1400 SDValue DeclareGlue = StartChain.getValue(1);
1401
1402 SmallVector<SDValue, 16> CallPrereqs{StartChain};
1403
1404 const auto MakeDeclareScalarParam = [&](SDValue Symbol, unsigned Size) {
1405 // PTX ABI requires integral types to be at least 32 bits in size. FP16 is
1406 // loaded/stored using i16, so it's handled here as well.
1407 const unsigned SizeBits = promoteScalarArgumentSize(Size * 8);
1408 SDValue Declare =
1409 DAG.getNode(NVPTXISD::DeclareScalarParam, dl, {MVT::Other, MVT::Glue},
1410 {StartChain, Symbol, GetI32(SizeBits), DeclareGlue});
1411 CallPrereqs.push_back(Declare);
1412 DeclareGlue = Declare.getValue(1);
1413 return Declare;
1414 };
1415
1416 const auto MakeDeclareArrayParam = [&](SDValue Symbol, Align Align,
1417 unsigned Size) {
1418 SDValue Declare = DAG.getNode(
1419 NVPTXISD::DeclareArrayParam, dl, {MVT::Other, MVT::Glue},
1420 {StartChain, Symbol, GetI32(Align.value()), GetI32(Size), DeclareGlue});
1421 CallPrereqs.push_back(Declare);
1422 DeclareGlue = Declare.getValue(1);
1423 return Declare;
1424 };
1425
1426 // Variadic arguments.
1427 //
1428 // Normally, for each argument, we declare a param scalar or a param
1429 // byte array in the .param space, and store the argument value to that
1430 // param scalar or array starting at offset 0.
1431 //
1432 // In the case of the first variadic argument, we declare a vararg byte array
1433 // with size 0. The exact size of this array isn't known at this point, so
1434 // it'll be patched later. All the variadic arguments will be stored to this
1435 // array at a certain offset (which gets tracked by 'VAOffset'). The offset is
1436 // initially set to 0, so it can be used for non-variadic arguments (which use
1437 // 0 offset) to simplify the code.
1438 //
1439 // After all vararg is processed, 'VAOffset' holds the size of the
1440 // vararg byte array.
1441 assert((CLI.IsVarArg || CLI.Args.size() == CLI.NumFixedArgs) &&
1442 "Non-VarArg function with extra arguments");
1443
1444 const unsigned FirstVAArg = CLI.NumFixedArgs; // position of first variadic
1445 unsigned VAOffset = 0; // current offset in the param array
1446
1447 const SDValue VADeclareParam =
1448 CLI.Args.size() > FirstVAArg
1449 ? MakeDeclareArrayParam(getCallParamSymbol(DAG, FirstVAArg, MVT::i32),
1450 Align(STI.getMaxRequiredAlignment()), 0)
1451 : SDValue();
1452
1453 // Args.size() and Outs.size() need not match.
1454 // Outs.size() will be larger
1455 // * if there is an aggregate argument with multiple fields (each field
1456 // showing up separately in Outs)
1457 // * if there is a vector argument with more than typical vector-length
1458 // elements (generally if more than 4) where each vector element is
1459 // individually present in Outs.
1460 // So a different index should be used for indexing into Outs/OutVals.
1461 // See similar issue in LowerFormalArguments.
1462 auto AllOuts = ArrayRef(CLI.Outs);
1463 auto AllOutVals = ArrayRef(CLI.OutVals);
1464 assert(AllOuts.size() == AllOutVals.size() &&
1465 "Outs and OutVals must be the same size");
1466 // Declare the .params or .reg need to pass values
1467 // to the function
1468 for (const auto E : llvm::enumerate(Args)) {
1469 const auto ArgI = E.index();
1470 const auto Arg = E.value();
1471 const auto ArgOuts =
1472 AllOuts.take_while([&](auto O) { return O.OrigArgIndex == ArgI; });
1473 const auto ArgOutVals = AllOutVals.take_front(ArgOuts.size());
1474 AllOuts = AllOuts.drop_front(ArgOuts.size());
1475 AllOutVals = AllOutVals.drop_front(ArgOuts.size());
1476
1477 const bool IsVAArg = (ArgI >= FirstVAArg);
1478 const bool IsByVal = Arg.IsByVal;
1479
1480 const SDValue ParamSymbol =
1481 getCallParamSymbol(DAG, IsVAArg ? FirstVAArg : ArgI, MVT::i32);
1482
1483 assert((!IsByVal || Arg.IndirectType) &&
1484 "byval arg must have indirect type");
1485 Type *ETy = (IsByVal ? Arg.IndirectType : Arg.Ty);
1486
1487 const Align ArgAlign = [&]() {
1488 if (IsByVal) {
1489 // The ByValAlign in the Outs[OIdx].Flags is always set at this point,
1490 // so we don't need to worry whether it's naturally aligned or not.
1491 // See TargetLowering::LowerCallTo().
1492 const Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1494 InitialAlign, DL);
1495 }
1496 return getArgumentAlignment(CB, Arg.Ty, ArgI + 1, DL);
1497 }();
1498
1499 const unsigned TySize = DL.getTypeAllocSize(ETy);
1500 assert((!IsByVal || TySize == ArgOuts[0].Flags.getByValSize()) &&
1501 "type size mismatch");
1502
1503 const SDValue ArgDeclare = [&]() {
1504 if (IsVAArg)
1505 return VADeclareParam;
1506
1507 if (IsByVal || shouldPassAsArray(Arg.Ty))
1508 return MakeDeclareArrayParam(ParamSymbol, ArgAlign, TySize);
1509
1510 assert(ArgOuts.size() == 1 && "We must pass only one value as non-array");
1511 assert((ArgOuts[0].VT.isInteger() || ArgOuts[0].VT.isFloatingPoint()) &&
1512 "Only int and float types are supported as non-array arguments");
1513
1514 return MakeDeclareScalarParam(ParamSymbol, TySize);
1515 }();
1516
1517 if (IsByVal) {
1518 assert(ArgOutVals.size() == 1 && "We must pass only one value as byval");
1519 SDValue SrcPtr = ArgOutVals[0];
1520 const auto PointerInfo = refinePtrAS(SrcPtr, DAG, DL, *this);
1521 const Align BaseSrcAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1522
1523 if (IsVAArg)
1524 VAOffset = alignTo(VAOffset, ArgAlign);
1525
1526 SmallVector<EVT, 4> ValueVTs, MemVTs;
1528 ComputeValueVTs(*this, DL, ETy, ValueVTs, &MemVTs, &Offsets);
1529
1530 unsigned J = 0;
1531 const auto VI = VectorizePTXValueVTs(MemVTs, Offsets, ArgAlign, IsVAArg);
1532 for (const unsigned NumElts : VI) {
1533 EVT LoadVT = getVectorizedVT(MemVTs[J], NumElts, Ctx);
1534 Align SrcAlign = commonAlignment(BaseSrcAlign, Offsets[J]);
1535 SDValue SrcAddr = DAG.getObjectPtrOffset(dl, SrcPtr, Offsets[J]);
1536 SDValue SrcLoad =
1537 DAG.getLoad(LoadVT, dl, CallChain, SrcAddr, PointerInfo, SrcAlign);
1538
1539 TypeSize ParamOffset = Offsets[J].getWithIncrement(VAOffset);
1540 Align ParamAlign = commonAlignment(ArgAlign, ParamOffset);
1541 SDValue ParamAddr =
1542 DAG.getObjectPtrOffset(dl, ParamSymbol, ParamOffset);
1543 SDValue StoreParam = DAG.getStore(
1544 ArgDeclare, dl, SrcLoad, ParamAddr,
1546 CallPrereqs.push_back(StoreParam);
1547
1548 J += NumElts;
1549 }
1550 if (IsVAArg)
1551 VAOffset += TySize;
1552 } else {
1555 ComputePTXValueVTs(*this, DL, Ctx, CLI.CallConv, Arg.Ty, VTs, Offsets,
1556 VAOffset);
1557 assert(VTs.size() == Offsets.size() && "Size mismatch");
1558 assert(VTs.size() == ArgOuts.size() && "Size mismatch");
1559
1560 // PTX Interoperability Guide 3.3(A): [Integer] Values shorter
1561 // than 32-bits are sign extended or zero extended, depending on
1562 // whether they are signed or unsigned types. This case applies
1563 // only to scalar parameters and not to aggregate values.
1564 const bool ExtendIntegerParam =
1565 Arg.Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Arg.Ty) < 32;
1566
1567 const auto GetStoredValue = [&](const unsigned I) {
1568 SDValue StVal = ArgOutVals[I];
1570 StVal.getValueType() &&
1571 "OutVal type should always be legal");
1572
1573 const EVT VTI = promoteScalarIntegerPTX(VTs[I]);
1574 const EVT StoreVT =
1575 ExtendIntegerParam ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
1576
1577 return correctParamType(StVal, StoreVT, ArgOuts[I].Flags, DAG, dl);
1578 };
1579
1580 unsigned J = 0;
1581 const auto VI = VectorizePTXValueVTs(VTs, Offsets, ArgAlign, IsVAArg);
1582 for (const unsigned NumElts : VI) {
1583 const EVT EltVT = promoteScalarIntegerPTX(VTs[J]);
1584
1585 unsigned Offset;
1586 if (IsVAArg) {
1587 // TODO: We may need to support vector types that can be passed
1588 // as scalars in variadic arguments.
1589 assert(NumElts == 1 &&
1590 "Vectorization should be disabled for vaargs.");
1591
1592 // Align each part of the variadic argument to their type.
1593 VAOffset = alignTo(VAOffset, DAG.getEVTAlign(EltVT));
1594 Offset = VAOffset;
1595
1596 const EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : EltVT;
1597 VAOffset += DL.getTypeAllocSize(TheStoreType.getTypeForEVT(Ctx));
1598 } else {
1599 assert(VAOffset == 0 && "VAOffset must be 0 for non-VA args");
1600 Offset = Offsets[J];
1601 }
1602
1603 SDValue Ptr =
1604 DAG.getObjectPtrOffset(dl, ParamSymbol, TypeSize::getFixed(Offset));
1605
1606 const MaybeAlign CurrentAlign = ExtendIntegerParam
1607 ? MaybeAlign(std::nullopt)
1608 : commonAlignment(ArgAlign, Offset);
1609
1610 SDValue Val =
1611 getBuildVectorizedValue(NumElts, dl, DAG, [&](unsigned K) {
1612 return GetStoredValue(J + K);
1613 });
1614
1615 SDValue StoreParam = DAG.getStore(
1616 ArgDeclare, dl, Val, Ptr,
1618 CallPrereqs.push_back(StoreParam);
1619
1620 J += NumElts;
1621 }
1622 }
1623 }
1624
1625 // Handle Result
1626 if (!Ins.empty()) {
1627 const SDValue RetSymbol = DAG.getExternalSymbol("retval0", MVT::i32);
1628 const unsigned ResultSize = DL.getTypeAllocSize(RetTy);
1629 if (shouldPassAsArray(RetTy)) {
1630 const Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);
1631 MakeDeclareArrayParam(RetSymbol, RetAlign, ResultSize);
1632 } else {
1633 MakeDeclareScalarParam(RetSymbol, ResultSize);
1634 }
1635 }
1636
1637 // Set the size of the vararg param byte array if the callee is a variadic
1638 // function and the variadic part is not empty.
1639 if (VADeclareParam) {
1640 SDValue DeclareParamOps[] = {VADeclareParam.getOperand(0),
1641 VADeclareParam.getOperand(1),
1642 VADeclareParam.getOperand(2), GetI32(VAOffset),
1643 VADeclareParam.getOperand(4)};
1644 DAG.MorphNodeTo(VADeclareParam.getNode(), VADeclareParam.getOpcode(),
1645 VADeclareParam->getVTList(), DeclareParamOps);
1646 }
1647
1648 const auto *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
1649 const auto *CalleeF = Func ? dyn_cast<Function>(Func->getGlobal()) : nullptr;
1650
1651 // If the type of the callsite does not match that of the function, convert
1652 // the callsite to an indirect call.
1653 const bool ConvertToIndirectCall =
1654 CalleeF && CB->getFunctionType() != CalleeF->getFunctionType();
1655
1656 // Both indirect calls and libcalls have nullptr Func. In order to distinguish
1657 // between them we must rely on the call site value which is valid for
1658 // indirect calls but is always null for libcalls.
1659 const bool IsIndirectCall = (!Func && CB) || ConvertToIndirectCall;
1660
1661 if (isa<ExternalSymbolSDNode>(Callee)) {
1662 Function* CalleeFunc = nullptr;
1663
1664 // Try to find the callee in the current module.
1665 Callee = DAG.getSymbolFunctionGlobalAddress(Callee, &CalleeFunc);
1666 assert(CalleeFunc != nullptr && "Libcall callee must be set.");
1667
1668 // Set the "libcall callee" attribute to indicate that the function
1669 // must always have a declaration.
1670 CalleeFunc->addFnAttr("nvptx-libcall-callee", "true");
1671 }
1672
1673 if (IsIndirectCall) {
1674 // This is indirect function call case : PTX requires a prototype of the
1675 // form
1676 // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
1677 // to be emitted, and the label has to used as the last arg of call
1678 // instruction.
1679 // The prototype is embedded in a string and put as the operand for a
1680 // CallPrototype SDNode which will print out to the value of the string.
1681 const bool HasVAArgs = CLI.IsVarArg && (CLI.Args.size() > CLI.NumFixedArgs);
1682 std::string Proto =
1683 getPrototype(DL, RetTy, Args, CLI.Outs,
1684 HasVAArgs ? std::optional(FirstVAArg) : std::nullopt, *CB,
1685 UniqueCallSite);
1686 const char *ProtoStr = nvTM->getStrPool().save(Proto).data();
1687 const SDValue PrototypeDeclare = DAG.getNode(
1688 NVPTXISD::CallPrototype, dl, MVT::Other,
1689 {StartChain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32)});
1690 CallPrereqs.push_back(PrototypeDeclare);
1691 }
1692
1693 const bool IsUnknownIntrinsic =
1694 CalleeF && CalleeF->isIntrinsic() &&
1695 CalleeF->getIntrinsicID() == Intrinsic::not_intrinsic;
1696 if (IsUnknownIntrinsic) {
1699 "call to unknown intrinsic '" + CalleeF->getName() +
1700 "' cannot be lowered by the NVPTX backend",
1701 dl.getDebugLoc()));
1702 }
1703
1704 const unsigned Proto = IsIndirectCall ? UniqueCallSite : 0;
1705 const unsigned NumArgs =
1706 std::min<unsigned>(CLI.NumFixedArgs + 1, Args.size());
1707 /// CALL(Chain, IsConvergent, IsIndirectCall/IsUniform, NumReturns,
1708 /// NumParams, Callee, Proto)
1709 const SDValue CallToken = DAG.getTokenFactor(dl, CallPrereqs);
1710 const SDValue Call = DAG.getNode(
1711 NVPTXISD::CALL, dl, MVT::Other,
1712 {CallToken, GetI32(CLI.IsConvergent), GetI32(IsIndirectCall),
1713 GetI32(Ins.empty() ? 0 : 1), GetI32(NumArgs), Callee, GetI32(Proto)});
1714
1715 SmallVector<SDValue, 16> LoadChains{Call};
1716 SmallVector<SDValue, 16> ProxyRegOps;
1717 if (!Ins.empty()) {
1720 ComputePTXValueVTs(*this, DL, Ctx, CLI.CallConv, RetTy, VTs, Offsets);
1721 assert(VTs.size() == Ins.size() && "Bad value decomposition");
1722
1723 const Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);
1724 const SDValue RetSymbol = DAG.getExternalSymbol("retval0", MVT::i32);
1725
1726 // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
1727 // 32-bits are sign extended or zero extended, depending on whether
1728 // they are signed or unsigned types.
1729 const bool ExtendIntegerRetVal =
1730 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
1731
1732 unsigned I = 0;
1733 const auto VI = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
1734 for (const unsigned NumElts : VI) {
1735 const MaybeAlign CurrentAlign =
1736 ExtendIntegerRetVal ? MaybeAlign(std::nullopt)
1737 : commonAlignment(RetAlign, Offsets[I]);
1738
1739 const EVT VTI = promoteScalarIntegerPTX(VTs[I]);
1740 const EVT LoadVT =
1741 ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
1742 const EVT VecVT = getVectorizedVT(LoadVT, NumElts, Ctx);
1743 SDValue Ptr =
1744 DAG.getObjectPtrOffset(dl, RetSymbol, TypeSize::getFixed(Offsets[I]));
1745
1746 SDValue R = DAG.getLoad(
1747 VecVT, dl, Call, Ptr,
1749
1750 LoadChains.push_back(R.getValue(1));
1751 for (const unsigned J : llvm::seq(NumElts))
1752 ProxyRegOps.push_back(getExtractVectorizedValue(R, J, LoadVT, dl, DAG));
1753 I += NumElts;
1754 }
1755 }
1756
1757 const SDValue EndToken = DAG.getTokenFactor(dl, LoadChains);
1758 const SDValue CallEnd = DAG.getCALLSEQ_END(EndToken, UniqueCallSite,
1759 UniqueCallSite + 1, SDValue(), dl);
1760
1761 // Append ProxyReg instructions to the chain to make sure that `callseq_end`
1762 // will not get lost. Otherwise, during libcalls expansion, the nodes can become
1763 // dangling.
1764 for (const auto [I, Reg] : llvm::enumerate(ProxyRegOps)) {
1765 SDValue Proxy =
1766 DAG.getNode(NVPTXISD::ProxyReg, dl, Reg.getValueType(), {CallEnd, Reg});
1767 SDValue Ret = correctParamType(Proxy, Ins[I].VT, Ins[I].Flags, DAG, dl);
1768 InVals.push_back(Ret);
1769 }
1770
1771 // set IsTailCall to false for now, until we figure out how to express
1772 // tail call optimization in PTX
1773 CLI.IsTailCall = false;
1774 return CallEnd;
1775}
1776
1778 SelectionDAG &DAG) const {
1779
1780 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
1781 const Function &Fn = DAG.getMachineFunction().getFunction();
1782
1784 Fn,
1785 "Support for dynamic alloca introduced in PTX ISA version 7.3 and "
1786 "requires target sm_52.",
1787 SDLoc(Op).getDebugLoc()));
1788 auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()),
1789 Op.getOperand(0)};
1790 return DAG.getMergeValues(Ops, SDLoc());
1791 }
1792
1793 SDLoc DL(Op.getNode());
1794 SDValue Chain = Op.getOperand(0);
1795 SDValue Size = Op.getOperand(1);
1796 uint64_t Align = Op.getConstantOperandVal(2);
1797
1798 // The alignment on a ISD::DYNAMIC_STACKALLOC node may be 0 to indicate that
1799 // the default stack alignment should be used.
1800 if (Align == 0)
1802
1803 // The size for ptx alloca instruction is 64-bit for m64 and 32-bit for m32.
1804 const MVT LocalVT = getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_LOCAL);
1805
1806 SDValue Alloc =
1807 DAG.getNode(NVPTXISD::DYNAMIC_STACKALLOC, DL, {LocalVT, MVT::Other},
1808 {Chain, DAG.getZExtOrTrunc(Size, DL, LocalVT),
1809 DAG.getTargetConstant(Align, DL, MVT::i32)});
1810
1811 SDValue ASC = DAG.getAddrSpaceCast(
1813
1814 return DAG.getMergeValues({ASC, SDValue(Alloc.getNode(), 1)}, DL);
1815}
1816
1818 SelectionDAG &DAG) const {
1819 SDLoc DL(Op.getNode());
1820 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
1821 const Function &Fn = DAG.getMachineFunction().getFunction();
1822
1824 Fn,
1825 "Support for stackrestore requires PTX ISA version >= 7.3 and target "
1826 ">= sm_52.",
1827 DL.getDebugLoc()));
1828 return Op.getOperand(0);
1829 }
1830
1831 const MVT LocalVT = getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_LOCAL);
1832 SDValue Chain = Op.getOperand(0);
1833 SDValue Ptr = Op.getOperand(1);
1834 SDValue ASC = DAG.getAddrSpaceCast(DL, LocalVT, Ptr, ADDRESS_SPACE_GENERIC,
1836 return DAG.getNode(NVPTXISD::STACKRESTORE, DL, MVT::Other, {Chain, ASC});
1837}
1838
1840 SelectionDAG &DAG) const {
1841 SDLoc DL(Op.getNode());
1842 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
1843 const Function &Fn = DAG.getMachineFunction().getFunction();
1844
1846 Fn,
1847 "Support for stacksave requires PTX ISA version >= 7.3 and target >= "
1848 "sm_52.",
1849 DL.getDebugLoc()));
1850 auto Ops = {DAG.getConstant(0, DL, Op.getValueType()), Op.getOperand(0)};
1851 return DAG.getMergeValues(Ops, DL);
1852 }
1853
1854 const MVT LocalVT = getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_LOCAL);
1855 SDValue Chain = Op.getOperand(0);
1856 SDValue SS =
1857 DAG.getNode(NVPTXISD::STACKSAVE, DL, {LocalVT, MVT::Other}, Chain);
1858 SDValue ASC = DAG.getAddrSpaceCast(
1859 DL, Op.getValueType(), SS, ADDRESS_SPACE_LOCAL, ADDRESS_SPACE_GENERIC);
1860 return DAG.getMergeValues({ASC, SDValue(SS.getNode(), 1)}, DL);
1861}
1862
1863// By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
1864// (see LegalizeDAG.cpp). This is slow and uses local memory.
1865// We use extract/insert/build vector just as what LegalizeOp() does in llvm 2.5
1866SDValue
1867NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
1868 SDNode *Node = Op.getNode();
1869 SDLoc dl(Node);
1871 unsigned NumOperands = Node->getNumOperands();
1872 for (unsigned i = 0; i < NumOperands; ++i) {
1873 SDValue SubOp = Node->getOperand(i);
1874 EVT VVT = SubOp.getNode()->getValueType(0);
1875 EVT EltVT = VVT.getVectorElementType();
1876 unsigned NumSubElem = VVT.getVectorNumElements();
1877 for (unsigned j = 0; j < NumSubElem; ++j) {
1878 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
1879 DAG.getIntPtrConstant(j, dl)));
1880 }
1881 }
1882 return DAG.getBuildVector(Node->getValueType(0), dl, Ops);
1883}
1884
1886 SelectionDAG &DAG,
1887 unsigned Mode = NVPTX::PTXPrmtMode::NONE) {
1888 assert(A.getValueType() == MVT::i32 && B.getValueType() == MVT::i32 &&
1889 Selector.getValueType() == MVT::i32 && "PRMT must have i32 operands");
1890 return DAG.getNode(NVPTXISD::PRMT, DL, MVT::i32,
1891 {A, B, Selector, DAG.getConstant(Mode, DL, MVT::i32)});
1892}
1893
1895 SelectionDAG &DAG,
1896 unsigned Mode = NVPTX::PTXPrmtMode::NONE) {
1897 return getPRMT(A, B, DAG.getConstant(Selector, DL, MVT::i32), DL, DAG, Mode);
1898}
1899
1900/// Reduces the elements using the scalar operations provided. The operations
1901/// are sorted descending in number of inputs they take. The flags on the
1902/// original reduction operation will be propagated to each scalar operation.
1903/// Nearby elements are grouped in tree reduction, unlike the shuffle reduction
1904/// used in ExpandReductions and SelectionDAG.
1906 const SmallVector<SDValue> &Elements, EVT EltTy,
1907 ArrayRef<std::pair<unsigned /*NodeType*/, unsigned /*NumInputs*/>> Ops,
1908 const SDLoc &DL, const SDNodeFlags Flags, SelectionDAG &DAG) {
1909 // Build the reduction tree at each level, starting with all the elements.
1910 SmallVector<SDValue> Level = Elements;
1911
1912 unsigned OpIdx = 0;
1913 while (Level.size() > 1) {
1914 // Try to reduce this level using the current operator.
1915 const auto [Op, NumInputs] = Ops[OpIdx];
1916
1917 // Build the next level by partially reducing all elements.
1918 SmallVector<SDValue> ReducedLevel;
1919 unsigned I = 0, E = Level.size();
1920 for (; I + NumInputs <= E; I += NumInputs) {
1921 // Reduce elements in groups of [NumInputs], as much as possible.
1922 ReducedLevel.push_back(DAG.getNode(
1923 Op, DL, EltTy, ArrayRef<SDValue>(Level).slice(I, NumInputs), Flags));
1924 }
1925
1926 if (I < E) {
1927 // Handle leftover elements.
1928
1929 if (ReducedLevel.empty()) {
1930 // We didn't reduce anything at this level. We need to pick a smaller
1931 // operator.
1932 ++OpIdx;
1933 assert(OpIdx < Ops.size() && "no smaller operators for reduction");
1934 continue;
1935 }
1936
1937 // We reduced some things but there's still more left, meaning the
1938 // operator's number of inputs doesn't evenly divide this level size. Move
1939 // these elements to the next level.
1940 for (; I < E; ++I)
1941 ReducedLevel.push_back(Level[I]);
1942 }
1943
1944 // Process the next level.
1945 Level = ReducedLevel;
1946 }
1947
1948 return *Level.begin();
1949}
1950
1951// Get scalar reduction opcode
1952static ISD::NodeType getScalarOpcodeForReduction(unsigned ReductionOpcode) {
1953 switch (ReductionOpcode) {
1955 return ISD::FMAXNUM;
1957 return ISD::FMINNUM;
1959 return ISD::FMAXIMUM;
1961 return ISD::FMINIMUM;
1962 default:
1963 llvm_unreachable("unhandled reduction opcode");
1964 }
1965}
1966
1967/// Get 3-input scalar reduction opcode
1968static std::optional<unsigned>
1969getScalar3OpcodeForReduction(unsigned ReductionOpcode) {
1970 switch (ReductionOpcode) {
1972 return NVPTXISD::FMAXNUM3;
1974 return NVPTXISD::FMINNUM3;
1976 return NVPTXISD::FMAXIMUM3;
1978 return NVPTXISD::FMINIMUM3;
1979 default:
1980 return std::nullopt;
1981 }
1982}
1983
1984/// Lower reductions to either a sequence of operations or a tree if
1985/// reassociations are allowed. This method will use larger operations like
1986/// max3/min3 when the target supports them.
1987SDValue NVPTXTargetLowering::LowerVECREDUCE(SDValue Op,
1988 SelectionDAG &DAG) const {
1989 SDLoc DL(Op);
1990 const SDNodeFlags Flags = Op->getFlags();
1991 SDValue Vector = Op.getOperand(0);
1992
1993 const unsigned Opcode = Op->getOpcode();
1994 const EVT EltTy = Vector.getValueType().getVectorElementType();
1995
1996 // Whether we can use 3-input min/max when expanding the reduction.
1997 const bool CanUseMinMax3 =
1998 EltTy == MVT::f32 && STI.getSmVersion() >= 100 &&
1999 STI.getPTXVersion() >= 88 &&
2000 (Opcode == ISD::VECREDUCE_FMAX || Opcode == ISD::VECREDUCE_FMIN ||
2001 Opcode == ISD::VECREDUCE_FMAXIMUM || Opcode == ISD::VECREDUCE_FMINIMUM);
2002
2003 // A list of SDNode opcodes with equivalent semantics, sorted descending by
2004 // number of inputs they take.
2005 SmallVector<std::pair<unsigned /*Op*/, unsigned /*NumIn*/>, 2> ScalarOps;
2006
2007 if (auto Opcode3Elem = getScalar3OpcodeForReduction(Opcode);
2008 CanUseMinMax3 && Opcode3Elem)
2009 ScalarOps.push_back({*Opcode3Elem, 3});
2010 ScalarOps.push_back({getScalarOpcodeForReduction(Opcode), 2});
2011
2013 DAG.ExtractVectorElements(Vector, Elements);
2014
2015 return buildTreeReduction(Elements, EltTy, ScalarOps, DL, Flags, DAG);
2016}
2017
2018SDValue NVPTXTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
2019 // Handle bitcasting from v2i8 without hitting the default promotion
2020 // strategy which goes through stack memory.
2021 EVT FromVT = Op->getOperand(0)->getValueType(0);
2022 if (FromVT != MVT::v2i8) {
2023 return Op;
2024 }
2025
2026 // Pack vector elements into i16 and bitcast to final type
2027 SDLoc DL(Op);
2028 SDValue Vec0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8,
2029 Op->getOperand(0), DAG.getIntPtrConstant(0, DL));
2030 SDValue Vec1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8,
2031 Op->getOperand(0), DAG.getIntPtrConstant(1, DL));
2032 SDValue Extend0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i16, Vec0);
2033 SDValue Extend1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i16, Vec1);
2034 SDValue Const8 = DAG.getConstant(8, DL, MVT::i16);
2035 SDValue AsInt = DAG.getNode(
2036 ISD::OR, DL, MVT::i16,
2037 {Extend0, DAG.getNode(ISD::SHL, DL, MVT::i16, {Extend1, Const8})});
2038 EVT ToVT = Op->getValueType(0);
2039 return DAG.getBitcast(ToVT, AsInt);
2040}
2041
2042// We can init constant f16x2/v2i16/v4i8 with a single .b32 move. Normally it
2043// would get lowered as two constant loads and vector-packing move.
2044// Instead we want just a constant move:
2045// mov.b32 %r2, 0x40003C00
2046SDValue NVPTXTargetLowering::LowerBUILD_VECTOR(SDValue Op,
2047 SelectionDAG &DAG) const {
2048 EVT VT = Op->getValueType(0);
2049 if (!(NVPTX::isPackedVectorTy(VT) && VT.is32BitVector()))
2050 return Op;
2051 SDLoc DL(Op);
2052
2053 if (!llvm::all_of(Op->ops(), [](SDValue Operand) {
2054 return Operand->isUndef() || isa<ConstantSDNode>(Operand) ||
2055 isa<ConstantFPSDNode>(Operand);
2056 })) {
2057 if (VT != MVT::v4i8)
2058 return Op;
2059 // Lower non-const v4i8 vector as byte-wise constructed i32, which allows us
2060 // to optimize calculation of constant parts.
2061 auto GetPRMT = [&](const SDValue Left, const SDValue Right, bool Cast,
2062 uint64_t SelectionValue) -> SDValue {
2063 SDValue L = Left;
2064 SDValue R = Right;
2065 if (Cast) {
2066 L = DAG.getAnyExtOrTrunc(L, DL, MVT::i32);
2067 R = DAG.getAnyExtOrTrunc(R, DL, MVT::i32);
2068 }
2069 return getPRMT(L, R, SelectionValue, DL, DAG);
2070 };
2071 auto PRMT__10 = GetPRMT(Op->getOperand(0), Op->getOperand(1), true, 0x3340);
2072 auto PRMT__32 = GetPRMT(Op->getOperand(2), Op->getOperand(3), true, 0x3340);
2073 auto PRMT3210 = GetPRMT(PRMT__10, PRMT__32, false, 0x5410);
2074 return DAG.getBitcast(VT, PRMT3210);
2075 }
2076
2077 // Get value or the Nth operand as an APInt(32). Undef values treated as 0.
2078 auto GetOperand = [](SDValue Op, int N) -> APInt {
2079 const SDValue &Operand = Op->getOperand(N);
2080 EVT VT = Op->getValueType(0);
2081 if (Operand->isUndef())
2082 return APInt(32, 0);
2083 APInt Value;
2084 if (VT == MVT::v2f16 || VT == MVT::v2bf16)
2085 Value = cast<ConstantFPSDNode>(Operand)->getValueAPF().bitcastToAPInt();
2086 else if (VT == MVT::v2i16 || VT == MVT::v4i8)
2087 Value = Operand->getAsAPIntVal();
2088 else
2089 llvm_unreachable("Unsupported type");
2090 // i8 values are carried around as i16, so we need to zero out upper bits,
2091 // so they do not get in the way of combining individual byte values
2092 if (VT == MVT::v4i8)
2093 Value = Value.trunc(8);
2094 return Value.zext(32);
2095 };
2096
2097 // Construct a 32-bit constant by shifting into place smaller values
2098 // (elements of the vector type VT).
2099 // For example, if VT has 2 elements, then N == 2:
2100 // ShiftAmount = 32 / N = 16
2101 // Value |= Op0 (b16) << 0
2102 // Value |= Op1 (b16) << 16
2103 // If N == 4:
2104 // ShiftAmount = 32 / N = 8
2105 // Value |= Op0 (b8) << 0
2106 // Value |= Op1 (b8) << 8
2107 // Value |= Op2 (b8) << 16
2108 // Value |= Op3 (b8) << 24
2109 // ...etc
2110 APInt Value(32, 0);
2111 const unsigned NumElements = VT.getVectorNumElements();
2112 assert(32 % NumElements == 0 && "must evenly divide bit length");
2113 const unsigned ShiftAmount = 32 / NumElements;
2114 for (unsigned ElementNo : seq(NumElements))
2115 Value |= GetOperand(Op, ElementNo).shl(ElementNo * ShiftAmount);
2116 SDValue Const = DAG.getConstant(Value, DL, MVT::i32);
2117 return DAG.getNode(ISD::BITCAST, DL, Op->getValueType(0), Const);
2118}
2119
2120SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
2121 SelectionDAG &DAG) const {
2122 SDValue Index = Op->getOperand(1);
2123 SDValue Vector = Op->getOperand(0);
2124 SDLoc DL(Op);
2125 EVT VectorVT = Vector.getValueType();
2126
2127 if (VectorVT == MVT::v4i8) {
2128 SDValue Selector = DAG.getNode(ISD::OR, DL, MVT::i32,
2129 DAG.getZExtOrTrunc(Index, DL, MVT::i32),
2130 DAG.getConstant(0x7770, DL, MVT::i32));
2131 SDValue PRMT = getPRMT(DAG.getBitcast(MVT::i32, Vector),
2132 DAG.getConstant(0, DL, MVT::i32), Selector, DL, DAG);
2133 SDValue Ext = DAG.getAnyExtOrTrunc(PRMT, DL, Op->getValueType(0));
2134 SDNodeFlags Flags;
2135 Flags.setNoSignedWrap(Ext.getScalarValueSizeInBits() > 8);
2136 Flags.setNoUnsignedWrap(Ext.getScalarValueSizeInBits() >= 8);
2137 Ext->setFlags(Flags);
2138 return Ext;
2139 }
2140
2141 // Constant index will be matched by tablegen.
2142 if (isa<ConstantSDNode>(Index.getNode()))
2143 return Op;
2144
2145 // Extract individual elements and select one of them.
2146 assert(NVPTX::isPackedVectorTy(VectorVT) &&
2147 VectorVT.getVectorNumElements() == 2 && "Unexpected vector type.");
2148 EVT EltVT = VectorVT.getVectorElementType();
2149
2150 SDLoc dl(Op.getNode());
2151 SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
2152 DAG.getIntPtrConstant(0, dl));
2153 SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
2154 DAG.getIntPtrConstant(1, dl));
2155 return DAG.getSelectCC(dl, Index, DAG.getIntPtrConstant(0, dl), E0, E1,
2157}
2158
2159SDValue NVPTXTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
2160 SelectionDAG &DAG) const {
2161 SDValue Vector = Op->getOperand(0);
2162 EVT VectorVT = Vector.getValueType();
2163
2164 if (VectorVT != MVT::v4i8)
2165 return Op;
2166 SDLoc DL(Op);
2167 SDValue Value = Op->getOperand(1);
2168 if (Value->isUndef())
2169 return Vector;
2170
2171 SDValue Index = Op->getOperand(2);
2172
2173 SDValue BFI =
2174 DAG.getNode(NVPTXISD::BFI, DL, MVT::i32,
2175 {DAG.getZExtOrTrunc(Value, DL, MVT::i32), Vector,
2176 DAG.getNode(ISD::MUL, DL, MVT::i32,
2177 DAG.getZExtOrTrunc(Index, DL, MVT::i32),
2178 DAG.getConstant(8, DL, MVT::i32)),
2179 DAG.getConstant(8, DL, MVT::i32)});
2180 return DAG.getNode(ISD::BITCAST, DL, Op->getValueType(0), BFI);
2181}
2182
2183SDValue NVPTXTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
2184 SelectionDAG &DAG) const {
2185 SDValue V1 = Op.getOperand(0);
2186 EVT VectorVT = V1.getValueType();
2187 if (VectorVT != MVT::v4i8 || Op.getValueType() != MVT::v4i8)
2188 return Op;
2189
2190 // Lower shuffle to PRMT instruction.
2191 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2192 SDValue V2 = Op.getOperand(1);
2193 uint32_t Selector = 0;
2194 for (auto I : llvm::enumerate(SVN->getMask())) {
2195 if (I.value() != -1) // -1 is a placeholder for undef.
2196 Selector |= (I.value() << (I.index() * 4));
2197 }
2198
2199 SDLoc DL(Op);
2200 SDValue PRMT = getPRMT(DAG.getBitcast(MVT::i32, V1),
2201 DAG.getBitcast(MVT::i32, V2), Selector, DL, DAG);
2202 return DAG.getBitcast(Op.getValueType(), PRMT);
2203}
2204/// LowerShiftRightParts - Lower SRL_PARTS, SRA_PARTS, which
2205/// 1) returns two i32 values and take a 2 x i32 value to shift plus a shift
2206/// amount, or
2207/// 2) returns two i64 values and take a 2 x i64 value to shift plus a shift
2208/// amount.
2209SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
2210 SelectionDAG &DAG) const {
2211 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
2212 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
2213
2214 EVT VT = Op.getValueType();
2215 unsigned VTBits = VT.getSizeInBits();
2216 SDLoc dl(Op);
2217 SDValue ShOpLo = Op.getOperand(0);
2218 SDValue ShOpHi = Op.getOperand(1);
2219 SDValue ShAmt = Op.getOperand(2);
2220 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
2221
2222 if (VTBits == 32 && STI.getSmVersion() >= 35) {
2223 // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
2224 // {dHi, dLo} = {aHi, aLo} >> Amt
2225 // dHi = aHi >> Amt
2226 // dLo = shf.r.clamp aLo, aHi, Amt
2227
2228 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
2229 SDValue Lo =
2230 DAG.getNode(NVPTXISD::FSHR_CLAMP, dl, VT, ShOpHi, ShOpLo, ShAmt);
2231
2232 SDValue Ops[2] = { Lo, Hi };
2233 return DAG.getMergeValues(Ops, dl);
2234 }
2235 else {
2236 // {dHi, dLo} = {aHi, aLo} >> Amt
2237 // - if (Amt>=size) then
2238 // dLo = aHi >> (Amt-size)
2239 // dHi = aHi >> Amt (this is either all 0 or all 1)
2240 // else
2241 // dLo = (aLo >>logic Amt) | (aHi << (size-Amt))
2242 // dHi = aHi >> Amt
2243
2244 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
2245 DAG.getConstant(VTBits, dl, MVT::i32),
2246 ShAmt);
2247 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
2248 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
2249 DAG.getConstant(VTBits, dl, MVT::i32));
2250 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
2251 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2252 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
2253
2254 SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
2255 DAG.getConstant(VTBits, dl, MVT::i32),
2256 ISD::SETGE);
2257 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
2258 SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
2259
2260 SDValue Ops[2] = { Lo, Hi };
2261 return DAG.getMergeValues(Ops, dl);
2262 }
2263}
2264
2265/// LowerShiftLeftParts - Lower SHL_PARTS, which
2266/// 1) returns two i32 values and take a 2 x i32 value to shift plus a shift
2267/// amount, or
2268/// 2) returns two i64 values and take a 2 x i64 value to shift plus a shift
2269/// amount.
2270SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
2271 SelectionDAG &DAG) const {
2272 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
2273 assert(Op.getOpcode() == ISD::SHL_PARTS);
2274
2275 EVT VT = Op.getValueType();
2276 unsigned VTBits = VT.getSizeInBits();
2277 SDLoc dl(Op);
2278 SDValue ShOpLo = Op.getOperand(0);
2279 SDValue ShOpHi = Op.getOperand(1);
2280 SDValue ShAmt = Op.getOperand(2);
2281
2282 if (VTBits == 32 && STI.getSmVersion() >= 35) {
2283 // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
2284 // {dHi, dLo} = {aHi, aLo} << Amt
2285 // dHi = shf.l.clamp aLo, aHi, Amt
2286 // dLo = aLo << Amt
2287
2288 SDValue Hi =
2289 DAG.getNode(NVPTXISD::FSHL_CLAMP, dl, VT, ShOpHi, ShOpLo, ShAmt);
2290 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
2291
2292 SDValue Ops[2] = { Lo, Hi };
2293 return DAG.getMergeValues(Ops, dl);
2294 }
2295 else {
2296 // {dHi, dLo} = {aHi, aLo} << Amt
2297 // - if (Amt>=size) then
2298 // dLo = aLo << Amt (all 0)
2299 // dLo = aLo << (Amt-size)
2300 // else
2301 // dLo = aLo << Amt
2302 // dHi = (aHi << Amt) | (aLo >> (size-Amt))
2303
2304 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
2305 DAG.getConstant(VTBits, dl, MVT::i32),
2306 ShAmt);
2307 SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
2308 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
2309 DAG.getConstant(VTBits, dl, MVT::i32));
2310 SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
2311 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2312 SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
2313
2314 SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
2315 DAG.getConstant(VTBits, dl, MVT::i32),
2316 ISD::SETGE);
2317 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
2318 SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
2319
2320 SDValue Ops[2] = { Lo, Hi };
2321 return DAG.getMergeValues(Ops, dl);
2322 }
2323}
2324
2325/// If the types match, convert the generic copysign to the NVPTXISD version,
2326/// otherwise bail ensuring that mismatched cases are properly expaned.
2327SDValue NVPTXTargetLowering::LowerFCOPYSIGN(SDValue Op,
2328 SelectionDAG &DAG) const {
2329 EVT VT = Op.getValueType();
2330 SDLoc DL(Op);
2331
2332 SDValue In1 = Op.getOperand(0);
2333 SDValue In2 = Op.getOperand(1);
2334 EVT SrcVT = In2.getValueType();
2335
2336 if (!SrcVT.bitsEq(VT))
2337 return SDValue();
2338
2339 return DAG.getNode(NVPTXISD::FCOPYSIGN, DL, VT, In1, In2);
2340}
2341
2342SDValue NVPTXTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
2343 EVT VT = Op.getValueType();
2344
2345 if (VT == MVT::f32)
2346 return LowerFROUND32(Op, DAG);
2347
2348 if (VT == MVT::f64)
2349 return LowerFROUND64(Op, DAG);
2350
2351 llvm_unreachable("unhandled type");
2352}
2353
2354// This is the the rounding method used in CUDA libdevice in C like code:
2355// float roundf(float A)
2356// {
2357// float RoundedA = (float) (int) ( A > 0 ? (A + 0.5f) : (A - 0.5f));
2358// RoundedA = abs(A) > 0x1.0p23 ? A : RoundedA;
2359// return abs(A) < 0.5 ? (float)(int)A : RoundedA;
2360// }
2361SDValue NVPTXTargetLowering::LowerFROUND32(SDValue Op,
2362 SelectionDAG &DAG) const {
2363 SDLoc SL(Op);
2364 SDValue A = Op.getOperand(0);
2365 EVT VT = Op.getValueType();
2366
2367 SDValue AbsA = DAG.getNode(ISD::FABS, SL, VT, A);
2368
2369 // RoundedA = (float) (int) ( A > 0 ? (A + 0.5f) : (A - 0.5f))
2370 SDValue Bitcast = DAG.getNode(ISD::BITCAST, SL, MVT::i32, A);
2371 const unsigned SignBitMask = 0x80000000;
2372 SDValue Sign = DAG.getNode(ISD::AND, SL, MVT::i32, Bitcast,
2373 DAG.getConstant(SignBitMask, SL, MVT::i32));
2374 const unsigned PointFiveInBits = 0x3F000000;
2375 SDValue PointFiveWithSignRaw =
2376 DAG.getNode(ISD::OR, SL, MVT::i32, Sign,
2377 DAG.getConstant(PointFiveInBits, SL, MVT::i32));
2378 SDValue PointFiveWithSign =
2379 DAG.getNode(ISD::BITCAST, SL, VT, PointFiveWithSignRaw);
2380 SDValue AdjustedA = DAG.getNode(ISD::FADD, SL, VT, A, PointFiveWithSign);
2381 SDValue RoundedA = DAG.getNode(ISD::FTRUNC, SL, VT, AdjustedA);
2382
2383 // RoundedA = abs(A) > 0x1.0p23 ? A : RoundedA;
2384 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2385 SDValue IsLarge =
2386 DAG.getSetCC(SL, SetCCVT, AbsA, DAG.getConstantFP(pow(2.0, 23.0), SL, VT),
2387 ISD::SETOGT);
2388 RoundedA = DAG.getNode(ISD::SELECT, SL, VT, IsLarge, A, RoundedA);
2389
2390 // return abs(A) < 0.5 ? (float)(int)A : RoundedA;
2391 SDValue IsSmall =DAG.getSetCC(SL, SetCCVT, AbsA,
2392 DAG.getConstantFP(0.5, SL, VT), ISD::SETOLT);
2393 SDValue RoundedAForSmallA = DAG.getNode(ISD::FTRUNC, SL, VT, A);
2394 return DAG.getNode(ISD::SELECT, SL, VT, IsSmall, RoundedAForSmallA, RoundedA);
2395}
2396
2397// The implementation of round(double) is similar to that of round(float) in
2398// that they both separate the value range into three regions and use a method
2399// specific to the region to round the values. However, round(double) first
2400// calculates the round of the absolute value and then adds the sign back while
2401// round(float) directly rounds the value with sign.
2402SDValue NVPTXTargetLowering::LowerFROUND64(SDValue Op,
2403 SelectionDAG &DAG) const {
2404 SDLoc SL(Op);
2405 SDValue A = Op.getOperand(0);
2406 EVT VT = Op.getValueType();
2407
2408 SDValue AbsA = DAG.getNode(ISD::FABS, SL, VT, A);
2409
2410 // double RoundedA = (double) (int) (abs(A) + 0.5f);
2411 SDValue AdjustedA = DAG.getNode(ISD::FADD, SL, VT, AbsA,
2412 DAG.getConstantFP(0.5, SL, VT));
2413 SDValue RoundedA = DAG.getNode(ISD::FTRUNC, SL, VT, AdjustedA);
2414
2415 // RoundedA = abs(A) < 0.5 ? (double)0 : RoundedA;
2416 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2417 SDValue IsSmall =DAG.getSetCC(SL, SetCCVT, AbsA,
2418 DAG.getConstantFP(0.5, SL, VT), ISD::SETOLT);
2419 RoundedA = DAG.getNode(ISD::SELECT, SL, VT, IsSmall,
2420 DAG.getConstantFP(0, SL, VT),
2421 RoundedA);
2422
2423 // Add sign to rounded_A
2424 RoundedA = DAG.getNode(ISD::FCOPYSIGN, SL, VT, RoundedA, A);
2425 DAG.getNode(ISD::FTRUNC, SL, VT, A);
2426
2427 // RoundedA = abs(A) > 0x1.0p52 ? A : RoundedA;
2428 SDValue IsLarge =
2429 DAG.getSetCC(SL, SetCCVT, AbsA, DAG.getConstantFP(pow(2.0, 52.0), SL, VT),
2430 ISD::SETOGT);
2431 return DAG.getNode(ISD::SELECT, SL, VT, IsLarge, A, RoundedA);
2432}
2433
2435 EVT VT = N->getValueType(0);
2436 EVT NVT = MVT::f32;
2437 if (VT.isVector()) {
2438 NVT = EVT::getVectorVT(*DAG.getContext(), NVT, VT.getVectorElementCount());
2439 }
2440 SDLoc DL(N);
2441 SDValue Tmp0 = DAG.getFPExtendOrRound(N->getOperand(0), DL, NVT);
2442 SDValue Tmp1 = DAG.getFPExtendOrRound(N->getOperand(1), DL, NVT);
2443 SDValue Res = DAG.getNode(N->getOpcode(), DL, NVT, Tmp0, Tmp1, N->getFlags());
2444 return DAG.getFPExtendOrRound(Res, DL, VT);
2445}
2446
2447SDValue NVPTXTargetLowering::PromoteBinOpIfF32FTZ(SDValue Op,
2448 SelectionDAG &DAG) const {
2449 if (useF32FTZ(DAG.getMachineFunction())) {
2450 return PromoteBinOpToF32(Op.getNode(), DAG);
2451 }
2452 return Op;
2453}
2454
2455SDValue NVPTXTargetLowering::LowerINT_TO_FP(SDValue Op,
2456 SelectionDAG &DAG) const {
2457 assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78);
2458
2459 if (Op.getValueType() == MVT::bf16) {
2460 SDLoc Loc(Op);
2461 return DAG.getNode(
2462 ISD::FP_ROUND, Loc, MVT::bf16,
2463 DAG.getNode(Op.getOpcode(), Loc, MVT::f32, Op.getOperand(0)),
2464 DAG.getIntPtrConstant(0, Loc, /*isTarget=*/true));
2465 }
2466
2467 // Everything else is considered legal.
2468 return Op;
2469}
2470
2471SDValue NVPTXTargetLowering::LowerFP_TO_INT(SDValue Op,
2472 SelectionDAG &DAG) const {
2473 assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78);
2474
2475 if (Op.getOperand(0).getValueType() == MVT::bf16) {
2476 SDLoc Loc(Op);
2477 return DAG.getNode(
2478 Op.getOpcode(), Loc, Op.getValueType(),
2479 DAG.getNode(ISD::FP_EXTEND, Loc, MVT::f32, Op.getOperand(0)));
2480 }
2481
2482 // Everything else is considered legal.
2483 return Op;
2484}
2485
2486SDValue NVPTXTargetLowering::LowerFP_ROUND(SDValue Op,
2487 SelectionDAG &DAG) const {
2488 EVT NarrowVT = Op.getValueType();
2489 SDValue Wide = Op.getOperand(0);
2490 EVT WideVT = Wide.getValueType();
2491 if (NarrowVT.getScalarType() == MVT::bf16) {
2492 const TargetLowering *TLI = STI.getTargetLowering();
2493 if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 70) {
2494 return TLI->expandFP_ROUND(Op.getNode(), DAG);
2495 }
2496 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
2497 // This combination was the first to support f32 -> bf16.
2498 if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70) {
2499 if (WideVT.getScalarType() == MVT::f32) {
2500 return Op;
2501 }
2502 if (WideVT.getScalarType() == MVT::f64) {
2503 SDLoc Loc(Op);
2504 // Round-inexact-to-odd f64 to f32, then do the final rounding using
2505 // the hardware f32 -> bf16 instruction.
2507 WideVT.changeElementType(*DAG.getContext(), MVT::f32), Wide, Loc,
2508 DAG);
2509 return DAG.getFPExtendOrRound(rod, Loc, NarrowVT);
2510 }
2511 }
2512 return TLI->expandFP_ROUND(Op.getNode(), DAG);
2513 }
2514 }
2515
2516 // Everything else is considered legal.
2517 return Op;
2518}
2519
2520SDValue NVPTXTargetLowering::LowerFP_EXTEND(SDValue Op,
2521 SelectionDAG &DAG) const {
2522 SDValue Narrow = Op.getOperand(0);
2523 EVT NarrowVT = Narrow.getValueType();
2524 EVT WideVT = Op.getValueType();
2525 if (NarrowVT.getScalarType() == MVT::bf16) {
2526 if (WideVT.getScalarType() == MVT::f32 &&
2527 (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71)) {
2528 SDLoc Loc(Op);
2529 return DAG.getNode(ISD::BF16_TO_FP, Loc, WideVT, Narrow);
2530 }
2531 if (WideVT.getScalarType() == MVT::f64 &&
2532 (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78)) {
2533 EVT F32 = NarrowVT.changeElementType(*DAG.getContext(), MVT::f32);
2534 SDLoc Loc(Op);
2535 if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 71) {
2536 Op = DAG.getNode(ISD::FP_EXTEND, Loc, F32, Narrow);
2537 } else {
2538 Op = DAG.getNode(ISD::BF16_TO_FP, Loc, F32, Narrow);
2539 }
2540 return DAG.getNode(ISD::FP_EXTEND, Loc, WideVT, Op);
2541 }
2542 }
2543
2544 // Everything else is considered legal.
2545 return Op;
2546}
2547
2549 SDLoc DL(Op);
2550 if (Op.getValueType() != MVT::v2i16)
2551 return Op;
2552 EVT EltVT = Op.getValueType().getVectorElementType();
2553 SmallVector<SDValue> VecElements;
2554 for (int I = 0, E = Op.getValueType().getVectorNumElements(); I < E; I++) {
2555 SmallVector<SDValue> ScalarArgs;
2556 llvm::transform(Op->ops(), std::back_inserter(ScalarArgs),
2557 [&](const SDUse &O) {
2558 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
2559 O.get(), DAG.getIntPtrConstant(I, DL));
2560 });
2561 VecElements.push_back(DAG.getNode(Op.getOpcode(), DL, EltVT, ScalarArgs));
2562 }
2563 SDValue V =
2564 DAG.getNode(ISD::BUILD_VECTOR, DL, Op.getValueType(), VecElements);
2565 return V;
2566}
2567
2569 bool hasOffset = false) {
2570 // skip lowering if the vector operand is already legalized
2571 if (!Op->getOperand(hasOffset ? 4 : 3).getValueType().isVector())
2572 return Op;
2573
2574 SDNode *N = Op.getNode();
2575 SDLoc DL(N);
2577
2578 // split the vector argument
2579 for (size_t I = 0; I < N->getNumOperands(); I++) {
2580 SDValue Val = N->getOperand(I);
2581 EVT ValVT = Val.getValueType();
2582 if (ValVT.isVector()) {
2583 EVT EltVT = ValVT.getVectorElementType();
2584 for (unsigned J = 0, NElts = ValVT.getVectorNumElements(); J < NElts; J++)
2585 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
2586 DAG.getIntPtrConstant(J, DL)));
2587 } else
2588 Ops.push_back(Val);
2589 }
2590
2592 SDValue Tcgen05StNode =
2593 DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, N->getVTList(), Ops,
2594 MemSD->getMemoryVT(), MemSD->getMemOperand());
2595
2596 return Tcgen05StNode;
2597}
2598
2600 SDLoc DL(Op);
2601 SDValue Src = Op.getOperand(0);
2602 EVT VT = Op.getValueType();
2603
2604 switch (VT.getSimpleVT().SimpleTy) {
2605 case MVT::i16: {
2606 SDValue Extended = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src);
2607 SDValue Swapped =
2608 getPRMT(Extended, DAG.getConstant(0, DL, MVT::i32), 0x7701, DL, DAG);
2609 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Swapped);
2610 }
2611 case MVT::i32: {
2612 return getPRMT(Src, DAG.getConstant(0, DL, MVT::i32), 0x0123, DL, DAG);
2613 }
2614 case MVT::v2i16: {
2615 SDValue Converted = DAG.getBitcast(MVT::i32, Src);
2616 SDValue Swapped =
2617 getPRMT(Converted, DAG.getConstant(0, DL, MVT::i32), 0x2301, DL, DAG);
2618 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i16, Swapped);
2619 }
2620 case MVT::i64: {
2621 SDValue UnpackSrc =
2622 DAG.getNode(NVPTXISD::UNPACK_VECTOR, DL, {MVT::i32, MVT::i32}, Src);
2623 SDValue SwappedLow =
2624 getPRMT(UnpackSrc.getValue(0), DAG.getConstant(0, DL, MVT::i32), 0x0123,
2625 DL, DAG);
2626 SDValue SwappedHigh =
2627 getPRMT(UnpackSrc.getValue(1), DAG.getConstant(0, DL, MVT::i32), 0x0123,
2628 DL, DAG);
2629 return DAG.getNode(NVPTXISD::BUILD_VECTOR, DL, MVT::i64,
2630 {SwappedHigh, SwappedLow});
2631 }
2632 default:
2633 llvm_unreachable("unsupported type for bswap");
2634 }
2635}
2636
2637static unsigned getTcgen05MMADisableOutputLane(unsigned IID) {
2638 switch (IID) {
2639 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
2640 return NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG1;
2641 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
2642 return NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG2;
2643 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
2644 return NVPTXISD::TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2645 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
2646 return NVPTXISD::TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2647 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
2648 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1;
2649 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
2650 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2;
2651 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
2652 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2653 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
2654 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2655 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
2656 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2657 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
2658 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2659 case Intrinsic::
2660 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
2661 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2662 case Intrinsic::
2663 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
2664 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2665 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
2666 return NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG1;
2667 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
2668 return NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG2;
2669 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
2670 return NVPTXISD::TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2671 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
2672 return NVPTXISD::TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2673 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
2674 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1;
2675 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
2676 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2;
2677 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
2678 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2679 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
2680 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2681 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
2682 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2683 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
2684 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2685 case Intrinsic::
2686 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift:
2687 return NVPTXISD::
2688 TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2689 case Intrinsic::
2690 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift:
2691 return NVPTXISD::
2692 TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2693 };
2694 llvm_unreachable("unhandled tcgen05.mma.disable_output_lane intrinsic");
2695}
2696
2698 SDNode *N = Op.getNode();
2699 SDLoc DL(N);
2700 unsigned IID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2701
2703 // split the vector argument
2704 for (size_t I = 0; I < N->getNumOperands(); I++) {
2705 if (I == 1)
2706 continue; // skip IID
2707 SDValue Val = N->getOperand(I);
2708 EVT ValVT = Val.getValueType();
2709 if (ValVT.isVector()) {
2710 EVT EltVT = ValVT.getVectorElementType();
2711 for (unsigned J = 0, NElts = ValVT.getVectorNumElements(); J < NElts; J++)
2712 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
2713 DAG.getIntPtrConstant(J, DL)));
2714 } else
2715 Ops.push_back(Val);
2716 }
2717
2719 SDValue Tcgen05MMANode = DAG.getMemIntrinsicNode(
2720 getTcgen05MMADisableOutputLane(IID), DL, N->getVTList(), Ops,
2721 MemSD->getMemoryVT(), MemSD->getMemOperand());
2722
2723 return Tcgen05MMANode;
2724}
2725
2726// Lower vector return type of tcgen05.ld intrinsics
2727static std::optional<std::pair<SDValue, SDValue>>
2728lowerTcgen05Ld(SDNode *N, SelectionDAG &DAG, bool HasOffset = false) {
2729 SDLoc DL(N);
2730 EVT ResVT = N->getValueType(0);
2731 if (!ResVT.isVector())
2732 return {}; // already legalized.
2733
2734 const unsigned NumElts = ResVT.getVectorNumElements();
2735
2736 // Create the return type of the instructions
2737 SmallVector<EVT, 5> ListVTs;
2738 for (unsigned i = 0; i < NumElts; ++i)
2739 ListVTs.push_back(MVT::i32);
2740
2741 ListVTs.push_back(N->getValueType(1)); // Chain
2742
2743 SDVTList ResVTs = DAG.getVTList(ListVTs);
2744
2745 SmallVector<SDValue, 8> Ops{N->getOperand(0), N->getOperand(1),
2746 N->getOperand(2)};
2747
2748 if (HasOffset) {
2749 Ops.push_back(N->getOperand(3)); // offset
2750 Ops.push_back(N->getOperand(4)); // Pack flag
2751 } else
2752 Ops.push_back(N->getOperand(3)); // Pack flag
2753
2755 SDValue NewNode =
2757 MemSD->getMemoryVT(), MemSD->getMemOperand());
2758
2759 // split the vector result
2760 SmallVector<SDValue, 4> ScalarRes;
2761 for (unsigned i = 0; i < NumElts; ++i) {
2762 SDValue Res = NewNode.getValue(i);
2763 ScalarRes.push_back(Res);
2764 }
2765
2766 SDValue Chain = NewNode.getValue(NumElts);
2767 SDValue BuildVector = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
2768 return {{BuildVector, Chain}};
2769}
2770
2772 unsigned Val) {
2773 SDNode *N = Op.getNode();
2774 SDLoc DL(N);
2775
2776 const Function &Fn = DAG.getMachineFunction().getFunction();
2777
2778 unsigned AS = 0;
2779 if (auto *MemN = dyn_cast<MemIntrinsicSDNode>(N))
2780 AS = MemN->getAddressSpace();
2781 Type *PtrTy = PointerType::get(*DAG.getContext(), AS);
2783
2785 Fn,
2786 "Intrinsic " +
2787 Intrinsic::getName(N->getConstantOperandVal(1), {PtrTy}, M) +
2788 " with value " + Twine(Val) +
2789 " is not supported on the given target.",
2790 DL.getDebugLoc()));
2791 return Op.getOperand(0);
2792}
2793
2795 SDNode *N = Op.getNode();
2796 SDLoc DL(N);
2797
2798 // immediate argument representing elemtype
2799 unsigned Val = N->getConstantOperandVal(3);
2800
2802 Val))
2803 return reportInvalidTensormapReplaceUsage(Op, DAG, Val);
2804
2805 return Op;
2806}
2807
2809 SDNode *N = Op.getNode();
2810 SDLoc DL(N);
2811
2812 // immediate argument representing swizzle mode
2813 unsigned Val = N->getConstantOperandVal(3);
2814
2816 Val))
2817 return reportInvalidTensormapReplaceUsage(Op, DAG, Val);
2818
2819 return Op;
2820}
2821
2823 SDNode *N = Op.getNode();
2824 SDValue Intrin = N->getOperand(1);
2825
2826 // Get the intrinsic ID
2827 unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
2828 switch (IntrinNo) {
2829 default:
2830 break;
2831 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:
2832 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:
2833 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:
2834 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:
2835 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:
2836 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:
2837 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:
2838 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:
2839 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:
2840 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:
2841 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:
2842 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:
2843 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:
2844 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:
2845 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:
2846 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:
2847 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:
2848 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:
2849 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:
2850 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:
2851 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:
2852 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:
2853 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:
2854 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:
2855 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:
2856 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
2857 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:
2858 return lowerTcgen05St(Op, DAG);
2859 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2:
2860 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4:
2861 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8:
2862 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16:
2863 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32:
2864 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64:
2865 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128:
2866 return lowerTcgen05St(Op, DAG, /* hasOffset */ true);
2867 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
2868 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
2869 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
2870 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
2871 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
2872 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
2873 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
2874 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
2875 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
2876 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
2877 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
2878 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
2879 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
2880 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
2881 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
2882 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
2883 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
2884 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
2885 case Intrinsic::
2886 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
2887 case Intrinsic::
2888 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
2889 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
2890 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
2891 case Intrinsic::
2892 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift:
2893 case Intrinsic::
2894 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift:
2896 case Intrinsic::nvvm_tensormap_replace_elemtype:
2897 return lowerTensormapReplaceElemtype(Op, DAG);
2898 case Intrinsic::nvvm_tensormap_replace_swizzle_mode:
2900 }
2901 return Op;
2902}
2903
2905 SelectionDAG &DAG) {
2906
2907 SDNode *N = Op.getNode();
2908 if (N->getOperand(1).getValueType() != MVT::i128) {
2909 // return, if the operand is already lowered
2910 return SDValue();
2911 }
2912
2913 unsigned IID =
2914 cast<ConstantSDNode>(N->getOperand(0).getNode())->getZExtValue();
2915 auto Opcode = [&]() {
2916 switch (IID) {
2917 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:
2918 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_IS_CANCELED;
2919 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:
2920 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_X;
2921 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
2922 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Y;
2923 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:
2924 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Z;
2925 default:
2926 llvm_unreachable("unsupported/unhandled intrinsic");
2927 }
2928 }();
2929
2930 SDLoc DL(N);
2931 SDValue TryCancelResponse = N->getOperand(1);
2932 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TryCancelResponse);
2933 SDValue TryCancelResponse0 =
2934 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
2935 DAG.getIntPtrConstant(0, DL));
2936 SDValue TryCancelResponse1 =
2937 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
2938 DAG.getIntPtrConstant(1, DL));
2939
2940 return DAG.getNode(Opcode, DL, N->getVTList(),
2941 {TryCancelResponse0, TryCancelResponse1});
2942}
2943
2945 SDNode *N = Op.getNode();
2946 SDLoc DL(N);
2947 SDValue F32Vec = N->getOperand(1);
2948 SDValue RBits = N->getOperand(2);
2949
2950 unsigned IntrinsicID = N->getConstantOperandVal(0);
2951
2952 // Extract the 4 float elements from the vector
2954 for (unsigned i = 0; i < 4; ++i)
2955 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, F32Vec,
2956 DAG.getIntPtrConstant(i, DL)));
2957
2959
2960 auto [OpCode, RetTy, CvtModeFlag] =
2961 [&]() -> std::tuple<unsigned, MVT::SimpleValueType, uint32_t> {
2962 switch (IntrinsicID) {
2963 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:
2964 return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8,
2965 CvtMode::RS | CvtMode::RELU_FLAG};
2966 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:
2967 return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2968 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:
2969 return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8,
2970 CvtMode::RS | CvtMode::RELU_FLAG};
2971 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:
2972 return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2973 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:
2974 return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8,
2975 CvtMode::RS | CvtMode::RELU_FLAG};
2976 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:
2977 return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2978 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:
2979 return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8,
2980 CvtMode::RS | CvtMode::RELU_FLAG};
2981 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:
2982 return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2983 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:
2984 return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16,
2985 CvtMode::RS | CvtMode::RELU_FLAG};
2986 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:
2987 return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16, CvtMode::RS};
2988 default:
2989 llvm_unreachable("unsupported/unhandled intrinsic");
2990 }
2991 }();
2992
2993 Ops.push_back(RBits);
2994 Ops.push_back(DAG.getConstant(CvtModeFlag, DL, MVT::i32));
2995
2996 return DAG.getNode(OpCode, DL, RetTy, Ops);
2997}
2998
3000 const unsigned Mode = [&]() {
3001 switch (Op->getConstantOperandVal(0)) {
3002 case Intrinsic::nvvm_prmt:
3004 case Intrinsic::nvvm_prmt_b4e:
3006 case Intrinsic::nvvm_prmt_ecl:
3008 case Intrinsic::nvvm_prmt_ecr:
3010 case Intrinsic::nvvm_prmt_f4e:
3012 case Intrinsic::nvvm_prmt_rc16:
3014 case Intrinsic::nvvm_prmt_rc8:
3016 default:
3017 llvm_unreachable("unsupported/unhandled intrinsic");
3018 }
3019 }();
3020 SDLoc DL(Op);
3021 SDValue A = Op->getOperand(1);
3022 SDValue B = Op.getNumOperands() == 4 ? Op.getOperand(2)
3023 : DAG.getConstant(0, DL, MVT::i32);
3024 SDValue Selector = (Op->op_end() - 1)->get();
3025 return getPRMT(A, B, Selector, DL, DAG, Mode);
3026}
3027
3028#define TCGEN05_LD_RED_INTR(SHAPE, NUM, TYPE) \
3029 Intrinsic::nvvm_tcgen05_ld_red_##SHAPE##_x##NUM##_##TYPE
3030
3031#define TCGEN05_LD_RED_INST(SHAPE, NUM, TYPE) \
3032 NVPTXISD::TCGEN05_LD_RED_##SHAPE##_X##NUM##_##TYPE
3033
3034static unsigned getTcgen05LdRedID(Intrinsic::ID IID) {
3035 switch (IID) {
3036 case TCGEN05_LD_RED_INTR(32x32b, 2, f32):
3037 return TCGEN05_LD_RED_INST(32x32b, 2, F32);
3038 case TCGEN05_LD_RED_INTR(32x32b, 4, f32):
3039 return TCGEN05_LD_RED_INST(32x32b, 4, F32);
3040 case TCGEN05_LD_RED_INTR(32x32b, 8, f32):
3041 return TCGEN05_LD_RED_INST(32x32b, 8, F32);
3042 case TCGEN05_LD_RED_INTR(32x32b, 16, f32):
3043 return TCGEN05_LD_RED_INST(32x32b, 16, F32);
3044 case TCGEN05_LD_RED_INTR(32x32b, 32, f32):
3045 return TCGEN05_LD_RED_INST(32x32b, 32, F32);
3046 case TCGEN05_LD_RED_INTR(32x32b, 64, f32):
3047 return TCGEN05_LD_RED_INST(32x32b, 64, F32);
3048 case TCGEN05_LD_RED_INTR(32x32b, 128, f32):
3049 return TCGEN05_LD_RED_INST(32x32b, 128, F32);
3050 case TCGEN05_LD_RED_INTR(16x32bx2, 2, f32):
3051 return TCGEN05_LD_RED_INST(16x32bx2, 2, F32);
3052 case TCGEN05_LD_RED_INTR(16x32bx2, 4, f32):
3053 return TCGEN05_LD_RED_INST(16x32bx2, 4, F32);
3054 case TCGEN05_LD_RED_INTR(16x32bx2, 8, f32):
3055 return TCGEN05_LD_RED_INST(16x32bx2, 8, F32);
3056 case TCGEN05_LD_RED_INTR(16x32bx2, 16, f32):
3057 return TCGEN05_LD_RED_INST(16x32bx2, 16, F32);
3058 case TCGEN05_LD_RED_INTR(16x32bx2, 32, f32):
3059 return TCGEN05_LD_RED_INST(16x32bx2, 32, F32);
3060 case TCGEN05_LD_RED_INTR(16x32bx2, 64, f32):
3061 return TCGEN05_LD_RED_INST(16x32bx2, 64, F32);
3062 case TCGEN05_LD_RED_INTR(16x32bx2, 128, f32):
3063 return TCGEN05_LD_RED_INST(16x32bx2, 128, F32);
3064 case TCGEN05_LD_RED_INTR(32x32b, 2, i32):
3065 return TCGEN05_LD_RED_INST(32x32b, 2, I32);
3066 case TCGEN05_LD_RED_INTR(32x32b, 4, i32):
3067 return TCGEN05_LD_RED_INST(32x32b, 4, I32);
3068 case TCGEN05_LD_RED_INTR(32x32b, 8, i32):
3069 return TCGEN05_LD_RED_INST(32x32b, 8, I32);
3070 case TCGEN05_LD_RED_INTR(32x32b, 16, i32):
3071 return TCGEN05_LD_RED_INST(32x32b, 16, I32);
3072 case TCGEN05_LD_RED_INTR(32x32b, 32, i32):
3073 return TCGEN05_LD_RED_INST(32x32b, 32, I32);
3074 case TCGEN05_LD_RED_INTR(32x32b, 64, i32):
3075 return TCGEN05_LD_RED_INST(32x32b, 64, I32);
3076 case TCGEN05_LD_RED_INTR(32x32b, 128, i32):
3077 return TCGEN05_LD_RED_INST(32x32b, 128, I32);
3078 case TCGEN05_LD_RED_INTR(16x32bx2, 2, i32):
3079 return TCGEN05_LD_RED_INST(16x32bx2, 2, I32);
3080 case TCGEN05_LD_RED_INTR(16x32bx2, 4, i32):
3081 return TCGEN05_LD_RED_INST(16x32bx2, 4, I32);
3082 case TCGEN05_LD_RED_INTR(16x32bx2, 8, i32):
3083 return TCGEN05_LD_RED_INST(16x32bx2, 8, I32);
3084 case TCGEN05_LD_RED_INTR(16x32bx2, 16, i32):
3085 return TCGEN05_LD_RED_INST(16x32bx2, 16, I32);
3086 case TCGEN05_LD_RED_INTR(16x32bx2, 32, i32):
3087 return TCGEN05_LD_RED_INST(16x32bx2, 32, I32);
3088 case TCGEN05_LD_RED_INTR(16x32bx2, 64, i32):
3089 return TCGEN05_LD_RED_INST(16x32bx2, 64, I32);
3090 case TCGEN05_LD_RED_INTR(16x32bx2, 128, i32):
3091 return TCGEN05_LD_RED_INST(16x32bx2, 128, I32);
3092 default:
3093 llvm_unreachable("Invalid tcgen05.ld.red intrinsic ID");
3094 }
3095}
3096
3097// Lower vector return type of tcgen05.ld intrinsics
3098static std::optional<std::tuple<SDValue, SDValue, SDValue>>
3100 SDLoc DL(N);
3101 EVT ResVT = N->getValueType(0);
3102 if (!ResVT.isVector())
3103 return {}; // already legalized.
3104
3105 const unsigned NumElts = ResVT.getVectorNumElements();
3106
3107 // Create the return type of the instructions
3108 // +1 represents the reduction value
3109 SmallVector<EVT, 132> ListVTs{
3110 NumElts + 1,
3111 ResVT.getVectorElementType().isFloatingPoint() ? MVT::f32 : MVT::i32};
3112
3113 ListVTs.push_back(MVT::Other); // Chain
3114
3115 SDVTList ResVTs = DAG.getVTList(ListVTs);
3116
3117 // Prepare the Operands
3118 SmallVector<SDValue, 8> Ops{N->getOperand(0)}; // Chain
3119
3120 // skip IID at index 1
3121 for (unsigned i = 2; i < N->getNumOperands(); i++)
3122 Ops.push_back(N->getOperand(i));
3123
3124 unsigned IID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
3126 SDValue NewNode =
3127 DAG.getMemIntrinsicNode(getTcgen05LdRedID(IID), DL, ResVTs, Ops,
3128 MemSD->getMemoryVT(), MemSD->getMemOperand());
3129
3130 // Split vector result
3131 SmallVector<SDValue, 132> ScalarRes;
3132 for (unsigned i = 0; i < NumElts; ++i) {
3133 SDValue Res = NewNode.getValue(i);
3134 ScalarRes.push_back(Res);
3135 }
3136
3137 SDValue BuildVector = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
3138 SDValue RedResult = NewNode.getValue(NumElts);
3139 SDValue Chain = NewNode.getValue(NumElts + 1);
3140 return {{BuildVector, RedResult, Chain}};
3141}
3142
3144 switch (Op->getConstantOperandVal(1)) {
3145 default:
3146 return Op;
3147
3148 // These tcgen05 intrinsics return a v2i32, which is legal, so we have to
3149 // lower them through LowerOperation() instead of ReplaceNodeResults().
3150 case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:
3151 case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:
3152 case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:
3153 if (auto Res = lowerTcgen05Ld(Op.getNode(), DAG))
3154 return DAG.getMergeValues({Res->first, Res->second}, SDLoc(Op));
3155 return SDValue();
3156
3157 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2:
3158 if (auto Res = lowerTcgen05Ld(Op.getNode(), DAG, /*HasOffset=*/true))
3159 return DAG.getMergeValues({Res->first, Res->second}, SDLoc(Op));
3160 return SDValue();
3161
3162 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x2_f32:
3163 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x2_i32:
3164 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x2_f32:
3165 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x2_i32:
3166 if (auto Res = lowerTcgen05LdRed(Op.getNode(), DAG))
3167 return DAG.getMergeValues(
3168 {std::get<0>(*Res), std::get<1>(*Res), std::get<2>(*Res)}, SDLoc(Op));
3169 return SDValue();
3170 }
3171}
3172
3174 switch (Op->getConstantOperandVal(0)) {
3175 default:
3176 return Op;
3177 case Intrinsic::nvvm_prmt:
3178 case Intrinsic::nvvm_prmt_b4e:
3179 case Intrinsic::nvvm_prmt_ecl:
3180 case Intrinsic::nvvm_prmt_ecr:
3181 case Intrinsic::nvvm_prmt_f4e:
3182 case Intrinsic::nvvm_prmt_rc16:
3183 case Intrinsic::nvvm_prmt_rc8:
3184 return lowerPrmtIntrinsic(Op, DAG);
3185 case Intrinsic::nvvm_internal_addrspace_wrap:
3186 return Op.getOperand(1);
3187 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:
3188 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:
3189 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
3190 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:
3192 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:
3193 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:
3194 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:
3195 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:
3196 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:
3197 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:
3198 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:
3199 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:
3200 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:
3201 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:
3202 return lowerCvtRSIntrinsics(Op, DAG);
3203 }
3204}
3205
3206// In PTX 64-bit CTLZ and CTPOP are supported, but they return a 32-bit value.
3207// Lower these into a node returning the correct type which is zero-extended
3208// back to the correct size.
3210 SDValue V = Op->getOperand(0);
3211 assert(V.getValueType() == MVT::i64 &&
3212 "Unexpected CTLZ/CTPOP type to legalize");
3213
3214 SDLoc DL(Op);
3215 SDValue CT = DAG.getNode(Op->getOpcode(), DL, MVT::i32, V);
3216 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, CT, SDNodeFlags::NonNeg);
3217}
3218
3220 unsigned Opcode, SelectionDAG &DAG) {
3221 assert(A.getValueType() == MVT::i64 && B.getValueType() == MVT::i64);
3222
3223 const auto *AmtConst = dyn_cast<ConstantSDNode>(ShiftAmount);
3224 if (!AmtConst)
3225 return SDValue();
3226 const auto Amt = AmtConst->getZExtValue() & 63;
3227
3228 SDValue UnpackA =
3229 DAG.getNode(NVPTXISD::UNPACK_VECTOR, DL, {MVT::i32, MVT::i32}, A);
3230 SDValue UnpackB =
3231 DAG.getNode(NVPTXISD::UNPACK_VECTOR, DL, {MVT::i32, MVT::i32}, B);
3232
3233 // Arch is Little endiain: 0 = low bits, 1 = high bits
3234 SDValue ALo = UnpackA.getValue(0);
3235 SDValue AHi = UnpackA.getValue(1);
3236 SDValue BLo = UnpackB.getValue(0);
3237 SDValue BHi = UnpackB.getValue(1);
3238
3239 // The bitfeild consists of { AHi : ALo : BHi : BLo }
3240 //
3241 // * FSHL, Amt < 32 - The window will contain { AHi : ALo : BHi }
3242 // * FSHL, Amt >= 32 - The window will contain { ALo : BHi : BLo }
3243 // * FSHR, Amt < 32 - The window will contain { ALo : BHi : BLo }
3244 // * FSHR, Amt >= 32 - The window will contain { AHi : ALo : BHi }
3245 //
3246 // Note that Amt = 0 and Amt = 32 are special cases where 32-bit funnel shifts
3247 // are not needed at all. Amt = 0 is a no-op producing either A or B depending
3248 // on the direction. Amt = 32 can be implemented by a packing and unpacking
3249 // move to select and arrange the 32bit values. For simplicity, these cases
3250 // are not handled here explicitly and instead we rely on DAGCombiner to
3251 // remove the no-op funnel shifts we insert.
3252 auto [High, Mid, Low] = ((Opcode == ISD::FSHL) == (Amt < 32))
3253 ? std::make_tuple(AHi, ALo, BHi)
3254 : std::make_tuple(ALo, BHi, BLo);
3255
3256 SDValue NewAmt = DAG.getConstant(Amt & 31, DL, MVT::i32);
3257 SDValue RHi = DAG.getNode(Opcode, DL, MVT::i32, {High, Mid, NewAmt});
3258 SDValue RLo = DAG.getNode(Opcode, DL, MVT::i32, {Mid, Low, NewAmt});
3259
3260 return DAG.getNode(NVPTXISD::BUILD_VECTOR, DL, MVT::i64, {RLo, RHi});
3261}
3262
3264 return expandFSH64(Op->getOperand(0), Op->getOperand(1), Op->getOperand(2),
3265 SDLoc(Op), Op->getOpcode(), DAG);
3266}
3267
3269 unsigned Opcode = Op->getOpcode() == ISD::ROTL ? ISD::FSHL : ISD::FSHR;
3270 return expandFSH64(Op->getOperand(0), Op->getOperand(0), Op->getOperand(1),
3271 SDLoc(Op), Opcode, DAG);
3272}
3273
3275 // Lower (frem x, y) into (sub x, (mul (ftrunc (div x, y)) y)),
3276 // i.e. "poor man's fmod()". When y is infinite, x is returned. This matches
3277 // the semantics of LLVM's frem.
3278 SDLoc DL(Op);
3279 SDValue X = Op->getOperand(0);
3280 SDValue Y = Op->getOperand(1);
3281 EVT Ty = Op.getValueType();
3282 SDNodeFlags Flags = Op->getFlags();
3283
3284 SDValue Div = DAG.getNode(ISD::FDIV, DL, Ty, X, Y, Flags);
3285 SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, Ty, Div, Flags);
3286 SDValue Mul = DAG.getNode(ISD::FMUL, DL, Ty, Trunc, Y,
3288 SDValue Sub = DAG.getNode(ISD::FSUB, DL, Ty, X, Mul,
3290
3291 if (Flags.hasNoInfs())
3292 return Sub;
3293
3294 // If Y is infinite, return X
3295 SDValue AbsY = DAG.getNode(ISD::FABS, DL, Ty, Y);
3296 SDValue Inf =
3297 DAG.getConstantFP(APFloat::getInf(Ty.getFltSemantics()), DL, Ty);
3298 SDValue IsInf = DAG.getSetCC(DL, MVT::i1, AbsY, Inf, ISD::SETEQ);
3299 return DAG.getSelect(DL, Ty, IsInf, X, Sub);
3300}
3301
3303 assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");
3304
3305 SDValue Cond = Op->getOperand(0);
3306 SDValue TrueVal = Op->getOperand(1);
3307 SDValue FalseVal = Op->getOperand(2);
3308 SDLoc DL(Op);
3309
3310 // If both operands are truncated, we push the select through the truncates.
3311 if (TrueVal.getOpcode() == ISD::TRUNCATE &&
3312 FalseVal.getOpcode() == ISD::TRUNCATE) {
3313 TrueVal = TrueVal.getOperand(0);
3314 FalseVal = FalseVal.getOperand(0);
3315
3316 EVT VT = TrueVal.getSimpleValueType().bitsLE(FalseVal.getSimpleValueType())
3317 ? TrueVal.getValueType()
3318 : FalseVal.getValueType();
3319 TrueVal = DAG.getAnyExtOrTrunc(TrueVal, DL, VT);
3320 FalseVal = DAG.getAnyExtOrTrunc(FalseVal, DL, VT);
3321 SDValue Select = DAG.getSelect(DL, VT, Cond, TrueVal, FalseVal);
3322 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Select);
3323 }
3324
3325 // Otherwise, expand the select into a series of logical operations. These
3326 // often can be folded into other operations either by us or ptxas.
3327 TrueVal = DAG.getFreeze(TrueVal);
3328 FalseVal = DAG.getFreeze(FalseVal);
3329 SDValue And1 = DAG.getNode(ISD::AND, DL, MVT::i1, Cond, TrueVal);
3330 SDValue NotCond = DAG.getNOT(DL, Cond, MVT::i1);
3331 SDValue And2 = DAG.getNode(ISD::AND, DL, MVT::i1, NotCond, FalseVal);
3332 SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i1, And1, And2);
3333 return Or;
3334}
3335
3337 SDNode *N = Op.getNode();
3338
3339 SDValue Chain = N->getOperand(0);
3340 SDValue Val = N->getOperand(1);
3341 SDValue BasePtr = N->getOperand(2);
3342 SDValue Offset = N->getOperand(3);
3343 SDValue Mask = N->getOperand(4);
3344
3345 SDLoc DL(N);
3346 EVT ValVT = Val.getValueType();
3347 MemSDNode *MemSD = cast<MemSDNode>(N);
3348 assert(ValVT.isVector() && "Masked vector store must have vector type");
3349 assert(MemSD->getAlign() >= DAG.getEVTAlign(ValVT) &&
3350 "Unexpected alignment for masked store");
3351
3352 unsigned Opcode = 0;
3353 switch (ValVT.getSimpleVT().SimpleTy) {
3354 default:
3355 llvm_unreachable("Unexpected masked vector store type");
3356 case MVT::v4i64:
3357 case MVT::v4f64: {
3358 Opcode = NVPTXISD::StoreV4;
3359 break;
3360 }
3361 case MVT::v8i32:
3362 case MVT::v8f32: {
3363 Opcode = NVPTXISD::StoreV8;
3364 break;
3365 }
3366 }
3367
3369
3370 // Construct the new SDNode. First operand is the chain.
3371 Ops.push_back(Chain);
3372
3373 // The next N operands are the values to store. Encode the mask into the
3374 // values using the sentinel register 0 to represent a masked-off element.
3375 assert(Mask.getValueType().isVector() &&
3376 Mask.getValueType().getVectorElementType() == MVT::i1 &&
3377 "Mask must be a vector of i1");
3378 assert(Mask.getOpcode() == ISD::BUILD_VECTOR &&
3379 "Mask expected to be a BUILD_VECTOR");
3380 assert(Mask.getValueType().getVectorNumElements() ==
3381 ValVT.getVectorNumElements() &&
3382 "Mask size must be the same as the vector size");
3383 for (auto [I, Op] : enumerate(Mask->ops())) {
3384 // Mask elements must be constants.
3385 if (Op.getNode()->getAsZExtVal() == 0) {
3386 // Append a sentinel register 0 to the Ops vector to represent a masked
3387 // off element, this will be handled in tablegen
3389 ValVT.getVectorElementType()));
3390 } else {
3391 // Extract the element from the vector to store
3392 SDValue ExtVal =
3394 Val, DAG.getIntPtrConstant(I, DL));
3395 Ops.push_back(ExtVal);
3396 }
3397 }
3398
3399 // Next, the pointer operand.
3400 Ops.push_back(BasePtr);
3401
3402 // Finally, the offset operand. We expect this to always be undef, and it will
3403 // be ignored in lowering, but to mirror the handling of the other vector
3404 // store instructions we include it in the new SDNode.
3405 assert(Offset.getOpcode() == ISD::UNDEF &&
3406 "Offset operand expected to be undef");
3407 Ops.push_back(Offset);
3408
3409 SDValue NewSt =
3410 DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
3411 MemSD->getMemoryVT(), MemSD->getMemOperand());
3412
3413 return NewSt;
3414}
3415
3416SDValue
3418 switch (Op.getOpcode()) {
3419 case ISD::RETURNADDR:
3420 return SDValue();
3421 case ISD::FRAMEADDR:
3422 return SDValue();
3423 case ISD::ADDRSPACECAST:
3424 return LowerADDRSPACECAST(Op, DAG);
3426 return lowerIntrinsicWChain(Op, DAG);
3428 return lowerIntrinsicWOChain(Op, DAG);
3430 return lowerIntrinsicVoid(Op, DAG);
3431 case ISD::BUILD_VECTOR:
3432 return LowerBUILD_VECTOR(Op, DAG);
3433 case ISD::BITCAST:
3434 return LowerBITCAST(Op, DAG);
3436 return Op;
3438 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
3440 return LowerINSERT_VECTOR_ELT(Op, DAG);
3442 return LowerVECTOR_SHUFFLE(Op, DAG);
3444 return LowerCONCAT_VECTORS(Op, DAG);
3449 return LowerVECREDUCE(Op, DAG);
3450 case ISD::STORE:
3451 return LowerSTORE(Op, DAG);
3452 case ISD::MSTORE: {
3453 assert(STI.has256BitVectorLoadStore(
3454 cast<MemSDNode>(Op.getNode())->getAddressSpace()) &&
3455 "Masked store vector not supported on subtarget.");
3456 return lowerMSTORE(Op, DAG);
3457 }
3458 case ISD::LOAD:
3459 return LowerLOAD(Op, DAG);
3460 case ISD::MLOAD:
3461 return LowerMLOAD(Op, DAG);
3462 case ISD::SHL_PARTS:
3463 return LowerShiftLeftParts(Op, DAG);
3464 case ISD::SRA_PARTS:
3465 case ISD::SRL_PARTS:
3466 return LowerShiftRightParts(Op, DAG);
3467 case ISD::SELECT:
3468 return lowerSELECT(Op, DAG);
3469 case ISD::FROUND:
3470 return LowerFROUND(Op, DAG);
3471 case ISD::FCOPYSIGN:
3472 return LowerFCOPYSIGN(Op, DAG);
3473 case ISD::SINT_TO_FP:
3474 case ISD::UINT_TO_FP:
3475 return LowerINT_TO_FP(Op, DAG);
3476 case ISD::FP_TO_SINT:
3477 case ISD::FP_TO_UINT:
3478 return LowerFP_TO_INT(Op, DAG);
3479 case ISD::FP_ROUND:
3480 return LowerFP_ROUND(Op, DAG);
3481 case ISD::FP_EXTEND:
3482 return LowerFP_EXTEND(Op, DAG);
3483 case ISD::VAARG:
3484 return LowerVAARG(Op, DAG);
3485 case ISD::VASTART:
3486 return LowerVASTART(Op, DAG);
3487 case ISD::FSHL:
3488 case ISD::FSHR:
3489 return lowerFSH(Op, DAG);
3490 case ISD::ROTL:
3491 case ISD::ROTR:
3492 return lowerROT(Op, DAG);
3493 case ISD::ABS:
3494 case ISD::SMIN:
3495 case ISD::SMAX:
3496 case ISD::UMIN:
3497 case ISD::UMAX:
3498 case ISD::ADD:
3499 case ISD::SUB:
3500 case ISD::MUL:
3501 case ISD::SHL:
3502 case ISD::SREM:
3503 case ISD::UREM:
3504 return LowerVectorArith(Op, DAG);
3506 return LowerDYNAMIC_STACKALLOC(Op, DAG);
3507 case ISD::STACKRESTORE:
3508 return LowerSTACKRESTORE(Op, DAG);
3509 case ISD::STACKSAVE:
3510 return LowerSTACKSAVE(Op, DAG);
3511 case ISD::CopyToReg:
3512 return LowerCopyToReg_128(Op, DAG);
3513 case ISD::FADD:
3514 case ISD::FSUB:
3515 case ISD::FMUL:
3516 // Used only for bf16 on SM80, where we select fma for non-ftz operation
3517 return PromoteBinOpIfF32FTZ(Op, DAG);
3518 case ISD::CTPOP:
3519 case ISD::CTLZ:
3520 return lowerCTLZCTPOP(Op, DAG);
3521 case ISD::FREM:
3522 return lowerFREM(Op, DAG);
3523 case ISD::BSWAP:
3524 return lowerBSWAP(Op, DAG);
3525 default:
3526 llvm_unreachable("Custom lowering not defined for operation");
3527 }
3528}
3529
3530// This will prevent AsmPrinter from trying to print the jump tables itself.
3534
3535SDValue NVPTXTargetLowering::LowerADDRSPACECAST(SDValue Op,
3536 SelectionDAG &DAG) const {
3538 unsigned SrcAS = N->getSrcAddressSpace();
3539 unsigned DestAS = N->getDestAddressSpace();
3540 if (SrcAS != llvm::ADDRESS_SPACE_GENERIC &&
3541 DestAS != llvm::ADDRESS_SPACE_GENERIC) {
3542 // Shared and SharedCluster can be converted to each other through generic
3543 // space
3544 if ((SrcAS == llvm::ADDRESS_SPACE_SHARED &&
3547 DestAS == llvm::ADDRESS_SPACE_SHARED)) {
3548 SDLoc DL(Op.getNode());
3549 const MVT GenerictVT =
3551 SDValue GenericConversion = DAG.getAddrSpaceCast(
3552 DL, GenerictVT, Op.getOperand(0), SrcAS, ADDRESS_SPACE_GENERIC);
3553 SDValue SharedClusterConversion =
3554 DAG.getAddrSpaceCast(DL, Op.getValueType(), GenericConversion,
3555 ADDRESS_SPACE_GENERIC, DestAS);
3556 return SharedClusterConversion;
3557 }
3558
3559 return DAG.getUNDEF(Op.getValueType());
3560 }
3561
3562 return Op;
3563}
3564
3565// This function is almost a copy of SelectionDAG::expandVAArg().
3566// The only diff is that this one produces loads from local address space.
3567SDValue NVPTXTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3568 const TargetLowering *TLI = STI.getTargetLowering();
3569 SDLoc DL(Op);
3570
3571 SDNode *Node = Op.getNode();
3572 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3573 EVT VT = Node->getValueType(0);
3574 auto *Ty = VT.getTypeForEVT(*DAG.getContext());
3575 SDValue Tmp1 = Node->getOperand(0);
3576 SDValue Tmp2 = Node->getOperand(1);
3577 const MaybeAlign MA(Node->getConstantOperandVal(3));
3578
3579 SDValue VAListLoad = DAG.getLoad(TLI->getPointerTy(DAG.getDataLayout()), DL,
3580 Tmp1, Tmp2, MachinePointerInfo(V));
3581 SDValue VAList = VAListLoad;
3582
3583 if (MA && *MA > TLI->getMinStackArgumentAlignment()) {
3584 VAList = DAG.getNode(
3585 ISD::ADD, DL, VAList.getValueType(), VAList,
3586 DAG.getConstant(MA->value() - 1, DL, VAList.getValueType()));
3587
3588 VAList = DAG.getNode(ISD::AND, DL, VAList.getValueType(), VAList,
3589 DAG.getSignedConstant(-(int64_t)MA->value(), DL,
3590 VAList.getValueType()));
3591 }
3592
3593 // Increment the pointer, VAList, to the next vaarg
3594 Tmp1 = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
3596 DL, VAList.getValueType()));
3597
3598 // Store the incremented VAList to the legalized pointer
3599 Tmp1 = DAG.getStore(VAListLoad.getValue(1), DL, Tmp1, Tmp2,
3600 MachinePointerInfo(V));
3601
3602 const Value *SrcV = Constant::getNullValue(
3604
3605 // Load the actual argument out of the pointer VAList
3606 return DAG.getLoad(VT, DL, Tmp1, VAList, MachinePointerInfo(SrcV));
3607}
3608
3609SDValue NVPTXTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3610 const TargetLowering *TLI = STI.getTargetLowering();
3611 SDLoc DL(Op);
3612 EVT PtrVT = TLI->getPointerTy(DAG.getDataLayout());
3613
3614 // Store the address of unsized array <function>_vararg[] in the ap object.
3615 SDValue VAReg = getParamSymbol(DAG, /* vararg */ -1, PtrVT);
3616
3617 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3618 return DAG.getStore(Op.getOperand(0), DL, VAReg, Op.getOperand(1),
3619 MachinePointerInfo(SV));
3620}
3621
3622static std::pair<MemSDNode *, uint32_t>
3624 const NVPTXSubtarget &STI) {
3625 SDValue Chain = N->getOperand(0);
3626 SDValue BasePtr = N->getOperand(1);
3627 SDValue Mask = N->getOperand(3);
3628 [[maybe_unused]] SDValue Passthru = N->getOperand(4);
3629
3630 SDLoc DL(N);
3631 EVT ResVT = N->getValueType(0);
3632 assert(ResVT.isVector() && "Masked vector load must have vector type");
3633 // While we only expect poison passthru vectors as an input to the backend,
3634 // when the legalization framework splits a poison vector in half, it creates
3635 // two undef vectors, so we can technically expect those too.
3636 assert((Passthru.getOpcode() == ISD::POISON ||
3637 Passthru.getOpcode() == ISD::UNDEF) &&
3638 "Passthru operand expected to be poison or undef");
3639
3640 // Extract the mask and convert it to a uint32_t representing the used bytes
3641 // of the entire vector load
3642 uint32_t UsedBytesMask = 0;
3643 uint32_t ElementSizeInBits = ResVT.getVectorElementType().getSizeInBits();
3644 assert(ElementSizeInBits % 8 == 0 && "Unexpected element size");
3645 uint32_t ElementSizeInBytes = ElementSizeInBits / 8;
3646 uint32_t ElementMask = (1u << ElementSizeInBytes) - 1u;
3647
3648 for (SDValue Op : reverse(Mask->ops())) {
3649 // We technically only want to do this shift for every
3650 // iteration *but* the first, but in the first iteration UsedBytesMask is 0,
3651 // so this shift is a no-op.
3652 UsedBytesMask <<= ElementSizeInBytes;
3653
3654 // Mask elements must be constants.
3655 if (Op->getAsZExtVal() != 0)
3656 UsedBytesMask |= ElementMask;
3657 }
3658
3659 assert(UsedBytesMask != 0 && UsedBytesMask != UINT32_MAX &&
3660 "Unexpected masked load with elements masked all on or all off");
3661
3662 // Create a new load sd node to be handled normally by ReplaceLoadVector.
3663 MemSDNode *NewLD = cast<MemSDNode>(
3664 DAG.getLoad(ResVT, DL, Chain, BasePtr, N->getMemOperand()).getNode());
3665
3666 // If our subtarget does not support the used bytes mask pragma, "drop" the
3667 // mask by setting it to UINT32_MAX
3668 if (!STI.hasUsedBytesMaskPragma())
3669 UsedBytesMask = UINT32_MAX;
3670
3671 return {NewLD, UsedBytesMask};
3672}
3673
3674/// replaceLoadVector - Convert vector loads into multi-output scalar loads.
3675static std::optional<std::pair<SDValue, SDValue>>
3678 const EVT ResVT = LD->getValueType(0);
3679 const EVT MemVT = LD->getMemoryVT();
3680
3681 // If we're doing sign/zero extension as part of the load, avoid lowering to
3682 // a LoadV node. TODO: consider relaxing this restriction.
3683 if (ResVT != MemVT)
3684 return std::nullopt;
3685
3686 const auto NumEltsAndEltVT =
3687 getVectorLoweringShape(ResVT, STI, LD->getAddressSpace());
3688 if (!NumEltsAndEltVT)
3689 return std::nullopt;
3690 const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
3691
3692 Align Alignment = LD->getAlign();
3693 const auto &TD = DAG.getDataLayout();
3694 Align PrefAlign = TD.getPrefTypeAlign(MemVT.getTypeForEVT(*DAG.getContext()));
3695 if (Alignment < PrefAlign) {
3696 // This load is not sufficiently aligned, so bail out and let this vector
3697 // load be scalarized. Note that we may still be able to emit smaller
3698 // vector loads. For example, if we are loading a <4 x float> with an
3699 // alignment of 8, this check will fail but the legalizer will try again
3700 // with 2 x <2 x float>, which will succeed with an alignment of 8.
3701 return std::nullopt;
3702 }
3703
3704 // If we have a masked load, convert it to a normal load now
3705 std::optional<uint32_t> UsedBytesMask = std::nullopt;
3706 if (LD->getOpcode() == ISD::MLOAD)
3707 std::tie(LD, UsedBytesMask) =
3709
3710 // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
3711 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
3712 // loaded type to i16 and propagate the "real" type as the memory type.
3713 const MVT LoadEltVT = (EltVT.getSizeInBits() < 16) ? MVT::i16 : EltVT;
3714
3715 unsigned Opcode;
3716 switch (NumElts) {
3717 default:
3718 return std::nullopt;
3719 case 2:
3720 Opcode = NVPTXISD::LoadV2;
3721 break;
3722 case 4:
3723 Opcode = NVPTXISD::LoadV4;
3724 break;
3725 case 8:
3726 Opcode = NVPTXISD::LoadV8;
3727 break;
3728 }
3729 auto ListVTs = SmallVector<EVT, 9>(NumElts, LoadEltVT);
3730 ListVTs.push_back(MVT::Other);
3731 SDVTList LdResVTs = DAG.getVTList(ListVTs);
3732
3733 SDLoc DL(LD);
3734
3735 // Copy regular operands
3736 SmallVector<SDValue, 8> OtherOps(LD->ops());
3737
3738 OtherOps.push_back(
3739 DAG.getConstant(UsedBytesMask.value_or(UINT32_MAX), DL, MVT::i32));
3740
3741 // The select routine does not have access to the LoadSDNode instance, so
3742 // pass along the extension information
3743 OtherOps.push_back(
3744 DAG.getIntPtrConstant(cast<LoadSDNode>(LD)->getExtensionType(), DL));
3745
3746 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps, MemVT,
3747 LD->getMemOperand());
3748
3749 SmallVector<SDValue> ScalarRes;
3750 if (EltVT.isVector()) {
3752 assert(NumElts * EltVT.getVectorNumElements() ==
3753 ResVT.getVectorNumElements());
3754 // Generate EXTRACT_VECTOR_ELTs to split v2[i,f,bf]16/v4i8 subvectors back
3755 // into individual elements.
3756 for (const unsigned I : llvm::seq(NumElts)) {
3757 SDValue SubVector = NewLD.getValue(I);
3758 DAG.ExtractVectorElements(SubVector, ScalarRes);
3759 }
3760 } else {
3761 for (const unsigned I : llvm::seq(NumElts)) {
3762 SDValue Res = NewLD.getValue(I);
3763 if (LoadEltVT != EltVT)
3764 Res = DAG.getNode(ISD::TRUNCATE, DL, EltVT, Res);
3765 ScalarRes.push_back(Res);
3766 }
3767 }
3768
3769 SDValue LoadChain = NewLD.getValue(NumElts);
3770
3771 const MVT BuildVecVT =
3772 MVT::getVectorVT(EltVT.getScalarType(), ScalarRes.size());
3773 SDValue BuildVec = DAG.getBuildVector(BuildVecVT, DL, ScalarRes);
3774 SDValue LoadValue = DAG.getBitcast(ResVT, BuildVec);
3775
3776 return {{LoadValue, LoadChain}};
3777}
3778
3781 const NVPTXSubtarget &STI) {
3782 if (auto Res = replaceLoadVector(N, DAG, STI))
3783 Results.append({Res->first, Res->second});
3784}
3785
3787 const NVPTXSubtarget &STI) {
3788 if (auto Res = replaceLoadVector(N, DAG, STI))
3789 return DAG.getMergeValues({Res->first, Res->second}, SDLoc(N));
3790 return SDValue();
3791}
3792
3793// v = ld i1* addr
3794// =>
3795// v1 = ld i8* addr (-> i16)
3796// v = trunc i16 to i1
3798 SDLoc dl(LD);
3799 assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
3800 assert(LD->getValueType(0) == MVT::i1 && "Custom lowering for i1 load only");
3801 SDValue newLD = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i16, LD->getChain(),
3802 LD->getBasePtr(), LD->getPointerInfo(),
3803 MVT::i8, LD->getAlign(),
3804 LD->getMemOperand()->getFlags());
3805 SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
3806 // The legalizer (the caller) is expecting two values from the legalized
3807 // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
3808 // in LegalizeDAG.cpp which also uses MergeValues.
3809 return DAG.getMergeValues({result, LD->getChain()}, dl);
3810}
3811
3812SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
3813 LoadSDNode *LD = cast<LoadSDNode>(Op);
3814
3815 if (Op.getValueType() == MVT::i1)
3816 return lowerLOADi1(LD, DAG);
3817
3818 // To improve CodeGen we'll legalize any-extend loads to zext loads. This is
3819 // how they'll be lowered in ISel anyway, and by doing this a little earlier
3820 // we allow for more DAG combine opportunities.
3821 if (LD->getExtensionType() == ISD::EXTLOAD) {
3822 assert(LD->getValueType(0).isInteger() && LD->getMemoryVT().isInteger() &&
3823 "Unexpected fpext-load");
3824 return DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(Op), Op.getValueType(),
3825 LD->getChain(), LD->getBasePtr(), LD->getMemoryVT(),
3826 LD->getMemOperand());
3827 }
3828
3829 llvm_unreachable("Unexpected custom lowering for load");
3830}
3831
3832SDValue NVPTXTargetLowering::LowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
3833 // v2f16/v2bf16/v2i16/v4i8 are legal, so we can't rely on legalizer to handle
3834 // masked loads of these types and have to handle them here.
3835 // v2f32 also needs to be handled here if the subtarget has f32x2
3836 // instructions, making it legal.
3837 //
3838 // Note: misaligned masked loads should never reach this point
3839 // because the override of isLegalMaskedLoad in NVPTXTargetTransformInfo.cpp
3840 // will validate alignment. Therefore, we do not need to special case handle
3841 // them here.
3842 EVT VT = Op.getValueType();
3843 if (NVPTX::isPackedVectorTy(VT)) {
3845 cast<MemSDNode>(Op.getNode()), DAG, STI);
3846 MemSDNode *LD = std::get<0>(Result);
3847 uint32_t UsedBytesMask = std::get<1>(Result);
3848
3849 SDLoc DL(LD);
3850
3851 // Copy regular operands
3852 SmallVector<SDValue, 8> OtherOps(LD->ops());
3853
3854 OtherOps.push_back(DAG.getConstant(UsedBytesMask, DL, MVT::i32));
3855
3856 // We currently are not lowering extending loads, but pass the extension
3857 // type anyway as later handling expects it.
3858 OtherOps.push_back(
3859 DAG.getIntPtrConstant(cast<LoadSDNode>(LD)->getExtensionType(), DL));
3860 SDValue NewLD =
3861 DAG.getMemIntrinsicNode(NVPTXISD::MLoad, DL, LD->getVTList(), OtherOps,
3862 LD->getMemoryVT(), LD->getMemOperand());
3863 return NewLD;
3864 }
3865 return SDValue();
3866}
3867
3869 const NVPTXSubtarget &STI) {
3870 MemSDNode *N = cast<MemSDNode>(Op.getNode());
3871 SDValue Val = N->getOperand(1);
3872 SDLoc DL(N);
3873 const EVT ValVT = Val.getValueType();
3874 const EVT MemVT = N->getMemoryVT();
3875
3876 // If we're truncating as part of the store, avoid lowering to a StoreV node.
3877 // TODO: consider relaxing this restriction.
3878 if (ValVT != MemVT)
3879 return SDValue();
3880
3881 const auto NumEltsAndEltVT =
3882 getVectorLoweringShape(ValVT, STI, N->getAddressSpace());
3883 if (!NumEltsAndEltVT)
3884 return SDValue();
3885 const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
3886
3887 const DataLayout &TD = DAG.getDataLayout();
3888
3889 Align Alignment = N->getAlign();
3890 Align PrefAlign = TD.getPrefTypeAlign(ValVT.getTypeForEVT(*DAG.getContext()));
3891 if (Alignment < PrefAlign) {
3892 // This store is not sufficiently aligned, so bail out and let this vector
3893 // store be scalarized. Note that we may still be able to emit smaller
3894 // vector stores. For example, if we are storing a <4 x float> with an
3895 // alignment of 8, this check will fail but the legalizer will try again
3896 // with 2 x <2 x float>, which will succeed with an alignment of 8.
3897 return SDValue();
3898 }
3899
3900 unsigned Opcode;
3901 switch (NumElts) {
3902 default:
3903 return SDValue();
3904 case 2:
3905 Opcode = NVPTXISD::StoreV2;
3906 break;
3907 case 4:
3908 Opcode = NVPTXISD::StoreV4;
3909 break;
3910 case 8:
3911 Opcode = NVPTXISD::StoreV8;
3912 break;
3913 }
3914
3916
3917 // First is the chain
3918 Ops.push_back(N->getOperand(0));
3919
3920 // Then the split values
3921 if (EltVT.isVector()) {
3923 assert(NumElts * EltVT.getVectorNumElements() ==
3924 ValVT.getVectorNumElements());
3925 // Combine individual elements into v2[i,f,bf]16/v4i8 subvectors to be
3926 // stored as b32s
3927 const unsigned NumEltsPerSubVector = EltVT.getVectorNumElements();
3928 for (const unsigned I : llvm::seq(NumElts)) {
3929 SmallVector<SDValue, 4> SubVectorElts;
3930 DAG.ExtractVectorElements(Val, SubVectorElts, I * NumEltsPerSubVector,
3931 NumEltsPerSubVector);
3932 Ops.push_back(DAG.getBuildVector(EltVT, DL, SubVectorElts));
3933 }
3934 } else {
3935 SDValue V = DAG.getBitcast(MVT::getVectorVT(EltVT, NumElts), Val);
3936 for (const unsigned I : llvm::seq(NumElts)) {
3937 SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, V,
3938 DAG.getIntPtrConstant(I, DL));
3939
3940 // Since StoreV2 is a target node, we cannot rely on DAG type
3941 // legalization. Therefore, we must ensure the type is legal. For i1 and
3942 // i8, we set the stored type to i16 and propagate the "real" type as the
3943 // memory type.
3944 if (EltVT.getSizeInBits() < 16)
3945 ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
3946 Ops.push_back(ExtVal);
3947 }
3948 }
3949
3950 // Then any remaining arguments
3951 Ops.append(N->op_begin() + 2, N->op_end());
3952
3953 SDValue NewSt =
3954 DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
3955 N->getMemoryVT(), N->getMemOperand());
3956
3957 // return DCI.CombineTo(N, NewSt, true);
3958 return NewSt;
3959}
3960
3961SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
3962 StoreSDNode *Store = cast<StoreSDNode>(Op);
3963 EVT VT = Store->getMemoryVT();
3964
3965 if (VT == MVT::i1)
3966 return LowerSTOREi1(Op, DAG);
3967
3968 // Lower store of any other vector type, including v2f32 as we want to break
3969 // it apart since this is not a widely-supported type.
3970 return lowerSTOREVector(Op, DAG, STI);
3971}
3972
3973// st i1 v, addr
3974// =>
3975// v1 = zxt v to i16
3976// st.u8 i16, addr
3977SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
3978 SDNode *Node = Op.getNode();
3979 SDLoc dl(Node);
3980 StoreSDNode *ST = cast<StoreSDNode>(Node);
3981 SDValue Tmp1 = ST->getChain();
3982 SDValue Tmp2 = ST->getBasePtr();
3983 SDValue Tmp3 = ST->getValue();
3984 assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
3985 Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
3986 SDValue Result =
3987 DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
3988 ST->getAlign(), ST->getMemOperand()->getFlags());
3989 return Result;
3990}
3991
3992SDValue NVPTXTargetLowering::LowerCopyToReg_128(SDValue Op,
3993 SelectionDAG &DAG) const {
3994 // Change the CopyToReg to take in two 64-bit operands instead of a 128-bit
3995 // operand so that it can pass the legalization.
3996
3997 assert(Op.getOperand(1).getValueType() == MVT::i128 &&
3998 "Custom lowering for 128-bit CopyToReg only");
3999
4000 SDNode *Node = Op.getNode();
4001 SDLoc DL(Node);
4002
4003 SDValue Cast = DAG.getBitcast(MVT::v2i64, Op->getOperand(2));
4004 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
4005 DAG.getIntPtrConstant(0, DL));
4006 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
4007 DAG.getIntPtrConstant(1, DL));
4008
4010 SmallVector<EVT, 3> ResultsType(Node->values());
4011
4012 NewOps[0] = Op->getOperand(0); // Chain
4013 NewOps[1] = Op->getOperand(1); // Dst Reg
4014 NewOps[2] = Lo; // Lower 64-bit
4015 NewOps[3] = Hi; // Higher 64-bit
4016 if (Op.getNumOperands() == 4)
4017 NewOps[4] = Op->getOperand(3); // Glue if exists
4018
4019 return DAG.getNode(ISD::CopyToReg, DL, ResultsType, NewOps);
4020}
4021
4022unsigned NVPTXTargetLowering::getNumRegisters(
4023 LLVMContext &Context, EVT VT,
4024 std::optional<MVT> RegisterVT = std::nullopt) const {
4025 if (VT == MVT::i128 && RegisterVT == MVT::i128)
4026 return 1;
4027 return TargetLoweringBase::getNumRegisters(Context, VT, RegisterVT);
4028}
4029
4030bool NVPTXTargetLowering::splitValueIntoRegisterParts(
4031 SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
4032 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
4033 if (Val.getValueType() == MVT::i128 && NumParts == 1) {
4034 Parts[0] = Val;
4035 return true;
4036 }
4037 return false;
4038}
4039
4040// This creates target external symbol for a function parameter.
4041// Name of the symbol is composed from its index and the function name.
4042// Negative index corresponds to special parameter (unsized array) used for
4043// passing variable arguments.
4044SDValue NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int I,
4045 EVT T) const {
4046 StringRef SavedStr = nvTM->getStrPool().save(
4048 return DAG.getExternalSymbol(SavedStr.data(), T);
4049}
4050
4051SDValue NVPTXTargetLowering::getCallParamSymbol(SelectionDAG &DAG, int I,
4052 EVT T) const {
4053 const StringRef SavedStr = nvTM->getStrPool().save("param" + Twine(I));
4054 return DAG.getExternalSymbol(SavedStr.data(), T);
4055}
4056
4058 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4059 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4060 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4061 const DataLayout &DL = DAG.getDataLayout();
4062 LLVMContext &Ctx = *DAG.getContext();
4063 auto PtrVT = getPointerTy(DAG.getDataLayout());
4064
4065 const Function &F = DAG.getMachineFunction().getFunction();
4066 const bool IsKernel = isKernelFunction(F);
4067
4068 SDValue Root = DAG.getRoot();
4069 SmallVector<SDValue, 16> OutChains;
4070
4071 // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
4072 // Ins.size() will be larger
4073 // * if there is an aggregate argument with multiple fields (each field
4074 // showing up separately in Ins)
4075 // * if there is a vector argument with more than typical vector-length
4076 // elements (generally if more than 4) where each vector element is
4077 // individually present in Ins.
4078 // So a different index should be used for indexing into Ins.
4079 // See similar issue in LowerCall.
4080
4081 auto AllIns = ArrayRef(Ins);
4082 for (const auto &Arg : F.args()) {
4083 const auto ArgIns = AllIns.take_while(
4084 [&](auto I) { return I.OrigArgIndex == Arg.getArgNo(); });
4085 AllIns = AllIns.drop_front(ArgIns.size());
4086
4087 Type *Ty = Arg.getType();
4088
4089 if (ArgIns.empty())
4090 report_fatal_error("Empty parameter types are not supported");
4091
4092 if (Arg.use_empty()) {
4093 // argument is dead
4094 for (const auto &In : ArgIns) {
4095 assert(!In.Used && "Arg.use_empty() is true but Arg is used?");
4096 InVals.push_back(DAG.getUNDEF(In.VT));
4097 }
4098 continue;
4099 }
4100
4101 SDValue ArgSymbol = getParamSymbol(DAG, Arg.getArgNo(), PtrVT);
4102
4103 // In the following cases, assign a node order of "i+1"
4104 // to newly created nodes. The SDNodes for params have to
4105 // appear in the same order as their order of appearance
4106 // in the original function. "i+1" holds that order.
4107 if (Arg.hasByValAttr()) {
4108 // Param has ByVal attribute
4109 // Return MoveParam(param symbol).
4110 // Ideally, the param symbol can be returned directly,
4111 // but when SDNode builder decides to use it in a CopyToReg(),
4112 // machine instruction fails because TargetExternalSymbol
4113 // (not lowered) is target dependent, and CopyToReg assumes
4114 // the source is lowered.
4115 assert(ArgIns.size() == 1 && "ByVal argument must be a pointer");
4116 const auto &ByvalIn = ArgIns[0];
4117 assert(getValueType(DL, Ty) == ByvalIn.VT &&
4118 "Ins type did not match function type");
4119 assert(ByvalIn.VT == PtrVT && "ByVal argument must be a pointer");
4120
4121 SDValue P;
4122 if (IsKernel) {
4123 assert(isParamGridConstant(Arg) && "ByVal argument must be lowered to "
4124 "grid_constant by NVPTXLowerArgs");
4125 P = ArgSymbol;
4126 P.getNode()->setIROrder(Arg.getArgNo() + 1);
4127 } else {
4128 P = DAG.getNode(NVPTXISD::MoveParam, dl, ByvalIn.VT, ArgSymbol);
4129 P.getNode()->setIROrder(Arg.getArgNo() + 1);
4130 P = DAG.getAddrSpaceCast(dl, ByvalIn.VT, P, ADDRESS_SPACE_LOCAL,
4132 }
4133 InVals.push_back(P);
4134 } else {
4137 ComputePTXValueVTs(*this, DL, Ctx, CallConv, Ty, VTs, Offsets);
4138 assert(VTs.size() == ArgIns.size() && "Size mismatch");
4139 assert(VTs.size() == Offsets.size() && "Size mismatch");
4140
4141 const Align ArgAlign = getFunctionArgumentAlignment(
4142 &F, Ty, Arg.getArgNo() + AttributeList::FirstArgIndex, DL);
4143
4144 unsigned I = 0;
4145 const auto VI = VectorizePTXValueVTs(VTs, Offsets, ArgAlign);
4146 for (const unsigned NumElts : VI) {
4147 // i1 is loaded/stored as i8
4148 const EVT LoadVT = VTs[I] == MVT::i1 ? MVT::i8 : VTs[I];
4149 const EVT VecVT = getVectorizedVT(LoadVT, NumElts, Ctx);
4150
4151 SDValue VecAddr = DAG.getObjectPtrOffset(
4152 dl, ArgSymbol, TypeSize::getFixed(Offsets[I]));
4153
4154 const Align PartAlign = commonAlignment(ArgAlign, Offsets[I]);
4155 const unsigned AS = IsKernel ? NVPTX::AddressSpace::EntryParam
4157 SDValue P = DAG.getLoad(VecVT, dl, Root, VecAddr,
4158 MachinePointerInfo(AS), PartAlign,
4161 P.getNode()->setIROrder(Arg.getArgNo() + 1);
4162 for (const unsigned J : llvm::seq(NumElts)) {
4163 SDValue Elt = getExtractVectorizedValue(P, J, LoadVT, dl, DAG);
4164
4165 Elt = correctParamType(Elt, ArgIns[I + J].VT, ArgIns[I + J].Flags,
4166 DAG, dl);
4167 InVals.push_back(Elt);
4168 }
4169 I += NumElts;
4170 }
4171 }
4172 }
4173
4174 if (!OutChains.empty())
4175 DAG.setRoot(DAG.getTokenFactor(dl, OutChains));
4176
4177 return Chain;
4178}
4179
4180SDValue
4182 bool isVarArg,
4184 const SmallVectorImpl<SDValue> &OutVals,
4185 const SDLoc &dl, SelectionDAG &DAG) const {
4186 const Function &F = DAG.getMachineFunction().getFunction();
4187 Type *RetTy = F.getReturnType();
4188
4189 if (RetTy->isVoidTy()) {
4190 assert(OutVals.empty() && Outs.empty() && "Return value expected for void");
4191 return DAG.getNode(NVPTXISD::RET_GLUE, dl, MVT::Other, Chain);
4192 }
4193
4194 const DataLayout &DL = DAG.getDataLayout();
4195 LLVMContext &Ctx = *DAG.getContext();
4196
4197 const SDValue RetSymbol = DAG.getExternalSymbol("func_retval0", MVT::i32);
4198 const auto RetAlign = getFunctionParamOptimizedAlign(&F, RetTy, DL);
4199
4200 // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
4201 // 32-bits are sign extended or zero extended, depending on whether
4202 // they are signed or unsigned types.
4203 const bool ExtendIntegerRetVal =
4204 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
4205
4208 ComputePTXValueVTs(*this, DL, Ctx, CallConv, RetTy, VTs, Offsets);
4209 assert(VTs.size() == OutVals.size() && "Bad return value decomposition");
4210
4211 const auto GetRetVal = [&](unsigned I) -> SDValue {
4212 SDValue RetVal = OutVals[I];
4214 RetVal.getValueType() &&
4215 "OutVal type should always be legal");
4216
4217 const EVT VTI = promoteScalarIntegerPTX(VTs[I]);
4218 const EVT StoreVT =
4219 ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
4220 return correctParamType(RetVal, StoreVT, Outs[I].Flags, DAG, dl);
4221 };
4222
4223 unsigned I = 0;
4224 const auto VI = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
4225 for (const unsigned NumElts : VI) {
4226 const MaybeAlign CurrentAlign = ExtendIntegerRetVal
4227 ? MaybeAlign(std::nullopt)
4228 : commonAlignment(RetAlign, Offsets[I]);
4229
4231 NumElts, dl, DAG, [&](unsigned K) { return GetRetVal(I + K); });
4232
4233 SDValue Ptr =
4234 DAG.getObjectPtrOffset(dl, RetSymbol, TypeSize::getFixed(Offsets[I]));
4235
4236 Chain = DAG.getStore(Chain, dl, Val, Ptr,
4238 CurrentAlign);
4239
4240 I += NumElts;
4241 }
4242
4243 return DAG.getNode(NVPTXISD::RET_GLUE, dl, MVT::Other, Chain);
4244}
4245
4247 SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
4248 SelectionDAG &DAG) const {
4249 if (Constraint.size() > 1)
4250 return;
4252}
4253
4254// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
4255// TgtMemIntrinsic
4256// because we need the information that is only available in the "Value" type
4257// of destination
4258// pointer. In particular, the address space information.
4261 MachineFunction &MF, unsigned Intrinsic) const {
4262 IntrinsicInfo Info;
4263 switch (Intrinsic) {
4264 default:
4265 return;
4266 case Intrinsic::nvvm_match_all_sync_i32p:
4267 case Intrinsic::nvvm_match_all_sync_i64p:
4268 Info.opc = ISD::INTRINSIC_W_CHAIN;
4269 // memVT is bogus. These intrinsics have IntrInaccessibleMemOnly attribute
4270 // in order to model data exchange with other threads, but perform no real
4271 // memory accesses.
4272 Info.memVT = MVT::i1;
4273
4274 // Our result depends on both our and other thread's arguments.
4276 Infos.push_back(Info);
4277 return;
4278 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col:
4279 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row:
4280 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride:
4281 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride:
4282 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col:
4283 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row:
4284 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col_stride:
4285 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row_stride:
4286 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col:
4287 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row:
4288 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col_stride:
4289 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row_stride:
4290 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col:
4291 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row:
4292 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col_stride:
4293 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row_stride:
4294 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col:
4295 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row:
4296 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col_stride:
4297 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row_stride:
4298 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col:
4299 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row:
4300 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col_stride:
4301 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row_stride: {
4302 Info.opc = ISD::INTRINSIC_W_CHAIN;
4303 Info.memVT = MVT::v8f16;
4304 Info.ptrVal = I.getArgOperand(0);
4305 Info.offset = 0;
4306 Info.flags = MachineMemOperand::MOLoad;
4307 Info.align = Align(16);
4308 Infos.push_back(Info);
4309 return;
4310 }
4311 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col:
4312 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col_stride:
4313 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col_stride:
4314 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col:
4315 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row:
4316 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row_stride:
4317 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row_stride:
4318 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row:
4319 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col:
4320 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col_stride:
4321 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row:
4322 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row_stride:
4323 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col:
4324 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col_stride:
4325 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col_stride:
4326 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col:
4327 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row:
4328 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row_stride:
4329 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row_stride:
4330 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row:
4331 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col:
4332 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col_stride:
4333 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row:
4334 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row_stride: {
4335 Info.opc = ISD::INTRINSIC_W_CHAIN;
4336 Info.memVT = MVT::v2i32;
4337 Info.ptrVal = I.getArgOperand(0);
4338 Info.offset = 0;
4339 Info.flags = MachineMemOperand::MOLoad;
4340 Info.align = Align(8);
4341 Infos.push_back(Info);
4342 return;
4343 }
4344
4345 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col:
4346 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col_stride:
4347 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col_stride:
4348 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col:
4349 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row:
4350 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row_stride:
4351 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row_stride:
4352 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row:
4353 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col:
4354 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col_stride:
4355 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row:
4356 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row_stride:
4357 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col:
4358 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col_stride:
4359 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row:
4360 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row_stride:
4361
4362 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col:
4363 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col_stride:
4364 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col_stride:
4365 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col:
4366 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row:
4367 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row_stride:
4368 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row_stride:
4369 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row:
4370 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col:
4371 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col_stride:
4372 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row:
4373 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row_stride:
4374 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col:
4375 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col_stride:
4376 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row:
4377 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row_stride:
4378 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_b16:
4379 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_trans_b16:
4380 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8:
4381 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b4x16_p64:
4382 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b6x16_p32:
4383 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b4x16_p64:
4384 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b6x16_p32: {
4385 Info.opc = ISD::INTRINSIC_W_CHAIN;
4386 Info.memVT = MVT::v4i32;
4387 Info.ptrVal = I.getArgOperand(0);
4388 Info.offset = 0;
4389 Info.flags = MachineMemOperand::MOLoad;
4390 Info.align = Align(16);
4391 Infos.push_back(Info);
4392 return;
4393 }
4394
4395 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col:
4396 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col_stride:
4397 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col_stride:
4398 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col:
4399 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row:
4400 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row_stride:
4401 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row_stride:
4402 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row:
4403
4404 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col:
4405 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col_stride:
4406 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col_stride:
4407 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col:
4408 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row:
4409 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row_stride:
4410 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row_stride:
4411 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row:
4412 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row:
4413 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row_stride:
4414 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col:
4415 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col_stride:
4416 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row:
4417 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row_stride:
4418 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row_stride:
4419 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row:
4420 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col:
4421 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col_stride:
4422 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col_stride:
4423 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col:
4424 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_b16:
4425 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_trans_b16:
4426 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b4x16_p64:
4427 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b6x16_p32: {
4428 Info.opc = ISD::INTRINSIC_W_CHAIN;
4429 Info.memVT = MVT::i32;
4430 Info.ptrVal = I.getArgOperand(0);
4431 Info.offset = 0;
4432 Info.flags = MachineMemOperand::MOLoad;
4433 Info.align = Align(4);
4434 Infos.push_back(Info);
4435 return;
4436 }
4437
4438 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col:
4439 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row:
4440 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col_stride:
4441 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row_stride:
4442 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col:
4443 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row:
4444 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col_stride:
4445 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row_stride:
4446 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col:
4447 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row:
4448 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col_stride:
4449 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row_stride: {
4450 Info.opc = ISD::INTRINSIC_W_CHAIN;
4451 Info.memVT = MVT::v4f16;
4452 Info.ptrVal = I.getArgOperand(0);
4453 Info.offset = 0;
4454 Info.flags = MachineMemOperand::MOLoad;
4455 Info.align = Align(16);
4456 Infos.push_back(Info);
4457 return;
4458 }
4459
4460 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col:
4461 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row:
4462 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col_stride:
4463 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row_stride:
4464 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col:
4465 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row:
4466 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col_stride:
4467 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row_stride:
4468 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col:
4469 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row:
4470 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col_stride:
4471 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row_stride:
4472 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col:
4473 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row:
4474 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col_stride:
4475 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row_stride: {
4476 Info.opc = ISD::INTRINSIC_W_CHAIN;
4477 Info.memVT = MVT::v8f32;
4478 Info.ptrVal = I.getArgOperand(0);
4479 Info.offset = 0;
4480 Info.flags = MachineMemOperand::MOLoad;
4481 Info.align = Align(16);
4482 Infos.push_back(Info);
4483 return;
4484 }
4485
4486 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col:
4487 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col_stride:
4488 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row:
4489 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row_stride:
4490
4491 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col:
4492 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col_stride:
4493 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row:
4494 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row_stride:
4495
4496 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col:
4497 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col_stride:
4498 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row:
4499 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row_stride:
4500 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col:
4501 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col_stride:
4502 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row:
4503 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row_stride:
4504 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col:
4505 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col_stride:
4506 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row:
4507 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row_stride: {
4508 Info.opc = ISD::INTRINSIC_W_CHAIN;
4509 Info.memVT = MVT::v8i32;
4510 Info.ptrVal = I.getArgOperand(0);
4511 Info.offset = 0;
4512 Info.flags = MachineMemOperand::MOLoad;
4513 Info.align = Align(16);
4514 Infos.push_back(Info);
4515 return;
4516 }
4517
4518 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col:
4519 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col_stride:
4520 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row:
4521 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row_stride:
4522 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col:
4523 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col_stride:
4524 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row:
4525 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row_stride:
4526 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_b16:
4527 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_trans_b16:
4528 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8:
4529 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b4x16_p64:
4530 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b6x16_p32:
4531 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b4x16_p64:
4532 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b6x16_p32: {
4533 Info.opc = ISD::INTRINSIC_W_CHAIN;
4534 Info.memVT = MVT::v2i32;
4535 Info.ptrVal = I.getArgOperand(0);
4536 Info.offset = 0;
4537 Info.flags = MachineMemOperand::MOLoad;
4538 Info.align = Align(8);
4539 Infos.push_back(Info);
4540 return;
4541 }
4542
4543 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col:
4544 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col_stride:
4545 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row:
4546 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row_stride:
4547
4548 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col:
4549 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col_stride:
4550 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row:
4551 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row_stride: {
4552 Info.opc = ISD::INTRINSIC_W_CHAIN;
4553 Info.memVT = MVT::f64;
4554 Info.ptrVal = I.getArgOperand(0);
4555 Info.offset = 0;
4556 Info.flags = MachineMemOperand::MOLoad;
4557 Info.align = Align(8);
4558 Infos.push_back(Info);
4559 return;
4560 }
4561
4562 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col:
4563 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col_stride:
4564 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row:
4565 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row_stride: {
4566 Info.opc = ISD::INTRINSIC_W_CHAIN;
4567 Info.memVT = MVT::v2f64;
4568 Info.ptrVal = I.getArgOperand(0);
4569 Info.offset = 0;
4570 Info.flags = MachineMemOperand::MOLoad;
4571 Info.align = Align(16);
4572 Infos.push_back(Info);
4573 return;
4574 }
4575
4576 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col:
4577 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row:
4578 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col_stride:
4579 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride:
4580 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col:
4581 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row:
4582 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col_stride:
4583 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row_stride:
4584 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col:
4585 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row:
4586 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col_stride:
4587 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row_stride: {
4588 Info.opc = ISD::INTRINSIC_VOID;
4589 Info.memVT = MVT::v4f16;
4590 Info.ptrVal = I.getArgOperand(0);
4591 Info.offset = 0;
4592 Info.flags = MachineMemOperand::MOStore;
4593 Info.align = Align(16);
4594 Infos.push_back(Info);
4595 return;
4596 }
4597
4598 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col:
4599 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row:
4600 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col_stride:
4601 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride:
4602 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col:
4603 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row:
4604 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col_stride:
4605 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row_stride:
4606 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col:
4607 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row:
4608 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col_stride:
4609 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row_stride:
4610 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col:
4611 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row:
4612 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col_stride:
4613 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row_stride: {
4614 Info.opc = ISD::INTRINSIC_VOID;
4615 Info.memVT = MVT::v8f32;
4616 Info.ptrVal = I.getArgOperand(0);
4617 Info.offset = 0;
4618 Info.flags = MachineMemOperand::MOStore;
4619 Info.align = Align(16);
4620 Infos.push_back(Info);
4621 return;
4622 }
4623
4624 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col:
4625 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col_stride:
4626 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row:
4627 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row_stride:
4628 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col:
4629 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col_stride:
4630 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row:
4631 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row_stride:
4632 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col:
4633 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col_stride:
4634 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row:
4635 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row_stride: {
4636 Info.opc = ISD::INTRINSIC_VOID;
4637 Info.memVT = MVT::v8i32;
4638 Info.ptrVal = I.getArgOperand(0);
4639 Info.offset = 0;
4640 Info.flags = MachineMemOperand::MOStore;
4641 Info.align = Align(16);
4642 Infos.push_back(Info);
4643 return;
4644 }
4645
4646 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col:
4647 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col_stride:
4648 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row:
4649 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row_stride:
4650 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col:
4651 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col_stride:
4652 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row:
4653 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row_stride:
4654 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_b16:
4655 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_trans_b16:
4656 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x2_trans_b8: {
4657 Info.opc = ISD::INTRINSIC_VOID;
4658 Info.memVT = MVT::v2i32;
4659 Info.ptrVal = I.getArgOperand(0);
4660 Info.offset = 0;
4661 Info.flags = MachineMemOperand::MOStore;
4662 Info.align = Align(8);
4663 Infos.push_back(Info);
4664 return;
4665 }
4666
4667 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col:
4668 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col_stride:
4669 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row:
4670 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row_stride: {
4671 Info.opc = ISD::INTRINSIC_VOID;
4672 Info.memVT = MVT::v2f64;
4673 Info.ptrVal = I.getArgOperand(0);
4674 Info.offset = 0;
4675 Info.flags = MachineMemOperand::MOStore;
4676 Info.align = Align(16);
4677 Infos.push_back(Info);
4678 return;
4679 }
4680
4681 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_b16:
4682 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_trans_b16:
4683 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x1_trans_b8: {
4684 Info.opc = ISD::INTRINSIC_VOID;
4685 Info.memVT = MVT::i32;
4686 Info.ptrVal = I.getArgOperand(0);
4687 Info.offset = 0;
4688 Info.flags = MachineMemOperand::MOStore;
4689 Info.align = Align(4);
4690 Infos.push_back(Info);
4691 return;
4692 }
4693
4694 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_b16:
4695 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_trans_b16:
4696 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x4_trans_b8: {
4697 Info.opc = ISD::INTRINSIC_VOID;
4698 Info.memVT = MVT::v4i32;
4699 Info.ptrVal = I.getArgOperand(0);
4700 Info.offset = 0;
4701 Info.flags = MachineMemOperand::MOStore;
4702 Info.align = Align(16);
4703 Infos.push_back(Info);
4704 return;
4705 }
4706
4707 case Intrinsic::nvvm_atomic_add_gen_f_cta:
4708 case Intrinsic::nvvm_atomic_add_gen_f_sys:
4709 case Intrinsic::nvvm_atomic_add_gen_i_cta:
4710 case Intrinsic::nvvm_atomic_add_gen_i_sys:
4711 case Intrinsic::nvvm_atomic_and_gen_i_cta:
4712 case Intrinsic::nvvm_atomic_and_gen_i_sys:
4713 case Intrinsic::nvvm_atomic_cas_gen_i_cta:
4714 case Intrinsic::nvvm_atomic_cas_gen_i_sys:
4715 case Intrinsic::nvvm_atomic_dec_gen_i_cta:
4716 case Intrinsic::nvvm_atomic_dec_gen_i_sys:
4717 case Intrinsic::nvvm_atomic_inc_gen_i_cta:
4718 case Intrinsic::nvvm_atomic_inc_gen_i_sys:
4719 case Intrinsic::nvvm_atomic_max_gen_i_cta:
4720 case Intrinsic::nvvm_atomic_max_gen_i_sys:
4721 case Intrinsic::nvvm_atomic_min_gen_i_cta:
4722 case Intrinsic::nvvm_atomic_min_gen_i_sys:
4723 case Intrinsic::nvvm_atomic_or_gen_i_cta:
4724 case Intrinsic::nvvm_atomic_or_gen_i_sys:
4725 case Intrinsic::nvvm_atomic_exch_gen_i_cta:
4726 case Intrinsic::nvvm_atomic_exch_gen_i_sys:
4727 case Intrinsic::nvvm_atomic_xor_gen_i_cta:
4728 case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
4729 auto &DL = I.getDataLayout();
4730 Info.opc = ISD::INTRINSIC_W_CHAIN;
4731 Info.memVT = getValueType(DL, I.getType());
4732 Info.ptrVal = I.getArgOperand(0);
4733 Info.offset = 0;
4735 Info.align.reset();
4736 Infos.push_back(Info);
4737 return;
4738 }
4739
4740 case Intrinsic::nvvm_prefetch_tensormap: {
4741 auto &DL = I.getDataLayout();
4742 Info.opc = ISD::INTRINSIC_VOID;
4743 Info.memVT = getPointerTy(DL);
4744 Info.ptrVal = I.getArgOperand(0);
4745 Info.offset = 0;
4746 Info.flags =
4748 Info.align.reset();
4749 Infos.push_back(Info);
4750 return;
4751 }
4752
4753 case Intrinsic::nvvm_tensormap_replace_global_address:
4754 case Intrinsic::nvvm_tensormap_replace_global_stride: {
4755 Info.opc = ISD::INTRINSIC_VOID;
4756 Info.memVT = MVT::i64;
4757 Info.ptrVal = I.getArgOperand(0);
4758 Info.offset = 0;
4759 Info.flags = MachineMemOperand::MOStore;
4760 Info.align.reset();
4761 Infos.push_back(Info);
4762 return;
4763 }
4764
4765 case Intrinsic::nvvm_tensormap_replace_rank:
4766 case Intrinsic::nvvm_tensormap_replace_box_dim:
4767 case Intrinsic::nvvm_tensormap_replace_global_dim:
4768 case Intrinsic::nvvm_tensormap_replace_element_stride:
4769 case Intrinsic::nvvm_tensormap_replace_elemtype:
4770 case Intrinsic::nvvm_tensormap_replace_interleave_layout:
4771 case Intrinsic::nvvm_tensormap_replace_swizzle_mode:
4772 case Intrinsic::nvvm_tensormap_replace_swizzle_atomicity:
4773 case Intrinsic::nvvm_tensormap_replace_fill_mode: {
4774 Info.opc = ISD::INTRINSIC_VOID;
4775 Info.memVT = MVT::i32;
4776 Info.ptrVal = I.getArgOperand(0);
4777 Info.offset = 0;
4778 Info.flags = MachineMemOperand::MOStore;
4779 Info.align.reset();
4780 Infos.push_back(Info);
4781 return;
4782 }
4783
4784 case Intrinsic::nvvm_ldu_global_i:
4785 case Intrinsic::nvvm_ldu_global_f:
4786 case Intrinsic::nvvm_ldu_global_p: {
4787 Info.opc = ISD::INTRINSIC_W_CHAIN;
4788 Info.memVT = getValueType(I.getDataLayout(), I.getType());
4789 Info.ptrVal = I.getArgOperand(0);
4790 Info.offset = 0;
4791 Info.flags = MachineMemOperand::MOLoad;
4792 Info.align = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
4793
4794 Infos.push_back(Info);
4795 return;
4796 }
4797 case Intrinsic::nvvm_tex_1d_v4f32_s32:
4798 case Intrinsic::nvvm_tex_1d_v4f32_f32:
4799 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
4800 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
4801 case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
4802 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
4803 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
4804 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
4805 case Intrinsic::nvvm_tex_2d_v4f32_s32:
4806 case Intrinsic::nvvm_tex_2d_v4f32_f32:
4807 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
4808 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
4809 case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
4810 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
4811 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
4812 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
4813 case Intrinsic::nvvm_tex_3d_v4f32_s32:
4814 case Intrinsic::nvvm_tex_3d_v4f32_f32:
4815 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
4816 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
4817 case Intrinsic::nvvm_tex_cube_v4f32_f32:
4818 case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
4819 case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
4820 case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
4821 case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
4822 case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
4823 case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
4824 case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
4825 case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
4826 case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
4827 case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
4828 case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
4829 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
4830 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
4831 case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
4832 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
4833 case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
4834 case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
4835 case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
4836 case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
4837 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
4838 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
4839 case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
4840 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
4841 case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
4842 case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
4843 case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
4844 case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
4845 case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
4846 case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
4847 case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
4848 case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
4849 case Intrinsic::nvvm_tex_unified_cube_grad_v4f32_f32:
4850 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4f32_f32:
4851 case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
4852 case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
4853 case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
4854 case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
4855 Info.opc = ISD::INTRINSIC_W_CHAIN;
4856 Info.memVT = MVT::v4f32;
4857 Info.ptrVal = nullptr;
4858 Info.offset = 0;
4859 Info.flags = MachineMemOperand::MOLoad;
4860 Info.align = Align(16);
4861 Infos.push_back(Info);
4862 return;
4863
4864 case Intrinsic::nvvm_tex_1d_v4s32_s32:
4865 case Intrinsic::nvvm_tex_1d_v4s32_f32:
4866 case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
4867 case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
4868 case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
4869 case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
4870 case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
4871 case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
4872 case Intrinsic::nvvm_tex_2d_v4s32_s32:
4873 case Intrinsic::nvvm_tex_2d_v4s32_f32:
4874 case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
4875 case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
4876 case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
4877 case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
4878 case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
4879 case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
4880 case Intrinsic::nvvm_tex_3d_v4s32_s32:
4881 case Intrinsic::nvvm_tex_3d_v4s32_f32:
4882 case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
4883 case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
4884 case Intrinsic::nvvm_tex_cube_v4s32_f32:
4885 case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
4886 case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
4887 case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
4888 case Intrinsic::nvvm_tex_cube_v4u32_f32:
4889 case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
4890 case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
4891 case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
4892 case Intrinsic::nvvm_tex_1d_v4u32_s32:
4893 case Intrinsic::nvvm_tex_1d_v4u32_f32:
4894 case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
4895 case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
4896 case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
4897 case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
4898 case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
4899 case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
4900 case Intrinsic::nvvm_tex_2d_v4u32_s32:
4901 case Intrinsic::nvvm_tex_2d_v4u32_f32:
4902 case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
4903 case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
4904 case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
4905 case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
4906 case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
4907 case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
4908 case Intrinsic::nvvm_tex_3d_v4u32_s32:
4909 case Intrinsic::nvvm_tex_3d_v4u32_f32:
4910 case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
4911 case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
4912 case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
4913 case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
4914 case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
4915 case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
4916 case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
4917 case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
4918 case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
4919 case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
4920 case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
4921 case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
4922 case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
4923 case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
4924 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
4925 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
4926 case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
4927 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
4928 case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
4929 case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
4930 case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
4931 case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
4932 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
4933 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
4934 case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
4935 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
4936 case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
4937 case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
4938 case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
4939 case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
4940 case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
4941 case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
4942 case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
4943 case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
4944 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
4945 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
4946 case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
4947 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
4948 case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
4949 case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
4950 case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
4951 case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
4952 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
4953 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
4954 case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
4955 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
4956 case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
4957 case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
4958 case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
4959 case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
4960 case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
4961 case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
4962 case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
4963 case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
4964 case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
4965 case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
4966 case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
4967 case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
4968 case Intrinsic::nvvm_tex_unified_cube_grad_v4s32_f32:
4969 case Intrinsic::nvvm_tex_unified_cube_grad_v4u32_f32:
4970 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4s32_f32:
4971 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4u32_f32:
4972 case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
4973 case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
4974 case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
4975 case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
4976 case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
4977 case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
4978 case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
4979 case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
4980 Info.opc = ISD::INTRINSIC_W_CHAIN;
4981 Info.memVT = MVT::v4i32;
4982 Info.ptrVal = nullptr;
4983 Info.offset = 0;
4984 Info.flags = MachineMemOperand::MOLoad;
4985 Info.align = Align(16);
4986 Infos.push_back(Info);
4987 return;
4988
4989 case Intrinsic::nvvm_suld_1d_i8_clamp:
4990 case Intrinsic::nvvm_suld_1d_v2i8_clamp:
4991 case Intrinsic::nvvm_suld_1d_v4i8_clamp:
4992 case Intrinsic::nvvm_suld_1d_array_i8_clamp:
4993 case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
4994 case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
4995 case Intrinsic::nvvm_suld_2d_i8_clamp:
4996 case Intrinsic::nvvm_suld_2d_v2i8_clamp:
4997 case Intrinsic::nvvm_suld_2d_v4i8_clamp:
4998 case Intrinsic::nvvm_suld_2d_array_i8_clamp:
4999 case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
5000 case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
5001 case Intrinsic::nvvm_suld_3d_i8_clamp:
5002 case Intrinsic::nvvm_suld_3d_v2i8_clamp:
5003 case Intrinsic::nvvm_suld_3d_v4i8_clamp:
5004 case Intrinsic::nvvm_suld_1d_i8_trap:
5005 case Intrinsic::nvvm_suld_1d_v2i8_trap:
5006 case Intrinsic::nvvm_suld_1d_v4i8_trap:
5007 case Intrinsic::nvvm_suld_1d_array_i8_trap:
5008 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
5009 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
5010 case Intrinsic::nvvm_suld_2d_i8_trap:
5011 case Intrinsic::nvvm_suld_2d_v2i8_trap:
5012 case Intrinsic::nvvm_suld_2d_v4i8_trap:
5013 case Intrinsic::nvvm_suld_2d_array_i8_trap:
5014 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
5015 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
5016 case Intrinsic::nvvm_suld_3d_i8_trap:
5017 case Intrinsic::nvvm_suld_3d_v2i8_trap:
5018 case Intrinsic::nvvm_suld_3d_v4i8_trap:
5019 case Intrinsic::nvvm_suld_1d_i8_zero:
5020 case Intrinsic::nvvm_suld_1d_v2i8_zero:
5021 case Intrinsic::nvvm_suld_1d_v4i8_zero:
5022 case Intrinsic::nvvm_suld_1d_array_i8_zero:
5023 case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
5024 case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
5025 case Intrinsic::nvvm_suld_2d_i8_zero:
5026 case Intrinsic::nvvm_suld_2d_v2i8_zero:
5027 case Intrinsic::nvvm_suld_2d_v4i8_zero:
5028 case Intrinsic::nvvm_suld_2d_array_i8_zero:
5029 case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
5030 case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
5031 case Intrinsic::nvvm_suld_3d_i8_zero:
5032 case Intrinsic::nvvm_suld_3d_v2i8_zero:
5033 case Intrinsic::nvvm_suld_3d_v4i8_zero:
5034 Info.opc = ISD::INTRINSIC_W_CHAIN;
5035 Info.memVT = MVT::i8;
5036 Info.ptrVal = nullptr;
5037 Info.offset = 0;
5038 Info.flags = MachineMemOperand::MOLoad;
5039 Info.align = Align(16);
5040 Infos.push_back(Info);
5041 return;
5042
5043 case Intrinsic::nvvm_suld_1d_i16_clamp:
5044 case Intrinsic::nvvm_suld_1d_v2i16_clamp:
5045 case Intrinsic::nvvm_suld_1d_v4i16_clamp:
5046 case Intrinsic::nvvm_suld_1d_array_i16_clamp:
5047 case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
5048 case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
5049 case Intrinsic::nvvm_suld_2d_i16_clamp:
5050 case Intrinsic::nvvm_suld_2d_v2i16_clamp:
5051 case Intrinsic::nvvm_suld_2d_v4i16_clamp:
5052 case Intrinsic::nvvm_suld_2d_array_i16_clamp:
5053 case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
5054 case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
5055 case Intrinsic::nvvm_suld_3d_i16_clamp:
5056 case Intrinsic::nvvm_suld_3d_v2i16_clamp:
5057 case Intrinsic::nvvm_suld_3d_v4i16_clamp:
5058 case Intrinsic::nvvm_suld_1d_i16_trap:
5059 case Intrinsic::nvvm_suld_1d_v2i16_trap:
5060 case Intrinsic::nvvm_suld_1d_v4i16_trap:
5061 case Intrinsic::nvvm_suld_1d_array_i16_trap:
5062 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
5063 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
5064 case Intrinsic::nvvm_suld_2d_i16_trap:
5065 case Intrinsic::nvvm_suld_2d_v2i16_trap:
5066 case Intrinsic::nvvm_suld_2d_v4i16_trap:
5067 case Intrinsic::nvvm_suld_2d_array_i16_trap:
5068 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
5069 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
5070 case Intrinsic::nvvm_suld_3d_i16_trap:
5071 case Intrinsic::nvvm_suld_3d_v2i16_trap:
5072 case Intrinsic::nvvm_suld_3d_v4i16_trap:
5073 case Intrinsic::nvvm_suld_1d_i16_zero:
5074 case Intrinsic::nvvm_suld_1d_v2i16_zero:
5075 case Intrinsic::nvvm_suld_1d_v4i16_zero:
5076 case Intrinsic::nvvm_suld_1d_array_i16_zero:
5077 case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
5078 case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
5079 case Intrinsic::nvvm_suld_2d_i16_zero:
5080 case Intrinsic::nvvm_suld_2d_v2i16_zero:
5081 case Intrinsic::nvvm_suld_2d_v4i16_zero:
5082 case Intrinsic::nvvm_suld_2d_array_i16_zero:
5083 case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
5084 case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
5085 case Intrinsic::nvvm_suld_3d_i16_zero:
5086 case Intrinsic::nvvm_suld_3d_v2i16_zero:
5087 case Intrinsic::nvvm_suld_3d_v4i16_zero:
5088 Info.opc = ISD::INTRINSIC_W_CHAIN;
5089 Info.memVT = MVT::i16;
5090 Info.ptrVal = nullptr;
5091 Info.offset = 0;
5092 Info.flags = MachineMemOperand::MOLoad;
5093 Info.align = Align(16);
5094 Infos.push_back(Info);
5095 return;
5096
5097 case Intrinsic::nvvm_suld_1d_i32_clamp:
5098 case Intrinsic::nvvm_suld_1d_v2i32_clamp:
5099 case Intrinsic::nvvm_suld_1d_v4i32_clamp:
5100 case Intrinsic::nvvm_suld_1d_array_i32_clamp:
5101 case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
5102 case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
5103 case Intrinsic::nvvm_suld_2d_i32_clamp:
5104 case Intrinsic::nvvm_suld_2d_v2i32_clamp:
5105 case Intrinsic::nvvm_suld_2d_v4i32_clamp:
5106 case Intrinsic::nvvm_suld_2d_array_i32_clamp:
5107 case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
5108 case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
5109 case Intrinsic::nvvm_suld_3d_i32_clamp:
5110 case Intrinsic::nvvm_suld_3d_v2i32_clamp:
5111 case Intrinsic::nvvm_suld_3d_v4i32_clamp:
5112 case Intrinsic::nvvm_suld_1d_i32_trap:
5113 case Intrinsic::nvvm_suld_1d_v2i32_trap:
5114 case Intrinsic::nvvm_suld_1d_v4i32_trap:
5115 case Intrinsic::nvvm_suld_1d_array_i32_trap:
5116 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
5117 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
5118 case Intrinsic::nvvm_suld_2d_i32_trap:
5119 case Intrinsic::nvvm_suld_2d_v2i32_trap:
5120 case Intrinsic::nvvm_suld_2d_v4i32_trap:
5121 case Intrinsic::nvvm_suld_2d_array_i32_trap:
5122 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
5123 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
5124 case Intrinsic::nvvm_suld_3d_i32_trap:
5125 case Intrinsic::nvvm_suld_3d_v2i32_trap:
5126 case Intrinsic::nvvm_suld_3d_v4i32_trap:
5127 case Intrinsic::nvvm_suld_1d_i32_zero:
5128 case Intrinsic::nvvm_suld_1d_v2i32_zero:
5129 case Intrinsic::nvvm_suld_1d_v4i32_zero:
5130 case Intrinsic::nvvm_suld_1d_array_i32_zero:
5131 case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
5132 case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
5133 case Intrinsic::nvvm_suld_2d_i32_zero:
5134 case Intrinsic::nvvm_suld_2d_v2i32_zero:
5135 case Intrinsic::nvvm_suld_2d_v4i32_zero:
5136 case Intrinsic::nvvm_suld_2d_array_i32_zero:
5137 case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
5138 case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
5139 case Intrinsic::nvvm_suld_3d_i32_zero:
5140 case Intrinsic::nvvm_suld_3d_v2i32_zero:
5141 case Intrinsic::nvvm_suld_3d_v4i32_zero:
5142 Info.opc = ISD::INTRINSIC_W_CHAIN;
5143 Info.memVT = MVT::i32;
5144 Info.ptrVal = nullptr;
5145 Info.offset = 0;
5146 Info.flags = MachineMemOperand::MOLoad;
5147 Info.align = Align(16);
5148 Infos.push_back(Info);
5149 return;
5150
5151 case Intrinsic::nvvm_suld_1d_i64_clamp:
5152 case Intrinsic::nvvm_suld_1d_v2i64_clamp:
5153 case Intrinsic::nvvm_suld_1d_array_i64_clamp:
5154 case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
5155 case Intrinsic::nvvm_suld_2d_i64_clamp:
5156 case Intrinsic::nvvm_suld_2d_v2i64_clamp:
5157 case Intrinsic::nvvm_suld_2d_array_i64_clamp:
5158 case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
5159 case Intrinsic::nvvm_suld_3d_i64_clamp:
5160 case Intrinsic::nvvm_suld_3d_v2i64_clamp:
5161 case Intrinsic::nvvm_suld_1d_i64_trap:
5162 case Intrinsic::nvvm_suld_1d_v2i64_trap:
5163 case Intrinsic::nvvm_suld_1d_array_i64_trap:
5164 case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
5165 case Intrinsic::nvvm_suld_2d_i64_trap:
5166 case Intrinsic::nvvm_suld_2d_v2i64_trap:
5167 case Intrinsic::nvvm_suld_2d_array_i64_trap:
5168 case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
5169 case Intrinsic::nvvm_suld_3d_i64_trap:
5170 case Intrinsic::nvvm_suld_3d_v2i64_trap:
5171 case Intrinsic::nvvm_suld_1d_i64_zero:
5172 case Intrinsic::nvvm_suld_1d_v2i64_zero:
5173 case Intrinsic::nvvm_suld_1d_array_i64_zero:
5174 case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
5175 case Intrinsic::nvvm_suld_2d_i64_zero:
5176 case Intrinsic::nvvm_suld_2d_v2i64_zero:
5177 case Intrinsic::nvvm_suld_2d_array_i64_zero:
5178 case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
5179 case Intrinsic::nvvm_suld_3d_i64_zero:
5180 case Intrinsic::nvvm_suld_3d_v2i64_zero:
5181 Info.opc = ISD::INTRINSIC_W_CHAIN;
5182 Info.memVT = MVT::i64;
5183 Info.ptrVal = nullptr;
5184 Info.offset = 0;
5185 Info.flags = MachineMemOperand::MOLoad;
5186 Info.align = Align(16);
5187 Infos.push_back(Info);
5188 return;
5189
5190 case Intrinsic::nvvm_tcgen05_ld_16x64b_x1:
5191 case Intrinsic::nvvm_tcgen05_ld_32x32b_x1:
5192 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x1: {
5193 Info.opc = ISD::INTRINSIC_W_CHAIN;
5194 Info.memVT = MVT::v1i32;
5195 Info.ptrVal = I.getArgOperand(0);
5196 Info.offset = 0;
5197 Info.flags = MachineMemOperand::MOLoad;
5198 Info.align.reset();
5199 Infos.push_back(Info);
5200 return;
5201 }
5202
5203 case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:
5204 case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:
5205 case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:
5206 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2:
5207 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x2_i32:
5208 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x2_i32: {
5209 Info.opc = ISD::INTRINSIC_W_CHAIN;
5210 Info.memVT = MVT::v2i32;
5211 Info.ptrVal = I.getArgOperand(0);
5212 Info.offset = 0;
5213 Info.flags = MachineMemOperand::MOLoad;
5214 Info.align.reset();
5215 Infos.push_back(Info);
5216 return;
5217 }
5218
5219 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x2_f32:
5220 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x2_f32: {
5221 Info.opc = ISD::INTRINSIC_W_CHAIN;
5222 Info.memVT = MVT::v2f32;
5223 Info.ptrVal = I.getArgOperand(0);
5224 Info.offset = 0;
5225 Info.flags = MachineMemOperand::MOLoad;
5226 Info.align.reset();
5227 Infos.push_back(Info);
5228 return;
5229 }
5230
5231 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:
5232 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:
5233 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:
5234 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:
5235 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4:
5236 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x4_i32:
5237 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x4_i32: {
5238 Info.opc = ISD::INTRINSIC_W_CHAIN;
5239 Info.memVT = MVT::v4i32;
5240 Info.ptrVal = I.getArgOperand(0);
5241 Info.offset = 0;
5242 Info.flags = MachineMemOperand::MOLoad;
5243 Info.align.reset();
5244 Infos.push_back(Info);
5245 return;
5246 }
5247
5248 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x4_f32:
5249 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x4_f32: {
5250 Info.opc = ISD::INTRINSIC_W_CHAIN;
5251 Info.memVT = MVT::v4f32;
5252 Info.ptrVal = I.getArgOperand(0);
5253 Info.offset = 0;
5254 Info.flags = MachineMemOperand::MOLoad;
5255 Info.align.reset();
5256 Infos.push_back(Info);
5257 return;
5258 }
5259
5260 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:
5261 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:
5262 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:
5263 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:
5264 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8:
5265 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x8_i32:
5266 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x8_i32: {
5267 Info.opc = ISD::INTRINSIC_W_CHAIN;
5268 Info.memVT = MVT::v8i32;
5269 Info.ptrVal = I.getArgOperand(0);
5270 Info.offset = 0;
5271 Info.flags = MachineMemOperand::MOLoad;
5272 Info.align.reset();
5273 Infos.push_back(Info);
5274 return;
5275 }
5276
5277 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x8_f32:
5278 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x8_f32: {
5279 Info.opc = ISD::INTRINSIC_W_CHAIN;
5280 Info.memVT = MVT::v8f32;
5281 Info.ptrVal = I.getArgOperand(0);
5282 Info.offset = 0;
5283 Info.flags = MachineMemOperand::MOLoad;
5284 Info.align.reset();
5285 Infos.push_back(Info);
5286 return;
5287 }
5288
5289 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:
5290 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:
5291 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:
5292 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:
5293 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16:
5294 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x16_i32:
5295 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x16_i32: {
5296 Info.opc = ISD::INTRINSIC_W_CHAIN;
5297 Info.memVT = MVT::v16i32;
5298 Info.ptrVal = I.getArgOperand(0);
5299 Info.offset = 0;
5300 Info.flags = MachineMemOperand::MOLoad;
5301 Info.align.reset();
5302 Infos.push_back(Info);
5303 return;
5304 }
5305
5306 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x16_f32:
5307 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x16_f32: {
5308 Info.opc = ISD::INTRINSIC_W_CHAIN;
5309 Info.memVT = MVT::v16f32;
5310 Info.ptrVal = I.getArgOperand(0);
5311 Info.offset = 0;
5312 Info.flags = MachineMemOperand::MOLoad;
5313 Info.align.reset();
5314 Infos.push_back(Info);
5315 return;
5316 }
5317
5318 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:
5319 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:
5320 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:
5321 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:
5322 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32:
5323 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x32_i32:
5324 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x32_i32: {
5325 Info.opc = ISD::INTRINSIC_W_CHAIN;
5326 Info.memVT = MVT::v32i32;
5327 Info.ptrVal = I.getArgOperand(0);
5328 Info.offset = 0;
5329 Info.flags = MachineMemOperand::MOLoad;
5330 Info.align.reset();
5331 Infos.push_back(Info);
5332 return;
5333 }
5334
5335 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x32_f32:
5336 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x32_f32: {
5337 Info.opc = ISD::INTRINSIC_W_CHAIN;
5338 Info.memVT = MVT::v32f32;
5339 Info.ptrVal = I.getArgOperand(0);
5340 Info.offset = 0;
5341 Info.flags = MachineMemOperand::MOLoad;
5342 Info.align.reset();
5343 Infos.push_back(Info);
5344 return;
5345 }
5346
5347 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:
5348 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:
5349 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:
5350 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:
5351 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64:
5352 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x64_i32:
5353 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x64_i32: {
5354 Info.opc = ISD::INTRINSIC_W_CHAIN;
5355 Info.memVT = MVT::v64i32;
5356 Info.ptrVal = I.getArgOperand(0);
5357 Info.offset = 0;
5358 Info.flags = MachineMemOperand::MOLoad;
5359 Info.align.reset();
5360 Infos.push_back(Info);
5361 return;
5362 }
5363
5364 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x64_f32:
5365 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x64_f32: {
5366 Info.opc = ISD::INTRINSIC_W_CHAIN;
5367 Info.memVT = MVT::v64f32;
5368 Info.ptrVal = I.getArgOperand(0);
5369 Info.offset = 0;
5370 Info.flags = MachineMemOperand::MOLoad;
5371 Info.align.reset();
5372 Infos.push_back(Info);
5373 return;
5374 }
5375
5376 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:
5377 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:
5378 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:
5379 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:
5380 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128:
5381 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x128_i32:
5382 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x128_i32: {
5383 Info.opc = ISD::INTRINSIC_W_CHAIN;
5384 Info.memVT = MVT::v128i32;
5385 Info.ptrVal = I.getArgOperand(0);
5386 Info.offset = 0;
5387 Info.flags = MachineMemOperand::MOLoad;
5388 Info.align.reset();
5389 Infos.push_back(Info);
5390 return;
5391 }
5392
5393 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x128_f32:
5394 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x128_f32: {
5395 Info.opc = ISD::INTRINSIC_W_CHAIN;
5396 Info.memVT = MVT::v128f32;
5397 Info.ptrVal = I.getArgOperand(0);
5398 Info.offset = 0;
5399 Info.flags = MachineMemOperand::MOLoad;
5400 Info.align.reset();
5401 Infos.push_back(Info);
5402 return;
5403 }
5404
5405 case Intrinsic::nvvm_tcgen05_st_16x64b_x1:
5406 case Intrinsic::nvvm_tcgen05_st_32x32b_x1:
5407 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x1: {
5408 Info.opc = ISD::INTRINSIC_VOID;
5409 Info.memVT = MVT::i32;
5410 Info.ptrVal = I.getArgOperand(0);
5411 Info.offset = 0;
5412 Info.flags = MachineMemOperand::MOStore;
5413 Info.align.reset();
5414 Infos.push_back(Info);
5415 return;
5416 }
5417
5418 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:
5419 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:
5420 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:
5421 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2: {
5422 Info.opc = ISD::INTRINSIC_VOID;
5423 Info.memVT = MVT::v2i32;
5424 Info.ptrVal = I.getArgOperand(0);
5425 Info.offset = 0;
5426 Info.flags = MachineMemOperand::MOStore;
5427 Info.align.reset();
5428 Infos.push_back(Info);
5429 return;
5430 }
5431
5432 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:
5433 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:
5434 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:
5435 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:
5436 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4: {
5437 Info.opc = ISD::INTRINSIC_VOID;
5438 Info.memVT = MVT::v4i32;
5439 Info.ptrVal = I.getArgOperand(0);
5440 Info.offset = 0;
5441 Info.flags = MachineMemOperand::MOStore;
5442 Info.align.reset();
5443 Infos.push_back(Info);
5444 return;
5445 }
5446
5447 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:
5448 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:
5449 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:
5450 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:
5451 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8: {
5452 Info.opc = ISD::INTRINSIC_VOID;
5453 Info.memVT = MVT::v8i32;
5454 Info.ptrVal = I.getArgOperand(0);
5455 Info.offset = 0;
5456 Info.flags = MachineMemOperand::MOStore;
5457 Info.align.reset();
5458 Infos.push_back(Info);
5459 return;
5460 }
5461
5462 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:
5463 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:
5464 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:
5465 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:
5466 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16: {
5467 Info.opc = ISD::INTRINSIC_VOID;
5468 Info.memVT = MVT::v16i32;
5469 Info.ptrVal = I.getArgOperand(0);
5470 Info.offset = 0;
5471 Info.flags = MachineMemOperand::MOStore;
5472 Info.align.reset();
5473 Infos.push_back(Info);
5474 return;
5475 }
5476
5477 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:
5478 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:
5479 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:
5480 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:
5481 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32: {
5482 Info.opc = ISD::INTRINSIC_VOID;
5483 Info.memVT = MVT::v32i32;
5484 Info.ptrVal = I.getArgOperand(0);
5485 Info.offset = 0;
5486 Info.flags = MachineMemOperand::MOStore;
5487 Info.align.reset();
5488 Infos.push_back(Info);
5489 return;
5490 }
5491
5492 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:
5493 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:
5494 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:
5495 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
5496 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64: {
5497 Info.opc = ISD::INTRINSIC_VOID;
5498 Info.memVT = MVT::v64i32;
5499 Info.ptrVal = I.getArgOperand(0);
5500 Info.offset = 0;
5501 Info.flags = MachineMemOperand::MOStore;
5502 Info.align.reset();
5503 Infos.push_back(Info);
5504 return;
5505 }
5506
5507 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:
5508 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:
5509 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:
5510 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:
5511 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128: {
5512 Info.opc = ISD::INTRINSIC_VOID;
5513 Info.memVT = MVT::v128i32;
5514 Info.ptrVal = I.getArgOperand(0);
5515 Info.offset = 0;
5516 Info.flags = MachineMemOperand::MOStore;
5517 Info.align.reset();
5518 Infos.push_back(Info);
5519 return;
5520 }
5521 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
5522 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
5523 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
5524 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
5525 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
5526 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
5527 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
5528 case Intrinsic::
5529 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
5530 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
5531 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
5532 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
5533 case Intrinsic::
5534 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift: {
5535 // We are reading and writing back to TMem
5536 Info.opc = ISD::INTRINSIC_VOID;
5537 Info.memVT = MVT::v4i32;
5538 Info.ptrVal = I.getArgOperand(0);
5539 Info.offset = 0;
5541 Info.align = Align(16);
5542 Infos.push_back(Info);
5543 return;
5544 }
5545
5546 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
5547 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
5548 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
5549 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
5550 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
5551 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
5552 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
5553 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
5554 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
5555 case Intrinsic::
5556 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
5557 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
5558 case Intrinsic::
5559 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift: {
5560 // We are reading and writing back to TMem
5561 Info.opc = ISD::INTRINSIC_VOID;
5562 Info.memVT = MVT::v8i32;
5563 Info.ptrVal = I.getArgOperand(0);
5564 Info.offset = 0;
5566 Info.align = Align(16);
5567 Infos.push_back(Info);
5568 return;
5569 }
5570 }
5571}
5572
5573// Helper for getting a function parameter name. Name is composed from
5574// its index and the function name. Negative index corresponds to special
5575// parameter (unsized array) used for passing variable arguments.
5577 int Idx) const {
5578 std::string ParamName;
5579 raw_string_ostream ParamStr(ParamName);
5580
5581 ParamStr << getTargetMachine().getSymbol(F)->getName();
5582 if (Idx < 0)
5583 ParamStr << "_vararg";
5584 else
5585 ParamStr << "_param_" << Idx;
5586
5587 return ParamName;
5588}
5589
5590/// isLegalAddressingMode - Return true if the addressing mode represented
5591/// by AM is legal for this target, for a load/store of the specified type.
5592/// Used to guide target specific optimizations, like loop strength reduction
5593/// (LoopStrengthReduce.cpp) and memory optimization for address mode
5594/// (CodeGenPrepare.cpp)
5596 const AddrMode &AM, Type *Ty,
5597 unsigned AS, Instruction *I) const {
5598 // AddrMode - This represents an addressing mode of:
5599 // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
5600 //
5601 // The legal address modes are
5602 // - [avar]
5603 // - [areg]
5604 // - [areg+immoff]
5605 // - [immAddr]
5606
5607 // immoff must fit in a signed 32-bit int
5608 if (!APInt(64, AM.BaseOffs).isSignedIntN(32))
5609 return false;
5610
5611 if (AM.BaseGV)
5612 return !AM.BaseOffs && !AM.HasBaseReg && !AM.Scale;
5613
5614 switch (AM.Scale) {
5615 case 0: // "r", "r+i" or "i" is allowed
5616 break;
5617 case 1:
5618 if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
5619 return false;
5620 // Otherwise we have r+i.
5621 break;
5622 default:
5623 // No scale > 1 is allowed
5624 return false;
5625 }
5626 return true;
5627}
5628
5629//===----------------------------------------------------------------------===//
5630// NVPTX Inline Assembly Support
5631//===----------------------------------------------------------------------===//
5632
5633/// getConstraintType - Given a constraint letter, return the type of
5634/// constraint it is for this target.
5637 if (Constraint.size() == 1) {
5638 switch (Constraint[0]) {
5639 default:
5640 break;
5641 case 'b':
5642 case 'r':
5643 case 'h':
5644 case 'c':
5645 case 'l':
5646 case 'f':
5647 case 'd':
5648 case 'q':
5649 case '0':
5650 case 'N':
5651 return C_RegisterClass;
5652 }
5653 }
5654 return TargetLowering::getConstraintType(Constraint);
5655}
5656
5657std::pair<unsigned, const TargetRegisterClass *>
5659 StringRef Constraint,
5660 MVT VT) const {
5661 if (Constraint.size() == 1) {
5662 switch (Constraint[0]) {
5663 case 'b':
5664 return std::make_pair(0U, &NVPTX::B1RegClass);
5665 case 'c':
5666 case 'h':
5667 return std::make_pair(0U, &NVPTX::B16RegClass);
5668 case 'r':
5669 case 'f':
5670 return std::make_pair(0U, &NVPTX::B32RegClass);
5671 case 'l':
5672 case 'N':
5673 case 'd':
5674 return std::make_pair(0U, &NVPTX::B64RegClass);
5675 case 'q': {
5676 if (STI.getSmVersion() < 70)
5677 report_fatal_error("Inline asm with 128 bit operands is only "
5678 "supported for sm_70 and higher!");
5679 return std::make_pair(0U, &NVPTX::B128RegClass);
5680 }
5681 }
5682 }
5683 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
5684}
5685
5686//===----------------------------------------------------------------------===//
5687// NVPTX DAG Combining
5688//===----------------------------------------------------------------------===//
5689
5691 CodeGenOptLevel OptLevel) const {
5692 // Always honor command-line argument
5693 if (FMAContractLevelOpt.getNumOccurrences() > 0)
5694 return FMAContractLevelOpt > 0;
5695
5696 // Do not contract if we're not optimizing the code.
5697 if (OptLevel == CodeGenOptLevel::None)
5698 return false;
5699
5700 // Honor TargetOptions flags that explicitly say fusion is okay.
5702 return true;
5703
5704 return false;
5705}
5706
5707static bool isConstZero(const SDValue &Operand) {
5708 const auto *Const = dyn_cast<ConstantSDNode>(Operand);
5709 return Const && Const->getZExtValue() == 0;
5710}
5711
5712/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
5713/// operands N0 and N1. This is a helper for PerformADDCombine that is
5714/// called with the default operands, and if that fails, with commuted
5715/// operands.
5716static SDValue
5719 EVT VT = N0.getValueType();
5720
5721 // Since integer multiply-add costs the same as integer multiply
5722 // but is more costly than integer add, do the fusion only when
5723 // the mul is only used in the add.
5724 // TODO: this may not be true for later architectures, consider relaxing this
5725 if (!N0.getNode()->hasOneUse())
5726 return SDValue();
5727
5728 // fold (add (select cond, 0, (mul a, b)), c)
5729 // -> (select cond, c, (add (mul a, b), c))
5730 //
5731 if (N0.getOpcode() == ISD::SELECT) {
5732 unsigned ZeroOpNum;
5733 if (isConstZero(N0->getOperand(1)))
5734 ZeroOpNum = 1;
5735 else if (isConstZero(N0->getOperand(2)))
5736 ZeroOpNum = 2;
5737 else
5738 return SDValue();
5739
5740 SDValue M = N0->getOperand((ZeroOpNum == 1) ? 2 : 1);
5741 if (M->getOpcode() != ISD::MUL || !M.getNode()->hasOneUse())
5742 return SDValue();
5743
5744 SDLoc DL(N);
5745 SDValue Mul =
5746 DCI.DAG.getNode(ISD::MUL, DL, VT, M->getOperand(0), M->getOperand(1));
5747 SDValue MAD = DCI.DAG.getNode(ISD::ADD, DL, VT, Mul, N1);
5748 return DCI.DAG.getSelect(SDLoc(N), VT, N0->getOperand(0),
5749 ((ZeroOpNum == 1) ? N1 : MAD),
5750 ((ZeroOpNum == 1) ? MAD : N1));
5751 }
5752
5753 return SDValue();
5754}
5755
5756SDValue NVPTXTargetLowering::performFADDCombineWithOperands(
5758 CodeGenOptLevel OptLevel) const {
5759 EVT VT = N0.getValueType();
5760 if (N0.getOpcode() == ISD::FMUL) {
5761 if (!(allowFMA(DCI.DAG.getMachineFunction(), OptLevel) ||
5762 (N->getFlags().hasAllowContract() &&
5763 N0->getFlags().hasAllowContract())))
5764 return SDValue();
5765
5766 // For floating point:
5767 // Do the fusion only when the mul has less than 5 uses and all
5768 // are add.
5769 // The heuristic is that if a use is not an add, then that use
5770 // cannot be fused into fma, therefore mul is still needed anyway.
5771 // If there are more than 4 uses, even if they are all add, fusing
5772 // them will increase register pressue.
5773 //
5774 int numUses = 0;
5775 int nonAddCount = 0;
5776 for (const SDNode *User : N0.getNode()->users()) {
5777 numUses++;
5778 if (User->getOpcode() != ISD::FADD)
5779 ++nonAddCount;
5780 if (numUses >= 5)
5781 return SDValue();
5782 }
5783 if (nonAddCount) {
5784 int orderNo = N->getIROrder();
5785 int orderNo2 = N0.getNode()->getIROrder();
5786 // simple heuristics here for considering potential register
5787 // pressure, the logics here is that the differnce are used
5788 // to measure the distance between def and use, the longer distance
5789 // more likely cause register pressure.
5790 if (orderNo - orderNo2 < 500)
5791 return SDValue();
5792
5793 // Now, check if at least one of the FMUL's operands is live beyond the
5794 // node N, which guarantees that the FMA will not increase register
5795 // pressure at node N.
5796 bool opIsLive = false;
5797 const SDNode *left = N0.getOperand(0).getNode();
5798 const SDNode *right = N0.getOperand(1).getNode();
5799
5800 if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
5801 opIsLive = true;
5802
5803 if (!opIsLive)
5804 for (const SDNode *User : left->users()) {
5805 int orderNo3 = User->getIROrder();
5806 if (orderNo3 > orderNo) {
5807 opIsLive = true;
5808 break;
5809 }
5810 }
5811
5812 if (!opIsLive)
5813 for (const SDNode *User : right->users()) {
5814 int orderNo3 = User->getIROrder();
5815 if (orderNo3 > orderNo) {
5816 opIsLive = true;
5817 break;
5818 }
5819 }
5820
5821 if (!opIsLive)
5822 return SDValue();
5823 }
5824
5825 return DCI.DAG.getNode(ISD::FMA, SDLoc(N), VT, N0.getOperand(0),
5826 N0.getOperand(1), N1);
5827 }
5828
5829 return SDValue();
5830}
5831
5832/// Fold unpacking movs into a load by increasing the number of return values.
5833///
5834/// ex:
5835/// L: v2f16,ch = load <p>
5836/// a: f16 = extractelt L:0, 0
5837/// b: f16 = extractelt L:0, 1
5838/// use(a, b)
5839///
5840/// ...is turned into...
5841///
5842/// L: f16,f16,ch = LoadV2 <p>
5843/// use(L:0, L:1)
5844static SDValue
5846 // Don't run this optimization before the legalizer
5847 if (!DCI.isAfterLegalizeDAG())
5848 return SDValue();
5849
5850 EVT ElementVT = N->getValueType(0);
5851 // Avoid non-packed types and v4i8
5852 if (!NVPTX::isPackedVectorTy(ElementVT) || ElementVT == MVT::v4i8)
5853 return SDValue();
5854
5855 // Check whether all outputs are either used by an extractelt or are
5856 // glue/chain nodes
5857 if (!all_of(N->uses(), [&](SDUse &U) {
5858 // Skip glue, chain nodes
5859 if (U.getValueType() == MVT::Glue || U.getValueType() == MVT::Other)
5860 return true;
5861 if (U.getUser()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
5862 if (N->getOpcode() != ISD::LOAD)
5863 return true;
5864 // Since this is an ISD::LOAD, check all extractelts are used. If
5865 // any are not used, we don't want to defeat another optimization that
5866 // will narrow the load.
5867 //
5868 // For example:
5869 //
5870 // L: v2f16,ch = load <p>
5871 // e0: f16 = extractelt L:0, 0
5872 // e1: f16 = extractelt L:0, 1 <-- unused
5873 // store e0
5874 //
5875 // Can be optimized by DAGCombiner to:
5876 //
5877 // L: f16,ch = load <p>
5878 // store L:0
5879 return !U.getUser()->use_empty();
5880 }
5881
5882 // Otherwise, this use prevents us from splitting a value.
5883 return false;
5884 }))
5885 return SDValue();
5886
5887 auto *LD = cast<MemSDNode>(N);
5888 SDLoc DL(LD);
5889
5890 // the new opcode after we double the number of operands
5891 unsigned Opcode;
5892 SmallVector<SDValue> Operands(LD->ops());
5893 unsigned OldNumOutputs; // non-glue, non-chain outputs
5894 switch (LD->getOpcode()) {
5895 case ISD::LOAD:
5896 OldNumOutputs = 1;
5897 // Any packed type is legal, so the legalizer will not have lowered
5898 // ISD::LOAD -> NVPTXISD::Load (unless it's under-aligned). We have to do it
5899 // here.
5900 Opcode = NVPTXISD::LoadV2;
5901 // append a "full" used bytes mask operand right before the extension type
5902 // operand, signifying that all bytes are used.
5903 Operands.push_back(DCI.DAG.getConstant(UINT32_MAX, DL, MVT::i32));
5904 Operands.push_back(DCI.DAG.getIntPtrConstant(
5905 cast<LoadSDNode>(LD)->getExtensionType(), DL));
5906 break;
5907 case NVPTXISD::LoadV2:
5908 OldNumOutputs = 2;
5909 Opcode = NVPTXISD::LoadV4;
5910 break;
5911 case NVPTXISD::LoadV4:
5912 // V8 is only supported for f32/i32. Don't forget, we're not changing the
5913 // load size here. This is already a 256-bit load.
5914 if (ElementVT != MVT::v2f32 && ElementVT != MVT::v2i32)
5915 return SDValue();
5916 OldNumOutputs = 4;
5917 Opcode = NVPTXISD::LoadV8;
5918 break;
5919 case NVPTXISD::LoadV8:
5920 // PTX doesn't support the next doubling of outputs
5921 return SDValue();
5922 }
5923
5924 // the non-glue, non-chain outputs in the new load
5925 const unsigned NewNumOutputs = OldNumOutputs * 2;
5926 SmallVector<EVT> NewVTs(NewNumOutputs, ElementVT.getVectorElementType());
5927 // add remaining chain and glue values
5928 NewVTs.append(LD->value_begin() + OldNumOutputs, LD->value_end());
5929
5930 // Create the new load
5931 SDValue NewLoad = DCI.DAG.getMemIntrinsicNode(
5932 Opcode, DL, DCI.DAG.getVTList(NewVTs), Operands, LD->getMemoryVT(),
5933 LD->getMemOperand());
5934
5935 // Now we use a combination of BUILD_VECTORs and a MERGE_VALUES node to keep
5936 // the outputs the same. These nodes will be optimized away in later
5937 // DAGCombiner iterations.
5939 for (unsigned I : seq(OldNumOutputs))
5940 Results.push_back(DCI.DAG.getBuildVector(
5941 ElementVT, DL, {NewLoad.getValue(I * 2), NewLoad.getValue(I * 2 + 1)}));
5942 // Add remaining chain and glue nodes
5943 for (unsigned I : seq(NewLoad->getNumValues() - NewNumOutputs))
5944 Results.push_back(NewLoad.getValue(NewNumOutputs + I));
5945
5946 return DCI.DAG.getMergeValues(Results, DL);
5947}
5948
5949/// Fold packing movs into a store.
5950///
5951/// ex:
5952/// v1: v2f16 = BUILD_VECTOR a:f16, b:f16
5953/// v2: v2f16 = BUILD_VECTOR c:f16, d:f16
5954/// StoreV2 v1, v2
5955///
5956/// ...is turned into...
5957///
5958/// StoreV4 a, b, c, d
5961 unsigned Front, unsigned Back) {
5962 // We want to run this as late as possible since other optimizations may
5963 // eliminate the BUILD_VECTORs.
5964 if (!DCI.isAfterLegalizeDAG())
5965 return SDValue();
5966
5967 // Get the type of the operands being stored.
5968 EVT ElementVT = N->getOperand(Front).getValueType();
5969
5970 // Avoid non-packed types and v4i8
5971 if (!NVPTX::isPackedVectorTy(ElementVT) || ElementVT == MVT::v4i8)
5972 return SDValue();
5973
5974 auto *ST = cast<MemSDNode>(N);
5975
5976 // The new opcode after we double the number of operands.
5977 unsigned Opcode;
5978 switch (N->getOpcode()) {
5979 case ISD::STORE:
5980 // Any packed type is legal, so the legalizer will not have lowered
5981 // ISD::STORE -> NVPTXISD::Store (unless it's under-aligned). We have to do
5982 // it here.
5983 Opcode = NVPTXISD::StoreV2;
5984 break;
5985 case NVPTXISD::StoreV2:
5986 Opcode = NVPTXISD::StoreV4;
5987 break;
5988 case NVPTXISD::StoreV4:
5989 // V8 is only supported for f32/i32. Don't forget, we're not changing the
5990 // store size here. This is already a 256-bit store.
5991 if (ElementVT != MVT::v2f32 && ElementVT != MVT::v2i32)
5992 return SDValue();
5993 Opcode = NVPTXISD::StoreV8;
5994 break;
5995 case NVPTXISD::StoreV8:
5996 // PTX doesn't support the next doubling of operands
5997 return SDValue();
5998 default:
5999 llvm_unreachable("Unhandled store opcode");
6000 }
6001
6002 // Scan the operands and if they're all BUILD_VECTORs, we'll have gathered
6003 // their elements.
6004 SmallVector<SDValue, 4> Operands(N->ops().take_front(Front));
6005 for (SDValue BV : N->ops().drop_front(Front).drop_back(Back)) {
6006 if (BV.getOpcode() != ISD::BUILD_VECTOR)
6007 return SDValue();
6008
6009 // If the operand has multiple uses, this optimization can increase register
6010 // pressure.
6011 if (!BV.hasOneUse())
6012 return SDValue();
6013
6014 // DAGCombiner visits nodes bottom-up. Check the BUILD_VECTOR operands for
6015 // any signs they may be folded by some other pattern or rule.
6016 for (SDValue Op : BV->ops()) {
6017 // Peek through bitcasts
6018 if (Op.getOpcode() == ISD::BITCAST)
6019 Op = Op.getOperand(0);
6020
6021 // This may be folded into a PRMT.
6022 if (Op.getValueType() == MVT::i16 && Op.getOpcode() == ISD::TRUNCATE &&
6023 Op->getOperand(0).getValueType() == MVT::i32)
6024 return SDValue();
6025
6026 // This may be folded into cvt.bf16x2
6027 if (Op.getOpcode() == ISD::FP_ROUND)
6028 return SDValue();
6029 }
6030 Operands.append({BV.getOperand(0), BV.getOperand(1)});
6031 }
6032 Operands.append(N->op_end() - Back, N->op_end());
6033
6034 // Now we replace the store
6035 return DCI.DAG.getMemIntrinsicNode(Opcode, SDLoc(N), N->getVTList(), Operands,
6036 ST->getMemoryVT(), ST->getMemOperand());
6037}
6038
6040 const NVPTXSubtarget &STI) {
6041
6042 if (DCI.isBeforeLegalize() && N->getOpcode() == ISD::STORE) {
6043 // Here is our chance to custom lower a store with a non-simple type.
6044 // Unfortunately, we can't do this in the legalizer because there is no
6045 // way to setOperationAction for an non-simple type.
6047 if (!ST->getValue().getValueType().isSimple())
6048 return lowerSTOREVector(SDValue(ST, 0), DCI.DAG, STI);
6049 }
6050
6051 return combinePackingMovIntoStore(N, DCI, 1, 2);
6052}
6053
6055 const NVPTXSubtarget &STI) {
6056 if (DCI.isBeforeLegalize() && N->getOpcode() == ISD::LOAD) {
6057 // Here is our chance to custom lower a load with a non-simple type.
6058 // Unfortunately, we can't do this in the legalizer because there is no
6059 // way to setOperationAction for an non-simple type.
6060 if (!N->getValueType(0).isSimple())
6061 return lowerLoadVector(N, DCI.DAG, STI);
6062 }
6063
6064 return combineUnpackingMovIntoLoad(N, DCI);
6065}
6066
6067/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
6068///
6071 CodeGenOptLevel OptLevel) {
6072 if (OptLevel == CodeGenOptLevel::None)
6073 return SDValue();
6074
6075 SDValue N0 = N->getOperand(0);
6076 SDValue N1 = N->getOperand(1);
6077
6078 // Skip non-integer, non-scalar case
6079 EVT VT = N0.getValueType();
6080 if (VT.isVector() || VT != MVT::i32)
6081 return SDValue();
6082
6083 // First try with the default operand order.
6084 if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI))
6085 return Result;
6086
6087 // If that didn't work, try again with the operands commuted.
6088 return PerformADDCombineWithOperands(N, N1, N0, DCI);
6089}
6090
6091/// Check if a v2f32 BUILD_VECTOR provably packs values from non-adjacent
6092/// register pairs (non-coalescable).
6093static bool isNonCoalescableBuildVector(const SDValue &BV) {
6094 if (BV.getOpcode() != ISD::BUILD_VECTOR || BV.getValueType() != MVT::v2f32)
6095 return false;
6096
6097 SDValue Elt0 = BV.getOperand(0);
6098 SDValue Elt1 = BV.getOperand(1);
6099
6100 bool IsExt0 = Elt0.getOpcode() == ISD::EXTRACT_VECTOR_ELT;
6101 bool IsExt1 = Elt1.getOpcode() == ISD::EXTRACT_VECTOR_ELT;
6102
6103 // If neither element is an EXTRACT_VECTOR_ELT they are free-standing
6104 // scalars and the register allocator can still place them side-by-side.
6105 if (!IsExt0 && !IsExt1)
6106 return false;
6107
6108 // If exactly one element is an EXTRACT_VECTOR_ELT, the other is a scalar
6109 // that cannot generally occupy the adjacent register slot.
6110 if (IsExt0 != IsExt1)
6111 return true;
6112
6113 // At this point both sources are extracting from vectors. If they are from
6114 // different vectors, then the BUILD_VECTOR is non-coalescable.
6115 SDValue Src0 = Elt0.getOperand(0);
6116 SDValue Src1 = Elt1.getOperand(0);
6117 if (Src0 != Src1)
6118 return true;
6119
6120 auto *Idx0 = dyn_cast<ConstantSDNode>(Elt0.getOperand(1));
6121 auto *Idx1 = dyn_cast<ConstantSDNode>(Elt1.getOperand(1));
6122 // If both indices are dynamic they will be lowered to
6123 // loads and the vector will be spilled to local memory. The register
6124 // allocator can easily place the results in adjacent registers.
6125 if (!Idx0 && !Idx1)
6126 return false;
6127
6128 // If one index is dynamic and the other is constant, the value from the
6129 // constant load will result in an additional register to pair with the result
6130 // from the dynamic load. We consider this non-coalescable.
6131 if ((Idx0 && !Idx1) || (!Idx0 && Idx1))
6132 return true;
6133
6134 // Both are constant, adjacent pairs are coalescable
6135 return std::abs(Idx0->getSExtValue() - Idx1->getSExtValue()) != 1;
6136}
6137
6138/// Return true if FMUL v2f32 node \p N may be scalarized to fold each lane's
6139/// product into a scalar FMA.
6140bool NVPTXTargetLowering::mayFoldFMULIntoFMA(SDNode *N, MachineFunction &MF,
6141 CodeGenOptLevel OptLevel) const {
6142 if (N->getOpcode() != ISD::FMUL || N->getValueType(0) != MVT::v2f32)
6143 return false;
6144 const bool GlobalFMA = allowFMA(MF, OptLevel);
6145 if (!N->getFlags().hasAllowContract() && !GlobalFMA)
6146 return false;
6147
6148 const SDNode *FirstFAdd = nullptr;
6149 unsigned NumScalarFAdd = 0;
6150
6151 // Both lanes must feed unique FADDs
6152 for (SDNode *EE : N->users()) {
6153 if (NumScalarFAdd == 2)
6154 return false;
6155
6156 if (EE->getOpcode() != ISD::EXTRACT_VECTOR_ELT || !EE->hasOneUse() ||
6157 !isa<ConstantSDNode>(EE->getOperand(1)))
6158 return false;
6159
6160 const SDNode *const FAdd = *EE->users().begin();
6161 if (FAdd->getOpcode() != ISD::FADD ||
6162 (!GlobalFMA && !FAdd->getFlags().hasAllowContract()))
6163 return false;
6164
6165 if (!FirstFAdd)
6166 FirstFAdd = FAdd;
6167 else if (FAdd == FirstFAdd)
6168 return false;
6169
6170 NumScalarFAdd++;
6171 }
6172
6173 return NumScalarFAdd == 2;
6174}
6175
6176/// Scalarize a v2f32 arithmetic node (FADD, FMUL, FSUB, FMA) when at least
6177/// one operand is a BUILD_VECTOR that repacks values from non-adjacent register
6178/// pairs. Without this combine the BUILD_VECTOR forces allocation of a
6179/// temporary 64-bit register, increasing register pressure.
6180///
6181/// Example - before:
6182/// t0: v2f32,v2f32,ch = LoadV2 ...
6183/// t1: f32 = extract_vector_elt t0, 0
6184/// t2: f32 = extract_vector_elt t0:1, 0
6185/// t3: v2f32 = BUILD_VECTOR t1, t2 ;; non-coalescable repack
6186/// t4: v2f32 = fma t_a, t3, t_c
6187///
6188/// After:
6189/// t0: v2f32,v2f32,ch = LoadV2 ...
6190/// t1: f32 = extract_vector_elt t0, 0
6191/// t2: f32 = extract_vector_elt t0:1, 0
6192/// a0: f32 = extract_vector_elt t_a, 0
6193/// a1: f32 = extract_vector_elt t_a, 1
6194/// c0: f32 = extract_vector_elt t_c, 0
6195/// c1: f32 = extract_vector_elt t_c, 1
6196/// r0: f32 = fma a0, t1, c0
6197/// r1: f32 = fma a1, t2, c1
6198/// t4: v2f32 = BUILD_VECTOR r0, r1
6199///
6200/// Also scalarizes an FMUL when all output lanes feed into scalar FADDs
6201/// to enable scalar FMA combining.
6202SDValue NVPTXTargetLowering::performScalarizeV2F32Op(
6204 CodeGenOptLevel OptLevel) const {
6205 EVT VT = N->getValueType(0);
6206 if (VT != MVT::v2f32)
6207 return SDValue();
6208
6209 if (none_of(N->ops(), isNonCoalescableBuildVector) &&
6210 !mayFoldFMULIntoFMA(N, DCI.DAG.getMachineFunction(), OptLevel))
6211 return SDValue();
6212
6213 SelectionDAG &DAG = DCI.DAG;
6214 SDLoc DL(N);
6215 EVT EltVT = VT.getVectorElementType();
6216 unsigned Opc = N->getOpcode();
6217
6218 // For each operand, get the scalar element at the given index: if the operand
6219 // is a BUILD_VECTOR, grab the element directly; otherwise, emit an
6220 // EXTRACT_VECTOR_ELT.
6221 auto GetElement = [&](SDValue Op, unsigned Index) -> SDValue {
6222 if (Op.getOpcode() == ISD::BUILD_VECTOR)
6223 return Op.getOperand(Index);
6224 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Op,
6225 DAG.getVectorIdxConstant(Index, DL));
6226 };
6227
6228 // Build scalar operand lists for element 0 and element 1.
6229 SmallVector<SDValue, 3> Ops0, Ops1;
6230 for (const SDValue &Op : N->ops()) {
6231 Ops0.push_back(GetElement(Op, 0));
6232 Ops1.push_back(GetElement(Op, 1));
6233 }
6234
6235 SDValue Res0 = DAG.getNode(Opc, DL, EltVT, Ops0, N->getFlags());
6236 SDValue Res1 = DAG.getNode(Opc, DL, EltVT, Ops1, N->getFlags());
6237
6238 return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Res0, Res1);
6239}
6240
6241/// Target-specific dag combine xforms for ISD::FADD.
6242SDValue
6243NVPTXTargetLowering::performFADDCombine(SDNode *N,
6245 CodeGenOptLevel OptLevel) const {
6246 if (SDValue Result = performScalarizeV2F32Op(N, DCI, OptLevel))
6247 return Result;
6248
6249 SDValue N0 = N->getOperand(0);
6250 SDValue N1 = N->getOperand(1);
6251
6252 EVT VT = N0.getValueType();
6253 if (VT.isVector() || !(VT == MVT::f32 || VT == MVT::f64))
6254 return SDValue();
6255
6256 // First try with the default operand order.
6257 if (SDValue Result = performFADDCombineWithOperands(N, N0, N1, DCI, OptLevel))
6258 return Result;
6259
6260 // If that didn't work, try again with the operands commuted.
6261 return performFADDCombineWithOperands(N, N1, N0, DCI, OptLevel);
6262}
6263
6264/// Get 3-input version of a 2-input min/max opcode
6265static unsigned getMinMax3Opcode(unsigned MinMax2Opcode) {
6266 switch (MinMax2Opcode) {
6267 case ISD::FMAXNUM:
6268 case ISD::FMAXIMUMNUM:
6269 return NVPTXISD::FMAXNUM3;
6270 case ISD::FMINNUM:
6271 case ISD::FMINIMUMNUM:
6272 return NVPTXISD::FMINNUM3;
6273 case ISD::FMAXIMUM:
6274 return NVPTXISD::FMAXIMUM3;
6275 case ISD::FMINIMUM:
6276 return NVPTXISD::FMINIMUM3;
6277 default:
6278 llvm_unreachable("Invalid 2-input min/max opcode");
6279 }
6280}
6281
6282/// PerformFMinMaxCombine - Combine (fmaxnum (fmaxnum a, b), c) into
6283/// (fmaxnum3 a, b, c). Also covers other llvm min/max intrinsics.
6286 unsigned PTXVersion, unsigned SmVersion) {
6287
6288 // 3-input min/max requires PTX 8.8+ and SM_100+, and only supports f32s
6289 EVT VT = N->getValueType(0);
6290 if (VT != MVT::f32 || PTXVersion < 88 || SmVersion < 100)
6291 return SDValue();
6292
6293 SDValue Op0 = N->getOperand(0);
6294 SDValue Op1 = N->getOperand(1);
6295 unsigned MinMaxOp2 = N->getOpcode();
6296 unsigned MinMaxOp3 = getMinMax3Opcode(MinMaxOp2);
6297
6298 if (Op0.getOpcode() == MinMaxOp2 && Op0.hasOneUse()) {
6299 // (maxnum (maxnum a, b), c) -> (maxnum3 a, b, c)
6300 SDValue A = Op0.getOperand(0);
6301 SDValue B = Op0.getOperand(1);
6302 SDValue C = Op1;
6303 return DCI.DAG.getNode(MinMaxOp3, SDLoc(N), VT, A, B, C, N->getFlags());
6304 } else if (Op1.getOpcode() == MinMaxOp2 && Op1.hasOneUse()) {
6305 // (maxnum a, (maxnum b, c)) -> (maxnum3 a, b, c)
6306 SDValue A = Op0;
6307 SDValue B = Op1.getOperand(0);
6308 SDValue C = Op1.getOperand(1);
6309 return DCI.DAG.getNode(MinMaxOp3, SDLoc(N), VT, A, B, C, N->getFlags());
6310 }
6311 return SDValue();
6312}
6313
6316 CodeGenOptLevel OptLevel) {
6317 assert(N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM);
6318
6319 // Don't do anything at less than -O2.
6320 if (OptLevel < CodeGenOptLevel::Default)
6321 return SDValue();
6322
6323 SelectionDAG &DAG = DCI.DAG;
6324 SDLoc DL(N);
6325 EVT VT = N->getValueType(0);
6326 bool IsSigned = N->getOpcode() == ISD::SREM;
6327 unsigned DivOpc = IsSigned ? ISD::SDIV : ISD::UDIV;
6328
6329 const SDValue &Num = N->getOperand(0);
6330 const SDValue &Den = N->getOperand(1);
6331
6332 for (const SDNode *U : Num->users()) {
6333 if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&
6334 U->getOperand(1) == Den) {
6335 // Num % Den -> Num - (Num / Den) * Den
6336 return DAG.getNode(ISD::SUB, DL, VT, Num,
6337 DAG.getNode(ISD::MUL, DL, VT,
6338 DAG.getNode(DivOpc, DL, VT, Num, Den),
6339 Den));
6340 }
6341 }
6342 return SDValue();
6343}
6344
6345// (sign_extend|zero_extend (mul|shl) x, y) -> (mul.wide x, y)
6347 CodeGenOptLevel OptLevel) {
6348 if (OptLevel == CodeGenOptLevel::None)
6349 return SDValue();
6350
6351 SDValue Op = N->getOperand(0);
6352 if (!Op.hasOneUse())
6353 return SDValue();
6354 EVT ToVT = N->getValueType(0);
6355 EVT FromVT = Op.getValueType();
6356 if (!((ToVT == MVT::i32 && FromVT == MVT::i16) ||
6357 (ToVT == MVT::i64 && FromVT == MVT::i32)))
6358 return SDValue();
6359 if (!(Op.getOpcode() == ISD::MUL ||
6360 (Op.getOpcode() == ISD::SHL && isa<ConstantSDNode>(Op.getOperand(1)))))
6361 return SDValue();
6362
6363 SDLoc DL(N);
6364 unsigned ExtOpcode = N->getOpcode();
6365 unsigned Opcode = 0;
6366 if (ExtOpcode == ISD::SIGN_EXTEND && Op->getFlags().hasNoSignedWrap())
6367 Opcode = NVPTXISD::MUL_WIDE_SIGNED;
6368 else if (ExtOpcode == ISD::ZERO_EXTEND && Op->getFlags().hasNoUnsignedWrap())
6369 Opcode = NVPTXISD::MUL_WIDE_UNSIGNED;
6370 else
6371 return SDValue();
6372 SDValue RHS = Op.getOperand(1);
6373 if (Op.getOpcode() == ISD::SHL) {
6374 const auto ShiftAmt = Op.getConstantOperandVal(1);
6375 const auto MulVal = APInt(FromVT.getSizeInBits(), 1) << ShiftAmt;
6376 RHS = DCI.DAG.getConstant(MulVal, DL, FromVT);
6377 }
6378 return DCI.DAG.getNode(Opcode, DL, ToVT, Op.getOperand(0), RHS);
6379}
6380
6386
6387/// IsMulWideOperandDemotable - Checks if the provided DAG node is an operand
6388/// that can be demoted to \p OptSize bits without loss of information. The
6389/// signedness of the operand, if determinable, is placed in \p S.
6391 unsigned OptSize,
6392 OperandSignedness &S) {
6393 S = Unknown;
6394
6395 if (Op.getOpcode() == ISD::SIGN_EXTEND ||
6396 Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {
6397 EVT OrigVT = Op.getOperand(0).getValueType();
6398 if (OrigVT.getFixedSizeInBits() <= OptSize) {
6399 S = Signed;
6400 return true;
6401 }
6402 } else if (Op.getOpcode() == ISD::ZERO_EXTEND) {
6403 EVT OrigVT = Op.getOperand(0).getValueType();
6404 if (OrigVT.getFixedSizeInBits() <= OptSize) {
6405 S = Unsigned;
6406 return true;
6407 }
6408 }
6409
6410 return false;
6411}
6412
6413/// AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can
6414/// be demoted to \p OptSize bits without loss of information. If the operands
6415/// contain a constant, it should appear as the RHS operand. The signedness of
6416/// the operands is placed in \p IsSigned.
6418 unsigned OptSize,
6419 bool &IsSigned) {
6420 OperandSignedness LHSSign;
6421
6422 // The LHS operand must be a demotable op
6423 if (!IsMulWideOperandDemotable(LHS, OptSize, LHSSign))
6424 return false;
6425
6426 // We should have been able to determine the signedness from the LHS
6427 if (LHSSign == Unknown)
6428 return false;
6429
6430 IsSigned = (LHSSign == Signed);
6431
6432 // The RHS can be a demotable op or a constant
6434 const APInt &Val = CI->getAPIntValue();
6435 if (LHSSign == Unsigned) {
6436 return Val.isIntN(OptSize);
6437 } else {
6438 return Val.isSignedIntN(OptSize);
6439 }
6440 } else {
6441 OperandSignedness RHSSign;
6442 if (!IsMulWideOperandDemotable(RHS, OptSize, RHSSign))
6443 return false;
6444
6445 return LHSSign == RHSSign;
6446 }
6447}
6448
6449/// TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply
6450/// of M/2 bits that produces an M-bit result (i.e. mul.wide). This transform
6451/// works on both multiply DAG nodes and SHL DAG nodes with a constant shift
6452/// amount.
6455 EVT MulType = N->getValueType(0);
6456 if (MulType != MVT::i32 && MulType != MVT::i64) {
6457 return SDValue();
6458 }
6459
6460 SDLoc DL(N);
6461 unsigned OptSize = MulType.getSizeInBits() >> 1;
6462 SDValue LHS = N->getOperand(0);
6463 SDValue RHS = N->getOperand(1);
6464
6465 // Canonicalize the multiply so the constant (if any) is on the right
6466 if (N->getOpcode() == ISD::MUL) {
6467 if (isa<ConstantSDNode>(LHS)) {
6468 std::swap(LHS, RHS);
6469 }
6470 }
6471
6472 // If we have a SHL, determine the actual multiply amount
6473 if (N->getOpcode() == ISD::SHL) {
6475 if (!ShlRHS) {
6476 return SDValue();
6477 }
6478
6479 APInt ShiftAmt = ShlRHS->getAPIntValue();
6480 unsigned BitWidth = MulType.getSizeInBits();
6481 if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {
6482 APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;
6483 RHS = DCI.DAG.getConstant(MulVal, DL, MulType);
6484 } else {
6485 return SDValue();
6486 }
6487 }
6488
6489 bool Signed;
6490 // Verify that our operands are demotable
6491 if (!AreMulWideOperandsDemotable(LHS, RHS, OptSize, Signed)) {
6492 return SDValue();
6493 }
6494
6495 EVT DemotedVT;
6496 if (MulType == MVT::i32) {
6497 DemotedVT = MVT::i16;
6498 } else {
6499 DemotedVT = MVT::i32;
6500 }
6501
6502 // Truncate the operands to the correct size. Note that these are just for
6503 // type consistency and will (likely) be eliminated in later phases.
6504 SDValue TruncLHS =
6505 DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, LHS);
6506 SDValue TruncRHS =
6507 DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, RHS);
6508
6509 unsigned Opc;
6510 if (Signed) {
6511 Opc = NVPTXISD::MUL_WIDE_SIGNED;
6512 } else {
6513 Opc = NVPTXISD::MUL_WIDE_UNSIGNED;
6514 }
6515
6516 return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);
6517}
6518
6519static bool isConstOne(const SDValue &Operand) {
6520 const auto *Const = dyn_cast<ConstantSDNode>(Operand);
6521 return Const && Const->getZExtValue() == 1;
6522}
6523
6525 if (Add->getOpcode() != ISD::ADD)
6526 return SDValue();
6527
6528 if (isConstOne(Add->getOperand(0)))
6529 return Add->getOperand(1);
6530
6531 if (isConstOne(Add->getOperand(1)))
6532 return Add->getOperand(0);
6533
6534 return SDValue();
6535}
6536
6539
6541 SDValue Mul = DCI.DAG.getNode(ISD::MUL, DL, VT, X, Y);
6542 return DCI.DAG.getNode(ISD::ADD, DL, VT, Mul, X);
6543 }
6544
6545 return SDValue();
6546}
6547
6549 SDLoc DL,
6551 if (Select->getOpcode() != ISD::SELECT)
6552 return SDValue();
6553
6554 SDValue Cond = Select->getOperand(0);
6555
6556 unsigned ConstOpNo;
6557 if (isConstOne(Select->getOperand(1)))
6558 ConstOpNo = 1;
6559 else if (isConstOne(Select->getOperand(2)))
6560 ConstOpNo = 2;
6561 else
6562 return SDValue();
6563
6564 SDValue Y = Select->getOperand((ConstOpNo == 1) ? 2 : 1);
6565
6566 // Do not combine if the resulting sequence is not obviously profitable.
6568 return SDValue();
6569
6570 SDValue NewMul = DCI.DAG.getNode(ISD::MUL, DL, VT, X, Y);
6571
6572 return DCI.DAG.getNode(ISD::SELECT, DL, VT, Cond,
6573 (ConstOpNo == 1) ? X : NewMul,
6574 (ConstOpNo == 1) ? NewMul : X);
6575}
6576
6577static SDValue
6580
6581 EVT VT = N0.getValueType();
6582 if (VT.isVector())
6583 return SDValue();
6584
6585 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
6586 return SDValue();
6587
6588 SDLoc DL(N);
6589
6590 // (mul x, (add y, 1)) -> (add (mul x, y), x)
6591 if (SDValue Res = combineMADConstOne(N0, N1, VT, DL, DCI))
6592 return Res;
6593 if (SDValue Res = combineMADConstOne(N1, N0, VT, DL, DCI))
6594 return Res;
6595
6596 // (mul x, (select y, 1)) -> (select (mul x, y), x)
6597 if (SDValue Res = combineMulSelectConstOne(N0, N1, VT, DL, DCI))
6598 return Res;
6599 if (SDValue Res = combineMulSelectConstOne(N1, N0, VT, DL, DCI))
6600 return Res;
6601
6602 return SDValue();
6603}
6604
6605/// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.
6608 CodeGenOptLevel OptLevel) {
6609 if (OptLevel == CodeGenOptLevel::None)
6610 return SDValue();
6611
6612 if (SDValue Ret = TryMULWIDECombine(N, DCI))
6613 return Ret;
6614
6615 SDValue N0 = N->getOperand(0);
6616 SDValue N1 = N->getOperand(1);
6617 return PerformMULCombineWithOperands(N, N0, N1, DCI);
6618}
6619
6620/// PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
6623 CodeGenOptLevel OptLevel) {
6624 if (OptLevel > CodeGenOptLevel::None) {
6625 // Try mul.wide combining at OptLevel > 0
6626 if (SDValue Ret = TryMULWIDECombine(N, DCI))
6627 return Ret;
6628 }
6629
6630 return SDValue();
6631}
6632
6635 unsigned int SmVersion) {
6636 EVT CCType = N->getValueType(0);
6637 SDValue A = N->getOperand(0);
6638 SDValue B = N->getOperand(1);
6639
6640 EVT AType = A.getValueType();
6641 if (!(CCType == MVT::v2i1 && (AType == MVT::v2f16 || AType == MVT::v2bf16)))
6642 return SDValue();
6643
6644 if (A.getValueType() == MVT::v2bf16 && SmVersion < 90)
6645 return SDValue();
6646
6647 SDLoc DL(N);
6648 // setp.f16x2 returns two scalar predicates, which we need to
6649 // convert back to v2i1. The returned result will be scalarized by
6650 // the legalizer, but the comparison will remain a single vector
6651 // instruction.
6652 SDValue CCNode = DCI.DAG.getNode(
6653 A.getValueType() == MVT::v2f16 ? NVPTXISD::SETP_F16X2
6655 DL, DCI.DAG.getVTList(MVT::i1, MVT::i1), {A, B, N->getOperand(2)});
6656 return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, CCType, CCNode.getValue(0),
6657 CCNode.getValue(1));
6658}
6659
6662 SDValue Vector = N->getOperand(0);
6663 if (Vector->getOpcode() == ISD::FREEZE)
6664 Vector = Vector->getOperand(0);
6665 SDLoc DL(N);
6666 EVT VectorVT = Vector.getValueType();
6667 if (Vector->getOpcode() == ISD::LOAD && VectorVT.isSimple() &&
6668 IsPTXVectorType(VectorVT.getSimpleVT()))
6669 return SDValue(); // Native vector loads already combine nicely w/
6670 // extract_vector_elt.
6671 // Don't mess with singletons or packed types (v2*32, v2*16, v4i8 and v8i8),
6672 // we already handle them OK.
6673 if (VectorVT.getVectorNumElements() == 1 ||
6674 NVPTX::isPackedVectorTy(VectorVT) || VectorVT == MVT::v8i8)
6675 return SDValue();
6676
6677 // Don't mess with undef values as sra may be simplified to 0, not undef.
6678 if (Vector->isUndef() || ISD::allOperandsUndef(Vector.getNode()))
6679 return SDValue();
6680
6681 uint64_t VectorBits = VectorVT.getSizeInBits();
6682 // We only handle the types we can extract in-register.
6683 if (!(VectorBits == 16 || VectorBits == 32 || VectorBits == 64))
6684 return SDValue();
6685
6686 ConstantSDNode *Index = dyn_cast<ConstantSDNode>(N->getOperand(1));
6687 // Index == 0 is handled by generic DAG combiner.
6688 if (!Index || Index->getZExtValue() == 0)
6689 return SDValue();
6690
6691 MVT IVT = MVT::getIntegerVT(VectorBits);
6692 EVT EltVT = VectorVT.getVectorElementType();
6693 EVT EltIVT = EltVT.changeTypeToInteger();
6694 uint64_t EltBits = EltVT.getScalarSizeInBits();
6695
6696 SDValue Result = DCI.DAG.getNode(
6697 ISD::TRUNCATE, DL, EltIVT,
6698 DCI.DAG.getNode(
6699 ISD::SRA, DL, IVT, DCI.DAG.getNode(ISD::BITCAST, DL, IVT, Vector),
6700 DCI.DAG.getConstant(Index->getZExtValue() * EltBits, DL, IVT)));
6701
6702 // If element has non-integer type, bitcast it back to the expected type.
6703 if (EltVT != EltIVT)
6704 Result = DCI.DAG.getNode(ISD::BITCAST, DL, EltVT, Result);
6705 // Past legalizer, we may need to extent i8 -> i16 to match the register type.
6706 if (EltVT != N->getValueType(0))
6707 Result = DCI.DAG.getNode(ISD::ANY_EXTEND, DL, N->getValueType(0), Result);
6708
6709 return Result;
6710}
6711
6712/// Transform patterns like:
6713/// (select (ugt shift_amt, BitWidth-1), 0, (srl/shl x, shift_amt))
6714/// (select (ult shift_amt, BitWidth), (srl/shl x, shift_amt), 0)
6715/// Into:
6716/// (NVPTXISD::SRL_CLAMP x, shift_amt) or (NVPTXISD::SHL_CLAMP x, shift_amt)
6717///
6718/// These patterns arise from C/C++ code like `shift >= 32 ? 0 : x >> shift`
6719/// which guards against undefined behavior. PTX shr/shl instructions clamp
6720/// shift amounts >= BitWidth to produce 0 for logical shifts, making the
6721/// guard redundant.
6722///
6723/// Note: We only handle SRL and SHL, not SRA, because arithmetic right
6724/// shifts could produce 0 or -1 when shift >= BitWidth.
6725/// Note: We don't handle uge or ule. These don't appear because of
6726/// canonicalization.
6729 if (!DCI.isAfterLegalizeDAG())
6730 return SDValue();
6731
6732 using namespace SDPatternMatch;
6733 unsigned BitWidth = N->getValueType(0).getSizeInBits();
6734 SDValue ShiftAmt, ShiftOp;
6735
6736 // Match logical shifts where the shift amount in the guard matches the shift
6737 // amount in the operation.
6738 auto LogicalShift =
6739 m_AllOf(m_Value(ShiftOp),
6740 m_AnyOf(m_Srl(m_Value(), m_TruncOrSelf(m_Deferred(ShiftAmt))),
6741 m_Shl(m_Value(), m_TruncOrSelf(m_Deferred(ShiftAmt)))));
6742
6743 // shift_amt > BitWidth-1 ? 0 : shift_op
6744 bool MatchedUGT =
6745 sd_match(N, m_Select(m_SetCC(m_Value(ShiftAmt),
6747 m_SpecificCondCode(ISD::SETUGT)),
6748 m_Zero(), LogicalShift));
6749 // shift_amt < BitWidth ? shift_op : 0
6750 bool MatchedULT =
6751 !MatchedUGT &&
6752 sd_match(N, m_Select(m_SetCC(m_Value(ShiftAmt),
6754 m_SpecificCondCode(ISD::SETULT)),
6755 LogicalShift, m_Zero()));
6756
6757 if (!MatchedUGT && !MatchedULT)
6758 return SDValue();
6759
6760 // Return a clamp shift operation, which has the same semantics as PTX shift.
6761 unsigned ClampOpc = ShiftOp.getOpcode() == ISD::SRL ? NVPTXISD::SRL_CLAMP
6762 : NVPTXISD::SHL_CLAMP;
6763 return DCI.DAG.getNode(ClampOpc, SDLoc(N), ShiftOp.getValueType(),
6764 ShiftOp.getOperand(0), ShiftOp.getOperand(1));
6765}
6766
6769 SDValue VA = N->getOperand(1);
6770 EVT VectorVT = VA.getValueType();
6771 if (VectorVT != MVT::v4i8)
6772 return SDValue();
6773
6774 // We need to split vselect into individual per-element operations Because we
6775 // use BFE/BFI instruction for byte extraction/insertion, we do end up with
6776 // 32-bit values, so we may as well do comparison as i32 to avoid conversions
6777 // to/from i16 normally used for i8 values.
6779 SDLoc DL(N);
6780 SDValue VCond = N->getOperand(0);
6781 SDValue VB = N->getOperand(2);
6782 for (int I = 0; I < 4; ++I) {
6783 SDValue C = DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i1, VCond,
6784 DCI.DAG.getConstant(I, DL, MVT::i32));
6785 SDValue EA = DCI.DAG.getAnyExtOrTrunc(
6786 DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8, VA,
6787 DCI.DAG.getConstant(I, DL, MVT::i32)),
6788 DL, MVT::i32);
6789 SDValue EB = DCI.DAG.getAnyExtOrTrunc(
6790 DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8, VB,
6791 DCI.DAG.getConstant(I, DL, MVT::i32)),
6792 DL, MVT::i32);
6793 E.push_back(DCI.DAG.getAnyExtOrTrunc(
6794 DCI.DAG.getNode(ISD::SELECT, DL, MVT::i32, C, EA, EB), DL, MVT::i8));
6795 }
6796 return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i8, E);
6797}
6798
6799static SDValue
6801 auto VT = N->getValueType(0);
6802 if (!DCI.isAfterLegalizeDAG() ||
6803 // only process v2*16 types
6804 !(NVPTX::isPackedVectorTy(VT) && VT.is32BitVector() &&
6805 VT.getVectorNumElements() == 2))
6806 return SDValue();
6807
6808 auto Op0 = N->getOperand(0);
6809 auto Op1 = N->getOperand(1);
6810
6811 // Start out by assuming we want to take the lower 2 bytes of each i32
6812 // operand.
6813 uint64_t Op0Bytes = 0x10;
6814 uint64_t Op1Bytes = 0x54;
6815
6816 std::pair<SDValue *, uint64_t *> OpData[2] = {{&Op0, &Op0Bytes},
6817 {&Op1, &Op1Bytes}};
6818
6819 // Check that each operand is an i16, truncated from an i32 operand. We'll
6820 // select individual bytes from those original operands. Optionally, fold in a
6821 // shift right of that original operand.
6822 for (auto &[Op, OpBytes] : OpData) {
6823 // Eat up any bitcast
6824 if (Op->getOpcode() == ISD::BITCAST)
6825 *Op = Op->getOperand(0);
6826
6827 if (!(Op->getValueType() == MVT::i16 && Op->getOpcode() == ISD::TRUNCATE &&
6828 Op->getOperand(0).getValueType() == MVT::i32))
6829 return SDValue();
6830
6831 // If the truncate has multiple uses, this optimization can increase
6832 // register pressure
6833 if (!Op->hasOneUse())
6834 return SDValue();
6835
6836 *Op = Op->getOperand(0);
6837
6838 // Optionally, fold in a shift-right of the original operand and let permute
6839 // pick the two higher bytes of the original value directly.
6840 if (Op->getOpcode() == ISD::SRL && isa<ConstantSDNode>(Op->getOperand(1))) {
6841 if (cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue() == 16) {
6842 // Shift the PRMT byte selector to pick upper bytes from each respective
6843 // value, instead of the lower ones: 0x10 -> 0x32, 0x54 -> 0x76
6844 assert((*OpBytes == 0x10 || *OpBytes == 0x54) &&
6845 "PRMT selector values out of range");
6846 *OpBytes += 0x22;
6847 *Op = Op->getOperand(0);
6848 }
6849 }
6850 }
6851
6852 SDLoc DL(N);
6853 auto &DAG = DCI.DAG;
6854
6855 auto PRMT =
6856 getPRMT(DAG.getBitcast(MVT::i32, Op0), DAG.getBitcast(MVT::i32, Op1),
6857 (Op1Bytes << 8) | Op0Bytes, DL, DAG);
6858 return DAG.getBitcast(VT, PRMT);
6859}
6860
6863 auto *ASCN1 = cast<AddrSpaceCastSDNode>(N);
6864
6865 if (auto *ASCN2 = dyn_cast<AddrSpaceCastSDNode>(ASCN1->getOperand(0))) {
6866 assert(ASCN2->getDestAddressSpace() == ASCN1->getSrcAddressSpace());
6867
6868 // Fold asc[B -> A](asc[A -> B](x)) -> x
6869 if (ASCN1->getDestAddressSpace() == ASCN2->getSrcAddressSpace())
6870 return ASCN2->getOperand(0);
6871 }
6872
6873 return SDValue();
6874}
6875
6876// Given a constant selector value and a prmt mode, return the selector value
6877// normalized to the generic prmt mode. See the PTX ISA documentation for more
6878// details:
6879// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-prmt
6880static APInt getPRMTSelector(const APInt &Selector, unsigned Mode) {
6881 assert(Selector.getBitWidth() == 32 && "PRMT must have i32 operands");
6882
6884 return Selector;
6885
6886 const unsigned V = Selector.trunc(2).getZExtValue();
6887
6888 const auto GetSelector = [](unsigned S0, unsigned S1, unsigned S2,
6889 unsigned S3) {
6890 return APInt(32, S0 | (S1 << 4) | (S2 << 8) | (S3 << 12));
6891 };
6892
6893 switch (Mode) {
6895 return GetSelector(V, V + 1, V + 2, V + 3);
6897 return GetSelector(V, (V - 1) & 7, (V - 2) & 7, (V - 3) & 7);
6899 return GetSelector(V, V, V, V);
6901 return GetSelector(V, std::max(V, 1U), std::max(V, 2U), 3U);
6903 return GetSelector(0, std::min(V, 1U), std::min(V, 2U), V);
6905 unsigned V1 = (V & 1) << 1;
6906 return GetSelector(V1, V1 + 1, V1, V1 + 1);
6907 }
6908 default:
6909 llvm_unreachable("Invalid PRMT mode");
6910 }
6911}
6912
6913static APInt computePRMT(APInt A, APInt B, APInt Selector, unsigned Mode) {
6914 assert(A.getBitWidth() == 32 && B.getBitWidth() == 32 &&
6915 Selector.getBitWidth() == 32 && "PRMT must have i32 operands");
6916 // {b, a} = {{b7, b6, b5, b4}, {b3, b2, b1, b0}}
6917 APInt BitField = B.concat(A);
6918 APInt SelectorVal = getPRMTSelector(Selector, Mode);
6919 APInt Result(32, 0);
6920 for (unsigned I : llvm::seq(4U)) {
6921 APInt Sel = SelectorVal.extractBits(4, I * 4);
6922 unsigned Idx = Sel.getLoBits(3).getZExtValue();
6923 unsigned Sign = Sel.getHiBits(1).getZExtValue();
6924 APInt Byte = BitField.extractBits(8, Idx * 8);
6925 if (Sign)
6926 Byte = Byte.ashr(8);
6927 Result.insertBits(Byte, I * 8);
6928 }
6929 return Result;
6930}
6931
6933 CodeGenOptLevel OptLevel) {
6934 if (OptLevel == CodeGenOptLevel::None)
6935 return SDValue();
6936
6937 // Constant fold PRMT
6938 if (isa<ConstantSDNode>(N->getOperand(0)) &&
6939 isa<ConstantSDNode>(N->getOperand(1)) &&
6940 isa<ConstantSDNode>(N->getOperand(2)))
6941 return DCI.DAG.getConstant(computePRMT(N->getConstantOperandAPInt(0),
6942 N->getConstantOperandAPInt(1),
6943 N->getConstantOperandAPInt(2),
6944 N->getConstantOperandVal(3)),
6945 SDLoc(N), N->getValueType(0));
6946 return SDValue();
6947}
6948
6949// During call lowering we wrap the return values in a ProxyReg node which
6950// depend on the chain value produced by the completed call. This ensures that
6951// the full call is emitted in cases where libcalls are used to legalize
6952// operations. To improve the functioning of other DAG combines we pull all
6953// operations we can through one of these nodes, ensuring that the ProxyReg
6954// directly wraps a load. That is:
6955//
6956// (ProxyReg (zext (load retval0))) => (zext (ProxyReg (load retval0)))
6957//
6960 switch (R.getOpcode()) {
6961 case ISD::TRUNCATE:
6962 case ISD::ANY_EXTEND:
6963 case ISD::SIGN_EXTEND:
6964 case ISD::ZERO_EXTEND:
6965 case ISD::BITCAST: {
6966 if (SDValue V = sinkProxyReg(R.getOperand(0), Chain, DCI))
6967 return DCI.DAG.getNode(R.getOpcode(), SDLoc(R), R.getValueType(), V);
6968 return SDValue();
6969 }
6970 case ISD::SHL:
6971 case ISD::SRL:
6972 case ISD::SRA:
6973 case ISD::OR: {
6974 if (SDValue A = sinkProxyReg(R.getOperand(0), Chain, DCI))
6975 if (SDValue B = sinkProxyReg(R.getOperand(1), Chain, DCI))
6976 return DCI.DAG.getNode(R.getOpcode(), SDLoc(R), R.getValueType(), A, B);
6977 return SDValue();
6978 }
6979 case ISD::Constant:
6980 return R;
6981 case ISD::LOAD:
6982 case NVPTXISD::LoadV2:
6983 case NVPTXISD::LoadV4: {
6984 return DCI.DAG.getNode(NVPTXISD::ProxyReg, SDLoc(R), R.getValueType(),
6985 {Chain, R});
6986 }
6987 case ISD::BUILD_VECTOR: {
6988 if (DCI.isBeforeLegalize())
6989 return SDValue();
6990
6992 for (auto &Op : R->ops()) {
6993 SDValue V = sinkProxyReg(Op, Chain, DCI);
6994 if (!V)
6995 return SDValue();
6996 Ops.push_back(V);
6997 }
6998 return DCI.DAG.getNode(ISD::BUILD_VECTOR, SDLoc(R), R.getValueType(), Ops);
6999 }
7001 if (DCI.isBeforeLegalize())
7002 return SDValue();
7003
7004 if (SDValue V = sinkProxyReg(R.getOperand(0), Chain, DCI))
7006 R.getValueType(), V, R.getOperand(1));
7007 return SDValue();
7008 }
7009 default:
7010 return SDValue();
7011 }
7012}
7013
7014static unsigned getF16SubOpc(Intrinsic::ID AddIntrinsicID) {
7015 switch (AddIntrinsicID) {
7016 default:
7017 break;
7018 case Intrinsic::nvvm_add_rn_sat_f16:
7019 case Intrinsic::nvvm_add_rn_sat_v2f16:
7020 return NVPTXISD::SUB_RN_SAT;
7021 case Intrinsic::nvvm_add_rn_ftz_sat_f16:
7022 case Intrinsic::nvvm_add_rn_ftz_sat_v2f16:
7023 return NVPTXISD::SUB_RN_FTZ_SAT;
7024 }
7025 llvm_unreachable("Invalid F16 add intrinsic");
7026}
7027
7029 Intrinsic::ID AddIntrinsicID) {
7030 SDValue Op1 = N->getOperand(1);
7031 SDValue Op2 = N->getOperand(2);
7032
7033 SDValue SubOp1, SubOp2;
7034
7035 if (Op1.getOpcode() == ISD::FNEG) {
7036 SubOp1 = Op2;
7037 SubOp2 = Op1.getOperand(0);
7038 } else if (Op2.getOpcode() == ISD::FNEG) {
7039 SubOp1 = Op1;
7040 SubOp2 = Op2.getOperand(0);
7041 } else {
7042 return SDValue();
7043 }
7044
7045 SDLoc DL(N);
7046 return DAG.getNode(getF16SubOpc(AddIntrinsicID), DL, N->getValueType(0),
7047 SubOp1, SubOp2);
7048}
7049
7052 const NVPTXSubtarget &STI) {
7053 unsigned IID = N->getConstantOperandVal(0);
7054
7055 switch (IID) {
7056 default:
7057 break;
7058 case Intrinsic::nvvm_add_rn_sat_f16:
7059 case Intrinsic::nvvm_add_rn_ftz_sat_f16:
7060 case Intrinsic::nvvm_add_rn_sat_v2f16:
7061 case Intrinsic::nvvm_add_rn_ftz_sat_v2f16:
7062 return combineF16AddWithNeg(N, DCI.DAG, IID);
7063 }
7064 return SDValue();
7065}
7066
7069
7070 SDValue Chain = N->getOperand(0);
7071 SDValue Reg = N->getOperand(1);
7072
7073 // If the ProxyReg is not wrapping a load, try to pull the operations through
7074 // the ProxyReg.
7075 if (Reg.getOpcode() != ISD::LOAD) {
7076 if (SDValue V = sinkProxyReg(Reg, Chain, DCI))
7077 return V;
7078 }
7079
7080 return SDValue();
7081}
7082
7083SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
7084 DAGCombinerInfo &DCI) const {
7086 switch (N->getOpcode()) {
7087 default:
7088 break;
7089 case ISD::ADD:
7090 return PerformADDCombine(N, DCI, OptLevel);
7091 case ISD::ADDRSPACECAST:
7092 return combineADDRSPACECAST(N, DCI);
7093 case ISD::SIGN_EXTEND:
7094 case ISD::ZERO_EXTEND:
7095 return combineMulWide(N, DCI, OptLevel);
7096 case ISD::BUILD_VECTOR:
7097 return PerformBUILD_VECTORCombine(N, DCI);
7099 return PerformEXTRACTCombine(N, DCI);
7100 case ISD::FADD:
7101 return performFADDCombine(N, DCI, OptLevel);
7102 case ISD::FMA:
7103 case ISD::FMUL:
7104 case ISD::FSUB:
7105 return performScalarizeV2F32Op(N, DCI, OptLevel);
7106 case ISD::FMAXNUM:
7107 case ISD::FMINNUM:
7108 case ISD::FMAXIMUM:
7109 case ISD::FMINIMUM:
7110 case ISD::FMAXIMUMNUM:
7111 case ISD::FMINIMUMNUM:
7112 return PerformFMinMaxCombine(N, DCI, STI.getPTXVersion(),
7113 STI.getSmVersion());
7114 case ISD::LOAD:
7115 case NVPTXISD::LoadV2:
7116 case NVPTXISD::LoadV4:
7117 return combineLOAD(N, DCI, STI);
7118 case ISD::MUL:
7119 return PerformMULCombine(N, DCI, OptLevel);
7120 case NVPTXISD::PRMT:
7121 return combinePRMT(N, DCI, OptLevel);
7122 case NVPTXISD::ProxyReg:
7123 return combineProxyReg(N, DCI);
7124 case ISD::SETCC:
7125 return PerformSETCCCombine(N, DCI, STI.getSmVersion());
7126 case ISD::SHL:
7127 return PerformSHLCombine(N, DCI, OptLevel);
7128 case ISD::SREM:
7129 case ISD::UREM:
7130 return PerformREMCombine(N, DCI, OptLevel);
7131 case ISD::STORE:
7132 case NVPTXISD::StoreV2:
7133 case NVPTXISD::StoreV4:
7134 return combineSTORE(N, DCI, STI);
7135 case ISD::SELECT:
7136 return PerformSELECTShiftCombine(N, DCI);
7137 case ISD::VSELECT:
7138 return PerformVSELECTCombine(N, DCI);
7140 return combineIntrinsicWOChain(N, DCI, STI);
7141 }
7142 return SDValue();
7143}
7144
7147 // Handle bitcasting to v2i8 without hitting the default promotion
7148 // strategy which goes through stack memory.
7149 SDValue Op(Node, 0);
7150 EVT ToVT = Op->getValueType(0);
7151 if (ToVT != MVT::v2i8) {
7152 return;
7153 }
7154
7155 // Bitcast to i16 and unpack elements into a vector
7156 SDLoc DL(Node);
7157 SDValue AsInt = DAG.getBitcast(MVT::i16, Op->getOperand(0));
7158 SDValue Vec0 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, AsInt);
7159 SDValue Const8 = DAG.getConstant(8, DL, MVT::i16);
7160 SDValue Vec1 =
7161 DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
7162 DAG.getNode(ISD::SRL, DL, MVT::i16, {AsInt, Const8}));
7163 Results.push_back(
7164 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i8, {Vec0, Vec1}));
7165}
7166
7169 SDValue Chain = N->getOperand(0);
7170 SDValue Intrin = N->getOperand(1);
7171 SDLoc DL(N);
7172
7173 // Get the intrinsic ID
7174 unsigned IntrinNo = Intrin.getNode()->getAsZExtVal();
7175 switch (IntrinNo) {
7176 default:
7177 return;
7178 case Intrinsic::nvvm_ldu_global_i:
7179 case Intrinsic::nvvm_ldu_global_f:
7180 case Intrinsic::nvvm_ldu_global_p: {
7181 EVT ResVT = N->getValueType(0);
7182
7183 if (ResVT.isVector()) {
7184 // Vector LDG/LDU
7185
7186 unsigned NumElts = ResVT.getVectorNumElements();
7187 EVT EltVT = ResVT.getVectorElementType();
7188
7189 // Since LDU/LDG are target nodes, we cannot rely on DAG type
7190 // legalization.
7191 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
7192 // loaded type to i16 and propagate the "real" type as the memory type.
7193 bool NeedTrunc = false;
7194 if (EltVT.getSizeInBits() < 16) {
7195 EltVT = MVT::i16;
7196 NeedTrunc = true;
7197 }
7198
7199 unsigned Opcode = 0;
7200 SDVTList LdResVTs;
7201
7202 switch (NumElts) {
7203 default:
7204 return;
7205 case 2:
7206 Opcode = NVPTXISD::LDUV2;
7207 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
7208 break;
7209 case 4: {
7210 Opcode = NVPTXISD::LDUV4;
7211 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
7212 LdResVTs = DAG.getVTList(ListVTs);
7213 break;
7214 }
7215 }
7216
7217 SmallVector<SDValue, 8> OtherOps;
7218
7219 // Copy regular operands
7220
7221 OtherOps.push_back(Chain); // Chain
7222 // Skip operand 1 (intrinsic ID)
7223 // Others
7224 OtherOps.append(N->op_begin() + 2, N->op_end());
7225
7227
7228 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
7229 MemSD->getMemoryVT(),
7230 MemSD->getMemOperand());
7231
7232 SmallVector<SDValue, 4> ScalarRes;
7233
7234 for (unsigned i = 0; i < NumElts; ++i) {
7235 SDValue Res = NewLD.getValue(i);
7236 if (NeedTrunc)
7237 Res =
7238 DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
7239 ScalarRes.push_back(Res);
7240 }
7241
7242 SDValue LoadChain = NewLD.getValue(NumElts);
7243
7244 SDValue BuildVec =
7245 DAG.getBuildVector(ResVT, DL, ScalarRes);
7246
7247 Results.push_back(BuildVec);
7248 Results.push_back(LoadChain);
7249 } else {
7250 // i8 LDG/LDU
7251 assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
7252 "Custom handling of non-i8 ldu/ldg?");
7253
7254 // Just copy all operands as-is
7256
7257 // Force output to i16
7258 SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
7259
7261
7262 // We make sure the memory type is i8, which will be used during isel
7263 // to select the proper instruction.
7264 SDValue NewLD =
7266 MVT::i8, MemSD->getMemOperand());
7267
7268 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
7269 NewLD.getValue(0)));
7270 Results.push_back(NewLD.getValue(1));
7271 }
7272 return;
7273 }
7274
7275 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:
7276 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:
7277 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:
7278 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:
7279 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:
7280 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:
7281 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:
7282 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:
7283 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:
7284 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:
7285 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:
7286 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:
7287 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:
7288 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:
7289 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:
7290 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:
7291 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:
7292 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:
7293 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:
7294 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:
7295 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:
7296 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:
7297 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:
7298 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:
7299 if (auto Res = lowerTcgen05Ld(N, DAG)) {
7300 Results.push_back(Res->first);
7301 Results.push_back(Res->second);
7302 }
7303 return;
7304
7305 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4:
7306 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8:
7307 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16:
7308 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32:
7309 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64:
7310 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128:
7311 if (auto Res = lowerTcgen05Ld(N, DAG, /*HasOffset=*/true)) {
7312 Results.push_back(Res->first);
7313 Results.push_back(Res->second);
7314 }
7315 return;
7316
7317 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x8_i32:
7318 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x8_f32:
7319 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x64_i32:
7320 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x64_f32:
7321 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x4_i32:
7322 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x4_f32:
7323 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x32_i32:
7324 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x32_f32:
7325 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x16_i32:
7326 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x16_f32:
7327 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x128_i32:
7328 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x128_f32:
7329 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x8_i32:
7330 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x8_f32:
7331 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x64_i32:
7332 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x64_f32:
7333 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x4_i32:
7334 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x4_f32:
7335 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x32_i32:
7336 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x32_f32:
7337 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x16_i32:
7338 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x16_f32:
7339 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x128_i32:
7340 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x128_f32:
7341 if (auto Res = lowerTcgen05LdRed(N, DAG)) {
7342 Results.push_back(std::get<0>(*Res));
7343 Results.push_back(std::get<1>(*Res));
7344 Results.push_back(std::get<2>(*Res));
7345 }
7346 return;
7347 }
7348}
7349
7352 // Change the CopyFromReg to output 2 64-bit results instead of a 128-bit
7353 // result so that it can pass the legalization
7354 SDLoc DL(N);
7355 SDValue Chain = N->getOperand(0);
7356 SDValue Reg = N->getOperand(1);
7357 SDValue Glue = N->getOperand(2);
7358
7359 assert(Reg.getValueType() == MVT::i128 &&
7360 "Custom lowering for CopyFromReg with 128-bit reg only");
7361 SmallVector<EVT, 4> ResultsType = {MVT::i64, MVT::i64, N->getValueType(1),
7362 N->getValueType(2)};
7363 SmallVector<SDValue, 3> NewOps = {Chain, Reg, Glue};
7364
7365 SDValue NewValue = DAG.getNode(ISD::CopyFromReg, DL, ResultsType, NewOps);
7366 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128,
7367 {NewValue.getValue(0), NewValue.getValue(1)});
7368
7369 Results.push_back(Pair);
7370 Results.push_back(NewValue.getValue(2));
7371 Results.push_back(NewValue.getValue(3));
7372}
7373
7375 const TargetLowering &TLI,
7377 SDValue Chain = N->getOperand(0);
7378 SDValue Reg = N->getOperand(1);
7379
7380 MVT VT = TLI.getRegisterType(*DAG.getContext(), Reg.getValueType());
7381
7382 SDValue NewReg = DAG.getAnyExtOrTrunc(Reg, SDLoc(N), VT);
7383 SDValue NewProxy =
7384 DAG.getNode(NVPTXISD::ProxyReg, SDLoc(N), VT, {Chain, NewReg});
7385 SDValue Res = DAG.getAnyExtOrTrunc(NewProxy, SDLoc(N), N->getValueType(0));
7386
7387 Results.push_back(Res);
7388}
7389
7391 const NVPTXSubtarget &STI,
7393 assert(N->getValueType(0) == MVT::i128 &&
7394 "Custom lowering for atomic128 only supports i128");
7395
7397 SDLoc dl(N);
7398
7399 if (!STI.hasAtomSwap128()) {
7402 "Support for b128 atomics introduced in PTX ISA version 8.3 and "
7403 "requires target sm_90.",
7404 dl.getDebugLoc()));
7405
7406 Results.push_back(DAG.getUNDEF(MVT::i128));
7407 Results.push_back(AN->getOperand(0)); // Chain
7408 return;
7409 }
7410
7412 Ops.push_back(AN->getOperand(0)); // Chain
7413 Ops.push_back(AN->getOperand(1)); // Ptr
7414 for (const auto &Op : AN->ops().drop_front(2)) {
7415 // Low part
7416 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i64, Op,
7417 DAG.getIntPtrConstant(0, dl)));
7418 // High part
7419 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i64, Op,
7420 DAG.getIntPtrConstant(1, dl)));
7421 }
7422 unsigned Opcode = N->getOpcode() == ISD::ATOMIC_SWAP
7425 SDVTList Tys = DAG.getVTList(MVT::i64, MVT::i64, MVT::Other);
7426 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, MVT::i128,
7427 AN->getMemOperand());
7428 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i128,
7429 {Result.getValue(0), Result.getValue(1)}));
7430 Results.push_back(Result.getValue(2));
7431}
7432
7433void NVPTXTargetLowering::ReplaceNodeResults(
7435 switch (N->getOpcode()) {
7436 default:
7437 report_fatal_error("Unhandled custom legalization");
7438 case ISD::BITCAST:
7439 ReplaceBITCAST(N, DAG, Results);
7440 return;
7441 case ISD::LOAD:
7442 case ISD::MLOAD:
7443 replaceLoadVector(N, DAG, Results, STI);
7444 return;
7447 return;
7448 case ISD::CopyFromReg:
7450 return;
7451 case NVPTXISD::ProxyReg:
7452 replaceProxyReg(N, DAG, *this, Results);
7453 return;
7455 case ISD::ATOMIC_SWAP:
7456 replaceAtomicSwap128(N, DAG, STI, Results);
7457 return;
7458 }
7459}
7460
7463 Type *Ty = AI->getValOperand()->getType();
7464
7465 if (AI->isFloatingPointOperation()) {
7467 if (Ty->isHalfTy() && STI.getSmVersion() >= 70 &&
7468 STI.getPTXVersion() >= 63)
7470 if (Ty->isBFloatTy() && STI.getSmVersion() >= 90 &&
7471 STI.getPTXVersion() >= 78)
7473 if (Ty->isFloatTy())
7475 if (Ty->isDoubleTy() && STI.hasAtomAddF64())
7477 }
7479 }
7480
7481 assert(Ty->isIntegerTy() && "Ty should be integer at this point");
7482 const unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
7483
7484 switch (AI->getOperation()) {
7485 default:
7488 if (BitWidth == 128)
7490 [[fallthrough]];
7494 switch (BitWidth) {
7495 case 8:
7496 case 16:
7498 case 32:
7500 case 64:
7501 if (STI.hasAtomBitwise64())
7504 case 128:
7506 default:
7507 llvm_unreachable("unsupported width encountered");
7508 }
7515 switch (BitWidth) {
7516 case 8:
7517 case 16:
7519 case 32:
7521 case 64:
7522 if (STI.hasAtomMinMax64())
7525 case 128:
7527 default:
7528 llvm_unreachable("unsupported width encountered");
7529 }
7532 switch (BitWidth) {
7533 case 32:
7535 case 8:
7536 case 16:
7537 case 64:
7538 case 128:
7540 default:
7541 llvm_unreachable("unsupported width encountered");
7542 }
7543 }
7544
7546}
7547
7549 const Instruction *I) const {
7550 // This function returns true iff the operation is emulated using a CAS-loop,
7551 // or if it has the memory order seq_cst (which is not natively supported in
7552 // the PTX `atom` instruction).
7553 //
7554 // atomicrmw and cmpxchg instructions not efficiently supported by PTX
7555 // are lowered to CAS emulation loops that preserve their memory order,
7556 // syncscope, and volatile semantics. For PTX, it is more efficient to use
7557 // atom.cas.relaxed.sco instructions within the loop, and fences before and
7558 // after the loop to restore order.
7559 //
7560 // Atomic instructions efficiently supported by PTX are lowered to
7561 // `atom.<op>.<sem>.<scope` instruction with their corresponding memory order
7562 // and scope. Since PTX does not support seq_cst, we emulate it by lowering to
7563 // a fence.sc followed by an atom according to the PTX atomics ABI
7564 // https://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability/atomic-abi.html
7565 if (auto *CI = dyn_cast<AtomicCmpXchgInst>(I))
7566 return (cast<IntegerType>(CI->getCompareOperand()->getType())
7567 ->getBitWidth() < STI.getMinCmpXchgSizeInBits()) ||
7568 CI->getMergedOrdering() == AtomicOrdering::SequentiallyConsistent;
7569 if (auto *RI = dyn_cast<AtomicRMWInst>(I))
7571 RI->getOrdering() == AtomicOrdering::SequentiallyConsistent;
7572 return false;
7573}
7574
7576 const Instruction *I) const {
7577 // If the operation is emulated by a CAS-loop, we lower the instruction to
7578 // atom.<op>.relaxed, since AtomicExpandPass will insert fences for enforcing
7579 // the correct memory ordering around the CAS loop.
7580 //
7581 // When the operation is not emulated, but the memory order is seq_cst,
7582 // we must lower to "fence.sc.<scope>; atom.<op>.acquire.<scope>;" to conform
7583 // to the PTX atomics ABI.
7584 // https://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability/atomic-abi.html
7585 // For such cases, emitLeadingFence() will separately insert the leading
7586 // "fence.sc.<scope>;". Here, we only set the memory order to acquire.
7587 //
7588 // Otherwise, the operation is not emulated, and the memory order is not
7589 // seq_cst. In this case, the LLVM memory order is natively supported by the
7590 // PTX `atom` instruction, and we just lower to the corresponding
7591 // `atom.<op>.relaxed|acquire|release|acq_rel". For such cases, this function
7592 // will NOT be called.
7593 // prerequisite: shouldInsertFencesForAtomic() should have returned `true` for
7594 // I before its memory order was modified.
7595 if (auto *CI = dyn_cast<AtomicCmpXchgInst>(I);
7596 CI && CI->getMergedOrdering() == AtomicOrdering::SequentiallyConsistent &&
7597 cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth() >=
7598 STI.getMinCmpXchgSizeInBits())
7600 else if (auto *RI = dyn_cast<AtomicRMWInst>(I);
7601 RI && RI->getOrdering() == AtomicOrdering::SequentiallyConsistent &&
7604
7606}
7607
7609 Instruction *Inst,
7610 AtomicOrdering Ord) const {
7611 // prerequisite: shouldInsertFencesForAtomic() should have returned `true` for
7612 // `Inst` before its memory order was modified. We cannot enforce this with an
7613 // assert, because AtomicExpandPass will have modified the memory order
7614 // between the initial call to shouldInsertFencesForAtomic() and the call to
7615 // this function.
7616 if (!isa<AtomicCmpXchgInst>(Inst) && !isa<AtomicRMWInst>(Inst))
7617 return TargetLoweringBase::emitLeadingFence(Builder, Inst, Ord);
7618
7619 // Specialize for cmpxchg and atomicrmw
7620 auto SSID = getAtomicSyncScopeID(Inst);
7621 assert(SSID.has_value() && "Expected an atomic operation");
7622
7623 if (isReleaseOrStronger(Ord))
7624 return Builder.CreateFence(Ord == AtomicOrdering::SequentiallyConsistent
7627 SSID.value());
7628
7629 return nullptr;
7630}
7631
7633 Instruction *Inst,
7634 AtomicOrdering Ord) const {
7635 // prerequisite: shouldInsertFencesForAtomic() should have returned `true` for
7636 // `Inst` before its memory order was modified. See `emitLeadingFence` for why
7637 // this cannot be enforced with an assert. Specialize for cmpxchg and
7638 // atomicrmw
7639 auto *CI = dyn_cast<AtomicCmpXchgInst>(Inst);
7640 auto *RI = dyn_cast<AtomicRMWInst>(Inst);
7641 if (!CI && !RI)
7642 return TargetLoweringBase::emitTrailingFence(Builder, Inst, Ord);
7643
7644 auto SSID = getAtomicSyncScopeID(Inst);
7645 assert(SSID.has_value() && "Expected an atomic operation");
7646
7647 bool IsEmulated =
7648 CI ? cast<IntegerType>(CI->getCompareOperand()->getType())
7649 ->getBitWidth() < STI.getMinCmpXchgSizeInBits()
7651
7652 if (isAcquireOrStronger(Ord) && IsEmulated)
7653 return Builder.CreateFence(AtomicOrdering::Acquire, SSID.value());
7654
7655 return nullptr;
7656}
7657
7658// Rather than default to SINT when both UINT and SINT are custom, we only
7659// change the opcode when UINT is not legal and SINT is. UINT is preferred when
7660// both are custom since unsigned CVT instructions can lead to slightly better
7661// SASS code with fewer instructions.
7663 EVT ToVT) const {
7664 if (isOperationLegal(Op, ToVT))
7665 return Op;
7666 switch (Op) {
7667 case ISD::FP_TO_UINT:
7669 return ISD::FP_TO_SINT;
7670 break;
7674 break;
7675 case ISD::VP_FP_TO_UINT:
7676 if (isOperationLegal(ISD::VP_FP_TO_SINT, ToVT))
7677 return ISD::VP_FP_TO_SINT;
7678 break;
7679 default:
7680 break;
7681 }
7682 return Op;
7683}
7684
7685// Pin NVPTXTargetObjectFile's vtables to this file.
7687
7692
7694 const SelectionDAG &DAG, unsigned Depth) {
7695 SDValue A = Op.getOperand(0);
7696 SDValue B = Op.getOperand(1);
7697 ConstantSDNode *Selector = dyn_cast<ConstantSDNode>(Op.getOperand(2));
7698 unsigned Mode = Op.getConstantOperandVal(3);
7699
7700 if (!Selector)
7701 return;
7702
7703 KnownBits AKnown = DAG.computeKnownBits(A, Depth);
7704 KnownBits BKnown = DAG.computeKnownBits(B, Depth);
7705
7706 // {b, a} = {{b7, b6, b5, b4}, {b3, b2, b1, b0}}
7707 assert(AKnown.getBitWidth() == 32 && BKnown.getBitWidth() == 32 &&
7708 "PRMT must have i32 operands");
7709 assert(Known.getBitWidth() == 32 && "PRMT must have i32 result");
7710 KnownBits BitField = BKnown.concat(AKnown);
7711
7712 APInt SelectorVal = getPRMTSelector(Selector->getAPIntValue(), Mode);
7713 for (unsigned I : llvm::seq(4)) {
7714 APInt Sel = SelectorVal.extractBits(4, I * 4);
7715 unsigned Idx = Sel.getLoBits(3).getZExtValue();
7716 unsigned Sign = Sel.getHiBits(1).getZExtValue();
7717 KnownBits Byte = BitField.extractBits(8, Idx * 8);
7718 if (Sign)
7719 Byte = KnownBits::ashr(Byte, KnownBits::makeConstant(APInt(8, 7)));
7720 Known.insertBits(Byte, I * 8);
7721 }
7722}
7723
7724static void computeKnownBitsForLoadV(const SDValue Op, KnownBits &Known) {
7726
7727 // We can't do anything without knowing the sign bit.
7728 auto ExtType = LD->getConstantOperandVal(LD->getNumOperands() - 1);
7729 if (ExtType == ISD::SEXTLOAD)
7730 return;
7731
7732 // ExtLoading to vector types is weird and may not work well with known bits.
7733 auto DestVT = LD->getValueType(0);
7734 if (DestVT.isVector())
7735 return;
7736
7737 assert(Known.getBitWidth() == DestVT.getSizeInBits());
7738 auto ElementBitWidth = NVPTXDAGToDAGISel::getFromTypeWidthForLoad(LD);
7739 Known.Zero.setHighBits(Known.getBitWidth() - ElementBitWidth);
7740}
7741
7743 const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
7744 const SelectionDAG &DAG, unsigned Depth) const {
7745 Known.resetAll();
7746
7747 switch (Op.getOpcode()) {
7748 case NVPTXISD::PRMT:
7749 computeKnownBitsForPRMT(Op, Known, DAG, Depth);
7750 break;
7751 case NVPTXISD::LoadV2:
7752 case NVPTXISD::LoadV4:
7753 case NVPTXISD::LoadV8:
7755 break;
7756 default:
7757 break;
7758 }
7759}
7760
7761static std::pair<APInt, APInt> getPRMTDemandedBits(const APInt &SelectorVal,
7762 const APInt &DemandedBits) {
7763 APInt DemandedLHS = APInt(32, 0);
7764 APInt DemandedRHS = APInt(32, 0);
7765
7766 for (unsigned I : llvm::seq(4)) {
7767 if (DemandedBits.extractBits(8, I * 8).isZero())
7768 continue;
7769
7770 APInt Sel = SelectorVal.extractBits(4, I * 4);
7771 unsigned Idx = Sel.getLoBits(3).getZExtValue();
7772 unsigned Sign = Sel.getHiBits(1).getZExtValue();
7773
7774 APInt &Src = Idx < 4 ? DemandedLHS : DemandedRHS;
7775 unsigned ByteStart = (Idx % 4) * 8;
7776 if (Sign)
7777 Src.setBit(ByteStart + 7);
7778 else
7779 Src.setBits(ByteStart, ByteStart + 8);
7780 }
7781
7782 return {DemandedLHS, DemandedRHS};
7783}
7784
7785// Replace undef with 0 as this is easier for other optimizations such as
7786// known bits.
7788 if (!Op)
7789 return SDValue();
7790 if (Op.isUndef())
7791 return DAG.getConstant(0, SDLoc(), MVT::i32);
7792 return Op;
7793}
7794
7796 const APInt &DemandedBits,
7797 SelectionDAG &DAG,
7798 const TargetLowering &TLI,
7799 unsigned Depth) {
7800 assert(PRMT.getOpcode() == NVPTXISD::PRMT);
7801 SDValue Op0 = PRMT.getOperand(0);
7802 SDValue Op1 = PRMT.getOperand(1);
7803 auto *SelectorConst = dyn_cast<ConstantSDNode>(PRMT.getOperand(2));
7804 if (!SelectorConst)
7805 return SDValue();
7806
7807 unsigned Mode = PRMT.getConstantOperandVal(3);
7808 const APInt Selector = getPRMTSelector(SelectorConst->getAPIntValue(), Mode);
7809
7810 // Try to simplify the PRMT to one of the inputs if the used bytes are all
7811 // from the same input in the correct order.
7812 const unsigned LeadingBytes = DemandedBits.countLeadingZeros() / 8;
7813 const unsigned SelBits = (4 - LeadingBytes) * 4;
7814 if (Selector.getLoBits(SelBits) == APInt(32, 0x3210).getLoBits(SelBits))
7815 return Op0;
7816 if (Selector.getLoBits(SelBits) == APInt(32, 0x7654).getLoBits(SelBits))
7817 return Op1;
7818
7819 auto [DemandedLHS, DemandedRHS] = getPRMTDemandedBits(Selector, DemandedBits);
7820
7821 // Attempt to avoid multi-use ops if we don't need anything from them.
7822 SDValue DemandedOp0 =
7823 TLI.SimplifyMultipleUseDemandedBits(Op0, DemandedLHS, DAG, Depth + 1);
7824 SDValue DemandedOp1 =
7825 TLI.SimplifyMultipleUseDemandedBits(Op1, DemandedRHS, DAG, Depth + 1);
7826
7827 DemandedOp0 = canonicalizePRMTInput(DemandedOp0, DAG);
7828 DemandedOp1 = canonicalizePRMTInput(DemandedOp1, DAG);
7829 if ((DemandedOp0 && DemandedOp0 != Op0) ||
7830 (DemandedOp1 && DemandedOp1 != Op1)) {
7831 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
7832 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
7833 return getPRMT(Op0, Op1, Selector.getZExtValue(), SDLoc(PRMT), DAG);
7834 }
7835
7836 return SDValue();
7837}
7838
7840 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
7841 KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const {
7842 Known.resetAll();
7843
7844 switch (Op.getOpcode()) {
7845 case NVPTXISD::PRMT:
7847 *this, Depth)) {
7848 TLO.CombineTo(Op, Result);
7849 return true;
7850 }
7851 break;
7852 default:
7853 break;
7854 }
7855
7856 computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth);
7857 return false;
7858}
return SDValue()
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT S1
constexpr LLT F32
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombineWithOperands - Try DAG combinations for an ADD with operands N0 and N1.
static SDValue PerformADDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
static SDValue PerformVSELECTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformBUILD_VECTORCombine - Target-specific dag combine xforms for ISD::BUILD_VECTOR.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
#define X(NUM, ENUM, NAME)
Definition ELF.h:851
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file contains the declarations of entities that describe floating point environment and related ...
static bool IsIndirectCall(const MachineInstr *MI)
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first DebugLoc that has line number information, given a range of instructions.
Register Reg
Register const TargetRegisterInfo * TRI
#define T
NVPTX address space definition.
static SDValue reportInvalidTensormapReplaceUsage(SDValue Op, SelectionDAG &DAG, unsigned Val)
static SDValue combineADDRSPACECAST(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static cl::opt< bool > sched4reg("nvptx-sched4reg", cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false))
static SDValue lowerTcgen05St(SDValue Op, SelectionDAG &DAG, bool hasOffset=false)
static SDValue PerformEXTRACTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static cl::opt< NVPTX::DivPrecisionLevel > UsePrecDivF32("nvptx-prec-divf32", cl::Hidden, cl::desc("NVPTX Specific: Override the precision of the lowering for f32 fdiv"), cl::values(clEnumValN(NVPTX::DivPrecisionLevel::Approx, "0", "Use div.approx"), clEnumValN(NVPTX::DivPrecisionLevel::Full, "1", "Use div.full"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754, "2", "Use IEEE Compliant F32 div.rnd if available (default)"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754_NoFTZ, "3", "Use IEEE Compliant F32 div.rnd if available, no FTZ")), cl::init(NVPTX::DivPrecisionLevel::IEEE754))
static bool isConstOne(const SDValue &Operand)
static cl::opt< unsigned > FMAContractLevelOpt("nvptx-fma-level", cl::Hidden, cl::desc("NVPTX Specific: FMA contraction (0: don't do it" " 1: do it 2: do it aggressively"), cl::init(2))
static bool IsPTXVectorType(MVT VT)
static SDValue PerformSELECTShiftCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Transform patterns like: (select (ugt shift_amt, BitWidth-1), 0, (srl/shl x, shift_amt)) (select (ult...
static SDValue lowerLOADi1(LoadSDNode *LD, SelectionDAG &DAG)
static SDValue lowerIntrinsicVoid(SDValue Op, SelectionDAG &DAG)
static MachinePointerInfo refinePtrAS(SDValue &Ptr, SelectionDAG &DAG, const DataLayout &DL, const TargetLowering &TL)
static SDValue lowerROT(SDValue Op, SelectionDAG &DAG)
static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL, LLVMContext &Ctx, CallingConv::ID CallConv, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > &Offsets, uint64_t StartingOffset=0)
ComputePTXValueVTs - For the given Type Ty, returns the set of primitive legal-ish MVTs that compose ...
static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static void replaceAtomicSwap128(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI, SmallVectorImpl< SDValue > &Results)
static unsigned getMinMax3Opcode(unsigned MinMax2Opcode)
Get 3-input version of a 2-input min/max opcode.
static SDValue lowerSTOREVector(SDValue Op, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static SDValue lowerLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static void replaceProxyReg(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &Results)
static void ReplaceCopyFromReg_128(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
#define TCGEN05_LD_RED_INST(SHAPE, NUM, TYPE)
static SDValue lowerCTLZCTPOP(SDValue Op, SelectionDAG &DAG)
static SDValue combineMADConstOne(SDValue X, SDValue Add, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
static unsigned getTcgen05LdRedID(Intrinsic::ID IID)
static SDValue combinePRMT(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue combinePackingMovIntoStore(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned Front, unsigned Back)
Fold packing movs into a store.
static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static SDValue getBuildVectorizedValue(unsigned N, const SDLoc &dl, SelectionDAG &DAG, T GetElement)
static Align getArgumentAlignment(const CallBase *CB, Type *Ty, unsigned Idx, const DataLayout &DL)
static SDValue getExtractVectorizedValue(SDValue V, unsigned I, EVT VT, const SDLoc &dl, SelectionDAG &DAG)
static unsigned canMergeParamLoadStoresStartingAt(unsigned Idx, uint32_t AccessSize, const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment)
static EVT getVectorizedVT(EVT VT, unsigned N, LLVMContext &C)
static SDValue lowerIntrinsicWOChain(SDValue Op, SelectionDAG &DAG)
static SDValue PerformFMinMaxCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned PTXVersion, unsigned SmVersion)
PerformFMinMaxCombine - Combine (fmaxnum (fmaxnum a, b), c) into (fmaxnum3 a, b, c).
static SDValue combineMulWide(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static std::optional< unsigned > getScalar3OpcodeForReduction(unsigned ReductionOpcode)
Get 3-input scalar reduction opcode.
static SDValue lowerIntrinsicWChain(SDValue Op, SelectionDAG &DAG)
static bool isNonCoalescableBuildVector(const SDValue &BV)
Check if a v2f32 BUILD_VECTOR provably packs values from non-adjacent register pairs (non-coalescable...
static bool isConstZero(const SDValue &Operand)
static unsigned getF16SubOpc(Intrinsic::ID AddIntrinsicID)
static SDValue LowerVectorArith(SDValue Op, SelectionDAG &DAG)
static SDValue LowerTcgen05MMADisableOutputLane(SDValue Op, SelectionDAG &DAG)
static bool IsMulWideOperandDemotable(SDValue Op, unsigned OptSize, OperandSignedness &S)
IsMulWideOperandDemotable - Checks if the provided DAG node is an operand that can be demoted to OptS...
static unsigned getTcgen05MMADisableOutputLane(unsigned IID)
static std::pair< APInt, APInt > getPRMTDemandedBits(const APInt &SelectorVal, const APInt &DemandedBits)
static APInt computePRMT(APInt A, APInt B, APInt Selector, unsigned Mode)
static ISD::NodeType getScalarOpcodeForReduction(unsigned ReductionOpcode)
static SDValue PerformREMCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue lowerBSWAP(SDValue Op, SelectionDAG &DAG)
static SDValue lowerMSTORE(SDValue Op, SelectionDAG &DAG)
static SDValue PerformMULCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI)
static void computeKnownBitsForPRMT(const SDValue Op, KnownBits &Known, const SelectionDAG &DAG, unsigned Depth)
static SDValue combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Fold unpacking movs into a load by increasing the number of return values.
#define TCGEN05_LD_RED_INTR(SHAPE, NUM, TYPE)
static SDValue lowerTensormapReplaceElemtype(SDValue Op, SelectionDAG &DAG)
static SDValue LowerClusterLaunchControlQueryCancel(SDValue Op, SelectionDAG &DAG)
static std::optional< std::pair< SDValue, SDValue > > lowerTcgen05Ld(SDNode *N, SelectionDAG &DAG, bool HasOffset=false)
static SDValue lowerCvtRSIntrinsics(SDValue Op, SelectionDAG &DAG)
static std::optional< std::pair< SDValue, SDValue > > replaceLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
replaceLoadVector - Convert vector loads into multi-output scalar loads.
static SDValue expandFSH64(SDValue A, SDValue B, SDValue ShiftAmount, SDLoc DL, unsigned Opcode, SelectionDAG &DAG)
static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS, unsigned OptSize, bool &IsSigned)
AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can be demoted to OptSize bits...
static std::pair< MemSDNode *, uint32_t > convertMLOADToLoadWithUsedBytesMask(MemSDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static SDValue TryMULWIDECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply of M/2 bits that produces...
static SDValue lowerPrmtIntrinsic(SDValue Op, SelectionDAG &DAG)
static SDValue combineMulSelectConstOne(SDValue X, SDValue Select, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
static SDValue buildTreeReduction(const SmallVector< SDValue > &Elements, EVT EltTy, ArrayRef< std::pair< unsigned, unsigned > > Ops, const SDLoc &DL, const SDNodeFlags Flags, SelectionDAG &DAG)
Reduces the elements using the scalar operations provided.
static SDValue combineProxyReg(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SmallVector< unsigned, 16 > VectorizePTXValueVTs(const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment, bool IsVAArg=false)
static SDValue getPRMT(SDValue A, SDValue B, SDValue Selector, SDLoc DL, SelectionDAG &DAG, unsigned Mode=NVPTX::PTXPrmtMode::NONE)
static SDValue matchMADConstOnePattern(SDValue Add)
static SDValue correctParamType(SDValue V, EVT ExpectedVT, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, SDLoc dl)
static ISD::NodeType getExtOpcode(const ISD::ArgFlagsTy &Flags)
static cl::opt< bool > UsePrecSqrtF32("nvptx-prec-sqrtf32", cl::Hidden, cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."), cl::init(true))
static void computeKnownBitsForLoadV(const SDValue Op, KnownBits &Known)
static APInt getPRMTSelector(const APInt &Selector, unsigned Mode)
static EVT promoteScalarIntegerPTX(const EVT VT)
PromoteScalarIntegerPTX Used to make sure the arguments/returns are suitable for passing and promote ...
static std::optional< std::tuple< SDValue, SDValue, SDValue > > lowerTcgen05LdRed(SDNode *N, SelectionDAG &DAG)
static SDValue simplifyDemandedBitsForPRMT(SDValue PRMT, const APInt &DemandedBits, SelectionDAG &DAG, const TargetLowering &TLI, unsigned Depth)
static SDValue lowerFREM(SDValue Op, SelectionDAG &DAG)
static SDValue canonicalizePRMTInput(SDValue Op, SelectionDAG &DAG)
static SDValue sinkProxyReg(SDValue R, SDValue Chain, TargetLowering::DAGCombinerInfo &DCI)
static SDValue lowerFSH(SDValue Op, SelectionDAG &DAG)
static SDValue lowerTensormapReplaceSwizzleMode(SDValue Op, SelectionDAG &DAG)
static SDValue combineIntrinsicWOChain(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
static SDValue PromoteBinOpToF32(SDNode *N, SelectionDAG &DAG)
static SDValue PerformSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned int SmVersion)
static std::optional< std::pair< unsigned int, MVT > > getVectorLoweringShape(EVT VectorEVT, const NVPTXSubtarget &STI, unsigned AddressSpace)
static SDValue combineF16AddWithNeg(SDNode *N, SelectionDAG &DAG, Intrinsic::ID AddIntrinsicID)
static cl::opt< bool > UseApproxLog2F32("nvptx-approx-log2f32", cl::desc("NVPTX Specific: whether to use lg2.approx for log2"), cl::init(false))
Whereas CUDA's implementation (see libdevice) uses ex2.approx for exp2(), it does NOT use lg2....
static SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG)
static SDValue combineLOAD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
static SDValue combineSTORE(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
static SDValue PerformSHLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
MachineInstr unsigned OpIdx
uint64_t High
#define P(N)
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
Contains matchers for matching SelectionDAG nodes and values.
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
This file describes how to lower LLVM code to machine code.
Value * RHS
Value * LHS
BinaryOperator * Mul
static const fltSemantics & IEEEsingle()
Definition APFloat.h:296
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Definition APFloat.h:1153
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt getLoBits(unsigned numBits) const
Compute an APInt containing numBits lowbits from this APInt.
Definition APInt.cpp:644
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1563
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
Definition APInt.h:1414
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
Definition APInt.cpp:639
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:967
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1353
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1511
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition APInt.h:436
bool slt(const APInt &RHS) const
Signed less than comparison.
Definition APInt.h:1137
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
Definition APInt.cpp:482
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition APInt.h:433
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
Definition APInt.h:1244
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:186
an instruction that atomically reads a memory location, combines it with another value,...
@ Add
*p = old + v
@ FAdd
*p = old + v
@ Min
*p = old <signed v ? old : v
@ Sub
*p = old - v
@ And
*p = old & v
@ Xor
*p = old ^ v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
@ UDecWrap
Decrement one until a minimum value or zero.
bool isFloatingPointOperation() const
BinOp getOperation() const
This is an SDNode representing atomic operations.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
FunctionType * getFunctionType() const
const APInt & getAPIntValue() const
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Diagnostic information for unsupported feature in backend.
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
Definition Function.cpp:638
Module * getParent()
Get the module that this global value is contained inside of...
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
MCSection * getDataSection() const
static constexpr unsigned NoRegister
Definition MCRegister.h:60
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Definition MCSection.h:573
StringRef getName() const
getName - Get the symbol name.
Definition MCSymbol.h:188
Machine Value Type.
static auto integer_fixedlen_vector_valuetypes()
SimpleValueType SimpleTy
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
static auto fp_fixedlen_vector_valuetypes()
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
This is an abstract virtual class for memory operations.
Align getAlign() const
MachineMemOperand * getMemOperand() const
Return the unique MachineMemOperand object describing the memory reference performed by operation.
EVT getMemoryVT() const
Return the type of the in-memory value.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
static unsigned getFromTypeWidthForLoad(const MemSDNode *Mem)
bool hasTensormapReplaceSwizzleModeSupport(unsigned value) const
bool hasUsedBytesMaskPragma() const
bool hasTensormapReplaceElemtypeSupport(unsigned value) const
bool hasAtomSwap128() const
bool hasF32x2Instructions() const
bool has256BitVectorLoadStore(unsigned AS) const
AtomicOrdering atomicOperationOrderAfterFenceSplit(const Instruction *I) const override
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
const NVPTXTargetMachine * nvTM
bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const override
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
AtomicExpansionKind shouldExpandAtomicRMWInIR(const AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
NVPTXTargetLowering(const NVPTXTargetMachine &TM, const NVPTXSubtarget &STI)
std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &, const SmallVectorImpl< ISD::OutputArg > &, std::optional< unsigned > FirstVAArg, const CallBase &CB, unsigned UniqueCallSite) const
unsigned getPreferredFPToIntOpcode(unsigned Op, EVT FromVT, EVT ToVT) const override
bool useF32FTZ(const MachineFunction &MF) const
SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &ExtraSteps, bool &UseOneConst, bool Reciprocal) const override
Hooks for building estimates in place of slower divisions and square roots.
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
std::string getParamName(const Function *F, int Idx) const
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
NVPTX::DivPrecisionLevel getDivF32Level(const MachineFunction &MF, const SDNode &N) const
bool shouldInsertFencesForAtomic(const Instruction *) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, EVT VT) const override
Return the ValueType of the result of SETCC operations.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
void getTgtMemIntrinsic(SmallVectorImpl< IntrinsicInfo > &Infos, const CallBase &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
bool allowFMA(MachineFunction &MF, CodeGenOptLevel OptLevel) const
bool usePrecSqrtF32(const SDNode *N=nullptr) const
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
MCSection * SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const override
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
unsigned getIROrder() const
Return the node ordering.
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
iterator_range< user_iterator > users()
void setFlags(SDNodeFlags NewFlags)
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
SectionKind - This is a simple POD value that classifies the properties of a section.
Definition SectionKind.h:22
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
LLVM_ABI SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalS...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false, SDNodeFlags Flags={})
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
LLVM_ABI SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:143
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:137
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
void setMaxDivRemBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum div/rem the backend supports.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
Convenience method to set an operation to Promote and specify the type in a single call.
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
std::vector< ArgListEntry > ArgListTy
virtual Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
virtual Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
Inserts in the IR a target-specific intrinsic specifying a fence.
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contrib...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
TargetLowering(const TargetLowering &)=delete
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
TargetOptions Options
MCSymbol * getSymbol(const GlobalValue *GV) const
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:201
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:186
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:318
A raw_ostream that writes to an std::string.
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt pow(const APInt &X, int64_t N)
Compute X^N for N>=0.
Definition APInt.cpp:3206
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition ISDOpcodes.h:41
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:819
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ CTLZ_ZERO_UNDEF
Definition ISDOpcodes.h:788
@ POISON
POISON - A poison node.
Definition ISDOpcodes.h:236
@ MLOAD
Masked load and store - consecutive vector load and store operations with additional mask operand tha...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition ISDOpcodes.h:275
@ BSWAP
Byte Swap and Counting operators.
Definition ISDOpcodes.h:779
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:294
@ ADD
Simple integer binary arithmetic operators.
Definition ISDOpcodes.h:264
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition ISDOpcodes.h:853
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition ISDOpcodes.h:518
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition ISDOpcodes.h:220
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition ISDOpcodes.h:880
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
Definition ISDOpcodes.h:584
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
Definition ISDOpcodes.h:417
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propatate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition ISDOpcodes.h:747
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition ISDOpcodes.h:280
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition ISDOpcodes.h:993
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition ISDOpcodes.h:254
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:844
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readfixedcounter intrinsic.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
Definition ISDOpcodes.h:352
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Definition ISDOpcodes.h:374
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition ISDOpcodes.h:796
@ UNDEF
UNDEF - An undefined node.
Definition ISDOpcodes.h:233
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
Definition ISDOpcodes.h:247
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition ISDOpcodes.h:230
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition ISDOpcodes.h:348
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition ISDOpcodes.h:704
@ SHL
Shift and rotation operations.
Definition ISDOpcodes.h:765
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition ISDOpcodes.h:649
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition ISDOpcodes.h:614
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition ISDOpcodes.h:576
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
Definition ISDOpcodes.h:224
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:850
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition ISDOpcodes.h:811
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
Definition ISDOpcodes.h:386
@ SMULO
Same for multiplication.
Definition ISDOpcodes.h:356
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition ISDOpcodes.h:888
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition ISDOpcodes.h:727
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition ISDOpcodes.h:978
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition ISDOpcodes.h:805
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:328
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition ISDOpcodes.h:110
@ STRICT_FP_TO_UINT
Definition ISDOpcodes.h:478
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:477
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:926
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition ISDOpcodes.h:739
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition ISDOpcodes.h:205
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:304
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
Definition ISDOpcodes.h:241
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition ISDOpcodes.h:565
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition ISDOpcodes.h:959
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
Definition ISDOpcodes.h:997
@ VECREDUCE_FMINIMUM
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition ISDOpcodes.h:856
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition ISDOpcodes.h:833
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition ISDOpcodes.h:534
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition ISDOpcodes.h:365
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:338
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition ISDOpcodes.h:213
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
Definition ISDOpcodes.h:556
LLVM_ABI bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
@ Bitcast
Perform the operation on a different, but equivalently sized type.
@ ATOMIC_CMP_SWAP_B128
These nodes are used to lower atomic instructions with i128 type.
@ DeviceParam
Definition NVPTX.h:214
@ EntryParam
Definition NVPTX.h:208
bool isPackedVectorTy(EVT VT)
DivPrecisionLevel
Definition NVPTX.h:277
match_combine_or< CastInst_match< OpTy, TruncInst >, OpTy > m_TruncOrSelf(const OpTy &Op)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
match_deferred< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
auto m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
@ User
could "use" a pointer
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:557
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:830
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool shouldEmitPTXNoReturn(const Value *V, const TargetMachine &TM)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
MaybeAlign getAlign(const CallInst &I, unsigned Index)
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1668
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition Analysis.cpp:119
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2553
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
Definition MathExtras.h:385
bool isReleaseOrStronger(AtomicOrdering AO)
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition STLExtras.h:2025
auto reverse(ContainerTy &&C)
Definition STLExtras.h:407
std::optional< SyncScope::ID > getAtomicSyncScopeID(const Instruction *I)
A helper function that returns an atomic operation's sync scope; returns std::nullopt if it is not an...
unsigned promoteScalarArgumentSize(unsigned size)
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1752
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
bool shouldPassAsArray(Type *Ty)
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
@ Default
-O2, -Os, -Oz
Definition CodeGen.h:85
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
AtomicOrdering
Atomic ordering for LLVM's memory model.
Align getFunctionByValParamAlign(const Function *F, Type *ArgTy, Align InitialAlign, const DataLayout &DL)
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isParamGridConstant(const Argument &Arg)
bool isAcquireOrStronger(AtomicOrdering AO)
constexpr unsigned BitWidth
bool isKernelFunction(const Function &F)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
Function * getMaybeBitcastedCallee(const CallBase *CB)
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
Align getFunctionArgumentAlignment(const Function *F, Type *Ty, unsigned Idx, const DataLayout &DL)
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
Align getFunctionParamOptimizedAlign(const Function *F, Type *ArgTy, const DataLayout &DL)
Since function arguments are passed via .param space, we may want to increase their alignment in a wa...
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
Extended Value Type.
Definition ValueTypes.h:35
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition ValueTypes.h:403
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:145
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition ValueTypes.h:70
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
Definition ValueTypes.h:129
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition ValueTypes.h:292
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition ValueTypes.h:308
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition ValueTypes.h:155
ElementCount getVectorElementCount() const
Definition ValueTypes.h:358
bool is32BitVector() const
Return true if this is a 32-bit vector type.
Definition ValueTypes.h:205
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:381
uint64_t getScalarSizeInBits() const
Definition ValueTypes.h:393
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:324
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition ValueTypes.h:389
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:176
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition ValueTypes.h:331
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Definition ValueTypes.h:264
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition ValueTypes.h:336
EVT changeElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a type whose attributes match ourselves with the exception of the element type that i...
Definition ValueTypes.h:121
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition ValueTypes.h:165
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition ValueTypes.h:344
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition ValueTypes.h:160
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
Definition KnownBits.h:315
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
Definition KnownBits.h:247
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
void resetAll()
Resets the known state of all bits.
Definition KnownBits.h:72
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
Definition KnownBits.h:233
This class contains a discriminated union of information about pointers in memory operands,...
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
These are IR-level optimization flags that may be propagated to SDNodes.
bool hasAllowContract() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...