1//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that NVPTX uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "NVPTXISelLowering.h"
16#include "NVPTX.h"
17#include "NVPTXISelDAGToDAG.h"
19#include "NVPTXSubtarget.h"
20#include "NVPTXTargetMachine.h"
22#include "NVPTXUtilities.h"
23#include "llvm/ADT/APFloat.h"
24#include "llvm/ADT/APInt.h"
25#include "llvm/ADT/STLExtras.h"
27#include "llvm/ADT/StringRef.h"
40#include "llvm/IR/Argument.h"
41#include "llvm/IR/Attributes.h"
42#include "llvm/IR/Constants.h"
43#include "llvm/IR/DataLayout.h"
46#include "llvm/IR/FPEnv.h"
47#include "llvm/IR/Function.h"
48#include "llvm/IR/GlobalValue.h"
49#include "llvm/IR/IRBuilder.h"
50#include "llvm/IR/Instruction.h"
52#include "llvm/IR/IntrinsicsNVPTX.h"
53#include "llvm/IR/Module.h"
54#include "llvm/IR/Type.h"
55#include "llvm/IR/Value.h"
67#include <algorithm>
68#include <cassert>
69#include <cmath>
70#include <cstdint>
71#include <iterator>
72#include <optional>
73#include <string>
74#include <tuple>
75#include <utility>
76#include <vector>
77
78#define DEBUG_TYPE "nvptx-lower"
79
80using namespace llvm;
81
83 "nvptx-sched4reg",
84 cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));
85
87 "nvptx-fma-level", cl::Hidden,
88 cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
89 " 1: do it 2: do it aggressively"),
90 cl::init(2));
91
93 "nvptx-prec-divf32", cl::Hidden,
95 "NVPTX Specific: Override the precision of the lowering for f32 fdiv"),
97 clEnumValN(NVPTX::DivPrecisionLevel::Approx, "0", "Use div.approx"),
98 clEnumValN(NVPTX::DivPrecisionLevel::Full, "1", "Use div.full"),
100 "Use IEEE Compliant F32 div.rnd if available (default)"),
102 "Use IEEE Compliant F32 div.rnd if available, no FTZ")),
104
106 "nvptx-prec-sqrtf32", cl::Hidden,
107 cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),
108 cl::init(true));
109
110/// Whereas CUDA's implementation (see libdevice) uses ex2.approx for exp2(), it
111/// does NOT use lg2.approx for log2, so this is disabled by default.
113 "nvptx-approx-log2f32",
114 cl::desc("NVPTX Specific: whether to use lg2.approx for log2"),
115 cl::init(false));
116
118 "nvptx-force-min-byval-param-align", cl::Hidden,
119 cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval"
120 " params of device functions."),
121 cl::init(false));
122
125 const SDNode &N) const {
126 // If nvptx-prec-divf32=N is used on the command-line, always honor it
127 if (UsePrecDivF32.getNumOccurrences() > 0)
128 return UsePrecDivF32;
129
130 const SDNodeFlags Flags = N.getFlags();
131 if (Flags.hasApproximateFuncs())
132 return NVPTX::DivPrecisionLevel::Approx;
133
134 return NVPTX::DivPrecisionLevel::IEEE754;
135}
136
138 // If nvptx-prec-sqrtf32 is used on the command-line, always honor it
139 if (UsePrecSqrtF32.getNumOccurrences() > 0)
140 return UsePrecSqrtF32;
141
142 if (N) {
143 const SDNodeFlags Flags = N->getFlags();
144 if (Flags.hasApproximateFuncs())
145 return false;
146 }
147
148 return true;
149}
150
155
156static bool IsPTXVectorType(MVT VT) {
157 switch (VT.SimpleTy) {
158 default:
159 return false;
160 case MVT::v2i1:
161 case MVT::v4i1:
162 case MVT::v2i8:
163 case MVT::v4i8:
164 case MVT::v8i8: // <2 x i8x4>
165 case MVT::v16i8: // <4 x i8x4>
166 case MVT::v2i16:
167 case MVT::v4i16:
168 case MVT::v8i16: // <4 x i16x2>
169 case MVT::v2i32:
170 case MVT::v4i32:
171 case MVT::v2i64:
172 case MVT::v2f16:
173 case MVT::v4f16:
174 case MVT::v8f16: // <4 x f16x2>
175 case MVT::v2bf16:
176 case MVT::v4bf16:
177 case MVT::v8bf16: // <4 x bf16x2>
178 case MVT::v2f32:
179 case MVT::v4f32:
180 case MVT::v2f64:
181 case MVT::v4i64:
182 case MVT::v4f64:
183 case MVT::v8i32:
184 case MVT::v8f32:
185 case MVT::v16f16: // <8 x f16x2>
186 case MVT::v16bf16: // <8 x bf16x2>
187 case MVT::v16i16: // <8 x i16x2>
188 case MVT::v32i8: // <8 x i8x4>
189 return true;
190 }
191}
192
193// When legalizing vector loads/stores, this function is called, which does two
194// things:
195 // 1. Determines whether the vector is something we want to custom lower,
196// std::nullopt is returned if we do not want to custom lower it.
197// 2. If we do want to handle it, returns two parameters:
198// - unsigned int NumElts - The number of elements in the final vector
199// - EVT EltVT - The type of the elements in the final vector
200static std::optional<std::pair<unsigned int, MVT>>
202 unsigned AddressSpace) {
203 const bool CanLowerTo256Bit = STI.has256BitVectorLoadStore(AddressSpace);
204
205 if (CanLowerTo256Bit && VectorEVT.isScalarInteger() &&
206 VectorEVT.getSizeInBits() == 256)
207 return {{4, MVT::i64}};
208
209 if (!VectorEVT.isSimple())
210 return std::nullopt;
211 const MVT VectorVT = VectorEVT.getSimpleVT();
212
213 if (!VectorVT.isVector()) {
214 if (VectorVT == MVT::i128 || VectorVT == MVT::f128)
215 return {{2, MVT::i64}};
216 return std::nullopt;
217 }
218
219 const MVT EltVT = VectorVT.getVectorElementType();
220 const unsigned NumElts = VectorVT.getVectorNumElements();
221
222 // The size of the PTX virtual register that holds a packed type.
223 unsigned PackRegSize;
224
225 // We only handle "native" vector sizes for now, e.g. <4 x double> is not
226 // legal. We can (and should) split that into 2 stores of <2 x double> here
227 // but I'm leaving that as a TODO for now.
228 switch (VectorVT.SimpleTy) {
229 default:
230 return std::nullopt;
231
232 case MVT::v4i64:
233 case MVT::v4f64:
234 // This is a "native" vector type iff the address space is global and the
235 // target supports 256-bit loads/stores
236 if (!CanLowerTo256Bit)
237 return std::nullopt;
238 [[fallthrough]];
239 case MVT::v2i8:
240 case MVT::v2i64:
241 case MVT::v2f64:
242 // This is a "native" vector type
243 return std::pair(NumElts, EltVT);
244
245 case MVT::v16f16: // <8 x f16x2>
246 case MVT::v16bf16: // <8 x bf16x2>
247 case MVT::v16i16: // <8 x i16x2>
248 case MVT::v32i8: // <8 x i8x4>
249 // This can be upsized into a "native" vector type iff the address space is
250 // global and the target supports 256-bit loads/stores.
251 if (!CanLowerTo256Bit)
252 return std::nullopt;
253 [[fallthrough]];
254 case MVT::v2i16: // <1 x i16x2>
255 case MVT::v2f16: // <1 x f16x2>
256 case MVT::v2bf16: // <1 x bf16x2>
257 case MVT::v4i8: // <1 x i8x4>
258 case MVT::v4i16: // <2 x i16x2>
259 case MVT::v4f16: // <2 x f16x2>
260 case MVT::v4bf16: // <2 x bf16x2>
261 case MVT::v8i8: // <2 x i8x4>
262 case MVT::v8f16: // <4 x f16x2>
263 case MVT::v8bf16: // <4 x bf16x2>
264 case MVT::v8i16: // <4 x i16x2>
265 case MVT::v16i8: // <4 x i8x4>
266 PackRegSize = 32;
267 break;
268
269 case MVT::v8f32: // <4 x f32x2>
270 case MVT::v8i32: // <4 x i32x2>
271 // This is a "native" vector type iff the address space is global and the
272 // target supports 256-bit loads/stores
273 if (!CanLowerTo256Bit)
274 return std::nullopt;
275 [[fallthrough]];
276 case MVT::v2f32: // <1 x f32x2>
277 case MVT::v4f32: // <2 x f32x2>
278 case MVT::v2i32: // <1 x i32x2>
279 case MVT::v4i32: // <2 x i32x2>
280 if (!STI.hasF32x2Instructions())
281 return std::pair(NumElts, EltVT);
282 PackRegSize = 64;
283 break;
284 }
285
286 // If we reach here, then we can pack 2 or more elements into a single 32-bit
287 // or 64-bit PTX register and treat the vector as a new vector containing
288 // packed elements.
289
290 // Number of elements to pack in one word.
291 const unsigned NPerReg = PackRegSize / EltVT.getSizeInBits();
292
293 return std::pair(NumElts / NPerReg, MVT::getVectorVT(EltVT, NPerReg));
294}
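// Editor's note (illustrative, not in the original source): under this
// mapping a v8f16 access becomes 4 elements of v2f16, each filling one
// 32-bit register; with 256-bit loads/stores available in the address space,
// a v16f16 access becomes 8 elements of v2f16; and, on targets with f32x2
// instructions, a v8f32 access becomes 4 elements of v2f32 in 64-bit
// registers.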
295
296/// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
297/// legal-ish MVTs that compose it. Unlike ComputeValueVTs, this will legalize
298/// the types as required by the calling convention (with special handling for
299/// i8s).
300/// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
301/// same number of types as the Ins/Outs arrays in LowerFormalArguments,
302/// LowerCall, and LowerReturn.
303static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
304 LLVMContext &Ctx, CallingConv::ID CallConv,
305 Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
307 uint64_t StartingOffset = 0) {
308 SmallVector<EVT, 16> TempVTs;
309 SmallVector<uint64_t, 16> TempOffsets;
310 ComputeValueVTs(TLI, DL, Ty, TempVTs, /*MemVTs=*/nullptr, &TempOffsets,
311 StartingOffset);
312
313 for (const auto [VT, Off] : zip(TempVTs, TempOffsets)) {
314 MVT RegisterVT = TLI.getRegisterTypeForCallingConv(Ctx, CallConv, VT);
315 unsigned NumRegs = TLI.getNumRegistersForCallingConv(Ctx, CallConv, VT);
316
317 // Since we actually can load/store b8, we need to ensure that we'll use
318 // the original sized type for any i8s or i8 vectors.
319 if (VT.getScalarType() == MVT::i8) {
320 if (RegisterVT == MVT::i16)
321 RegisterVT = MVT::i8;
322 else if (RegisterVT == MVT::v2i16)
323 RegisterVT = MVT::v2i8;
324 else
325 assert(RegisterVT == MVT::v4i8 &&
326 "Expected v4i8, v2i16, or i16 for i8 RegisterVT");
327 }
328
329 // TODO: This is horribly incorrect for cases where the vector elements are
330 // not a multiple of bytes (e.g. i1) and legal or i8. However, this problem
331 // has existed for as long as NVPTX has and no one has complained, so we'll
332 // leave it for now.
333 for (unsigned I : seq(NumRegs)) {
334 ValueVTs.push_back(RegisterVT);
335 Offsets.push_back(Off + I * RegisterVT.getStoreSize());
336 }
337 }
338}
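// Editorial example (not in the original source): for a parameter of IR type
// { i32, i8 }, ComputeValueVTs yields [i32, i8] at offsets [0, 4]. The i8
// piece would normally use an i16 register type under the calling convention,
// but the fix-up above restores it to i8 so the value is stored as one byte.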
339
340// We return an EVT that can hold N VTs
341// If the VT is a vector, the resulting EVT is a flat vector with the same
342// element type as VT's element type.
343static EVT getVectorizedVT(EVT VT, unsigned N, LLVMContext &C) {
344 if (N == 1)
345 return VT;
346
347 return VT.isVector() ? EVT::getVectorVT(C, VT.getScalarType(),
348 VT.getVectorNumElements() * N)
349 : EVT::getVectorVT(C, VT, N);
350}
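// Editorial examples: getVectorizedVT(f32, 4, Ctx) yields v4f32, and
// getVectorizedVT(v2f16, 2, Ctx) yields the flattened v4f16.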
351
353 const SDLoc &dl, SelectionDAG &DAG) {
354 if (V.getValueType() == VT) {
355 assert(I == 0 && "Index must be 0 for scalar value");
356 return V;
357 }
358
359 if (!VT.isVector())
360 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, V,
361 DAG.getVectorIdxConstant(I, dl));
362
363 return DAG.getNode(
364 ISD::EXTRACT_SUBVECTOR, dl, VT, V,
366}
367
368template <typename T>
369static inline SDValue getBuildVectorizedValue(unsigned N, const SDLoc &dl,
370 SelectionDAG &DAG, T GetElement) {
371 if (N == 1)
372 return GetElement(0);
373
375 for (const unsigned I : llvm::seq(N)) {
376 SDValue Val = GetElement(I);
377 if (Val.getValueType().isVector())
378 DAG.ExtractVectorElements(Val, Values);
379 else
380 Values.push_back(Val);
381 }
382
383 EVT VT = EVT::getVectorVT(*DAG.getContext(), Values[0].getValueType(),
384 Values.size());
385 return DAG.getBuildVector(VT, dl, Values);
386}
387
388/// PromoteScalarIntegerPTX
389/// Used to make sure the arguments/returns are suitable for passing
390/// and promote them to a larger size if they're not.
391///
392/// Returns the promoted type, or \p VT unchanged if no promotion is needed.
394 if (VT.isScalarInteger()) {
395 switch (PowerOf2Ceil(VT.getFixedSizeInBits())) {
396 default:
398 "Promotion is not suitable for scalars of size larger than 64-bits");
399 case 1:
400 return MVT::i1;
401 case 2:
402 case 4:
403 case 8:
404 return MVT::i8;
405 case 16:
406 return MVT::i16;
407 case 32:
408 return MVT::i32;
409 case 64:
410 return MVT::i64;
411 }
412 }
413 return VT;
414}
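// Editorial examples: i1 stays i1, i3 and i8 both map to i8, i24 maps to
// i32, i48 maps to i64, and non-integer types such as f32 are returned
// unchanged.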
415
416// Check whether we can merge loads/stores of some of the pieces of a
417// flattened function parameter or return value into a single vector
418// load/store.
419//
420// The flattened parameter is represented as a list of EVTs and
421// offsets, and the whole structure is aligned to ParamAlignment. This
422// function determines whether we can load/store pieces of the
423// parameter starting at index Idx using a single vectorized op of
424// size AccessSize. If so, it returns the number of param pieces
425// covered by the vector op. Otherwise, it returns 1.
426template <typename T>
428 unsigned Idx, uint32_t AccessSize, const SmallVectorImpl<EVT> &ValueVTs,
429 const SmallVectorImpl<T> &Offsets, Align ParamAlignment) {
430
431 // Can't vectorize if param alignment is not sufficient.
432 if (ParamAlignment < AccessSize)
433 return 1;
434 // Can't vectorize if offset is not aligned.
435 if (Offsets[Idx] & (AccessSize - 1))
436 return 1;
437
438 EVT EltVT = ValueVTs[Idx];
439 unsigned EltSize = EltVT.getStoreSize();
440
441 // Element is too large to vectorize.
442 if (EltSize >= AccessSize)
443 return 1;
444
445 unsigned NumElts = AccessSize / EltSize;
446 // Can't vectorize if AccessSize is not a multiple of EltSize.
447 if (AccessSize != EltSize * NumElts)
448 return 1;
449
450 // We don't have enough elements to vectorize.
451 if (Idx + NumElts > ValueVTs.size())
452 return 1;
453
454 // PTX ISA can only deal with 2- and 4-element vector ops.
455 if (NumElts != 4 && NumElts != 2)
456 return 1;
457
458 for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {
459 // Types do not match.
460 if (ValueVTs[j] != EltVT)
461 return 1;
462
463 // Elements are not contiguous.
464 if (Offsets[j] - Offsets[j - 1] != EltSize)
465 return 1;
466 }
467 // OK. We can vectorize ValueVTs[Idx..Idx+NumElts).
468 return NumElts;
469}
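// Editorial example: for four f32 pieces at offsets {0, 4, 8, 12} in a
// parameter aligned to 16 bytes, a query at Idx = 0 with AccessSize = 16
// returns 4 (one 128-bit access covers all four pieces); with only 8-byte
// parameter alignment the same query returns 1.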
470
471// Computes whether and how we can vectorize the loads/stores of a
472// flattened function parameter or return value.
473//
474// The flattened parameter is represented as the list of ValueVTs and
475 // Offsets, and is aligned to ParamAlignment bytes. We return a vector with
476 // one entry per generated access, where each entry is the number of
477 // consecutive pieces (1, 2, or 4) covered by that access; the entries sum
478 // to ValueVTs.size().
479template <typename T>
482 const SmallVectorImpl<T> &Offsets, Align ParamAlignment,
483 bool IsVAArg = false) {
484 // For varargs, disable vectorization: every piece is loaded/stored as a
485 // scalar.
486
487 if (IsVAArg)
488 return SmallVector<unsigned>(ValueVTs.size(), 1);
489
490 SmallVector<unsigned, 16> VectorInfo;
491
492 const auto GetNumElts = [&](unsigned I) -> unsigned {
493 for (const unsigned AccessSize : {16, 8, 4, 2}) {
494 const unsigned NumElts = canMergeParamLoadStoresStartingAt(
495 I, AccessSize, ValueVTs, Offsets, ParamAlignment);
496 assert((NumElts == 1 || NumElts == 2 || NumElts == 4) &&
497 "Unexpected vectorization size");
498 if (NumElts != 1)
499 return NumElts;
500 }
501 return 1;
502 };
503
504 // Check what we can vectorize using 128/64/32-bit accesses.
505 for (unsigned I = 0, E = ValueVTs.size(); I != E;) {
506 const unsigned NumElts = GetNumElts(I);
507 VectorInfo.push_back(NumElts);
508 I += NumElts;
509 }
510 assert(std::accumulate(VectorInfo.begin(), VectorInfo.end(), 0u) ==
511 ValueVTs.size());
512 return VectorInfo;
513}
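// Editorial example: six f32 pieces at offsets {0, 4, 8, 12, 16, 20} with
// 16-byte parameter alignment produce VectorInfo = [4, 2] (a 4-element access
// followed by a 2-element access); with 8-byte alignment the result is
// [2, 2, 2].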
514
515// NVPTXTargetLowering Constructor.
517 const NVPTXSubtarget &STI)
518 : TargetLowering(TM, STI), nvTM(&TM), STI(STI), GlobalUniqueCallSite(0) {
519 // Always lower memset, memcpy, and memmove intrinsics to load/store
520 // instructions rather than generating calls to memset, memcpy, or
521 // memmove.
525
528
529 // Jump is Expensive. Don't create extra control flow for 'and', 'or'
530 // condition branches.
531 setJumpIsExpensive(true);
532
533 // Wide divides are _very_ slow. Try to reduce the width of the divide if
534 // possible.
535 addBypassSlowDiv(64, 32);
536
537 // By default, use the Source scheduling
538 if (sched4reg)
540 else
542
543 auto setFP16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
544 LegalizeAction NoF16Action) {
545 bool IsOpSupported = STI.allowFP16Math();
546 switch (Op) {
547 // Several FP16 instructions are available on sm_80 only.
548 case ISD::FMINNUM:
549 case ISD::FMAXNUM:
552 case ISD::FMAXIMUM:
553 case ISD::FMINIMUM:
554 case ISD::FMAXIMUMNUM:
555 case ISD::FMINIMUMNUM:
556 IsOpSupported &= STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;
557 break;
558 case ISD::FEXP2:
559 IsOpSupported &= STI.getSmVersion() >= 75 && STI.getPTXVersion() >= 70;
560 break;
561 }
562 setOperationAction(Op, VT, IsOpSupported ? Action : NoF16Action);
563 };
564
565 auto setBF16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
566 LegalizeAction NoBF16Action) {
567 bool IsOpSupported = STI.hasNativeBF16Support(Op);
569 Op, VT, IsOpSupported ? Action : NoBF16Action);
570 };
571
572 auto setI16x2OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
573 LegalizeAction NoI16x2Action) {
574 bool IsOpSupported = false;
575 // These instructions are available on sm_90 only.
576 switch (Op) {
577 case ISD::ADD:
578 case ISD::SMAX:
579 case ISD::SMIN:
580 case ISD::UMIN:
581 case ISD::UMAX:
582 IsOpSupported = STI.getSmVersion() >= 90 && STI.getPTXVersion() >= 80;
583 break;
584 }
585 setOperationAction(Op, VT, IsOpSupported ? Action : NoI16x2Action);
586 };
587
588 addRegisterClass(MVT::i1, &NVPTX::B1RegClass);
589 addRegisterClass(MVT::i16, &NVPTX::B16RegClass);
590 addRegisterClass(MVT::v2i16, &NVPTX::B32RegClass);
591 addRegisterClass(MVT::v4i8, &NVPTX::B32RegClass);
592 addRegisterClass(MVT::i32, &NVPTX::B32RegClass);
593 addRegisterClass(MVT::i64, &NVPTX::B64RegClass);
594 addRegisterClass(MVT::f32, &NVPTX::B32RegClass);
595 addRegisterClass(MVT::f64, &NVPTX::B64RegClass);
596 addRegisterClass(MVT::f16, &NVPTX::B16RegClass);
597 addRegisterClass(MVT::v2f16, &NVPTX::B32RegClass);
598 addRegisterClass(MVT::bf16, &NVPTX::B16RegClass);
599 addRegisterClass(MVT::v2bf16, &NVPTX::B32RegClass);
600
601 if (STI.hasF32x2Instructions()) {
602 addRegisterClass(MVT::v2f32, &NVPTX::B64RegClass);
603 addRegisterClass(MVT::v2i32, &NVPTX::B64RegClass);
604 }
605
606 // Conversion to/from FP16/FP16x2 is always legal.
611
613 if (STI.getSmVersion() >= 30 && STI.getPTXVersion() > 31)
615
616 setFP16OperationAction(ISD::SETCC, MVT::f16, Legal, Promote);
617 setFP16OperationAction(ISD::SETCC, MVT::v2f16, Legal, Expand);
618
619 // Conversion to/from BF16/BF16x2 is always legal.
624
625 setBF16OperationAction(ISD::SETCC, MVT::v2bf16, Legal, Expand);
626 setBF16OperationAction(ISD::SETCC, MVT::bf16, Legal, Promote);
627 if (getOperationAction(ISD::SETCC, MVT::bf16) == Promote)
628 AddPromotedToType(ISD::SETCC, MVT::bf16, MVT::f32);
629
630 // Conversion to/from i16/i16x2 is always legal.
635
640
641 // No support for these operations with v2f32/v2i32
642 setOperationAction(ISD::INSERT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32}, Expand);
643 setOperationAction(ISD::VECTOR_SHUFFLE, {MVT::v2f32, MVT::v2i32}, Expand);
644
647 MVT::v2i32, Expand);
648
649 // Need custom lowering in case the index is dynamic.
650 if (STI.hasF32x2Instructions())
651 setOperationAction(ISD::EXTRACT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32},
652 Custom);
653
654 // Custom conversions to/from v2i8.
656
657 // Only logical ops can be done on v4i8/v2i32 directly, others must be done
658 // elementwise.
675 {MVT::v4i8, MVT::v2i32}, Expand);
676
677 // Operations not directly supported by NVPTX.
678 for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,
679 MVT::v2f32, MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16,
680 MVT::v4i8, MVT::i32, MVT::v2i32, MVT::i64}) {
683 }
684
685 // We don't want ops like FMINIMUM or UMAX to be lowered to SETCC+VSELECT.
686 setOperationAction(ISD::VSELECT, {MVT::v2f32, MVT::v2i32}, Expand);
687
688 // Some SIGN_EXTEND_INREG can be done using cvt instruction.
689 // For others we will expand to a SHL/SRA pair.
695 setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::v2i16, MVT::v2i32}, Expand);
696
703
706
708 {MVT::i8, MVT::i16, MVT::v2i16, MVT::i32, MVT::i64},
709 Expand);
710
711 if (STI.hasHWROT32()) {
714 Custom);
715 }
716
717 setOperationAction(ISD::BR_JT, MVT::Other, STI.hasBrx() ? Legal : Expand);
719
720 // We want to legalize constant-related memmove and memcpy
721 // intrinsics.
723
724 // FP extload/truncstore is not legal in PTX. We need to expand all these.
725 for (auto FloatVTs :
727 for (MVT ValVT : FloatVTs) {
728 for (MVT MemVT : FloatVTs) {
729 setLoadExtAction(ISD::EXTLOAD, ValVT, MemVT, Expand);
730 setTruncStoreAction(ValVT, MemVT, Expand);
731 }
732 }
733 }
734
735 // To improve CodeGen we'll legalize any-extend loads to zext loads. This is
736 // how they'll be lowered in ISel anyway, and by doing this a little earlier
737 // we allow for more DAG combine opportunities.
738 for (auto IntVTs :
740 for (MVT ValVT : IntVTs)
741 for (MVT MemVT : IntVTs)
742 if (isTypeLegal(ValVT))
743 setLoadExtAction(ISD::EXTLOAD, ValVT, MemVT, Custom);
744
745 // PTX does not support load/store of predicate registers.
747 for (MVT VT : MVT::integer_valuetypes()) {
749 Promote);
750 setTruncStoreAction(VT, MVT::i1, Expand);
751 }
752
753 // Disable generation of extload/truncstore for v2i32/v2i16/v2i8. The generic
754 // expansion for these nodes when they are unaligned is incorrect if the
755 // type is a vector.
756 //
757 // TODO: Fix the generic expansion for these nodes found in
758 // TargetLowering::expandUnalignedLoad/Store.
760 MVT::v2i8, Expand);
762 {MVT::v2i8, MVT::v2i16}, Expand);
763 setTruncStoreAction(MVT::v2i16, MVT::v2i8, Expand);
764 setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
765 setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
766
767 // Register custom handling for illegal type loads/stores. We'll try to custom
768 // lower almost all illegal types and logic in the lowering will discard cases
769 // we can't handle.
770 setOperationAction({ISD::LOAD, ISD::STORE}, {MVT::i128, MVT::i256, MVT::f128},
771 Custom);
773 if (!isTypeLegal(VT) && VT.getStoreSizeInBits() <= 256)
775 Custom);
776
777 // Custom legalization for LDU intrinsics.
778 // TODO: The logic to lower these is not very robust and we should rewrite it.
779 // Perhaps LDU should not be represented as an intrinsic at all.
782 if (IsPTXVectorType(VT))
784
788 MVT::i1, Expand);
789
790 // This is legal in NVPTX
795
796 setOperationAction(ISD::DYNAMIC_STACKALLOC, {MVT::i32, MVT::i64}, Custom);
798
799 // TRAP can be lowered to PTX trap
800 setOperationAction(ISD::TRAP, MVT::Other, Legal);
801 // DEBUGTRAP can be lowered to PTX brkpt
803
804 // Support varargs.
809
811 {MVT::i16, MVT::i32, MVT::i64}, Legal);
812
814 Promote);
817
818 setI16x2OperationAction(ISD::ABS, MVT::v2i16, Legal, Custom);
819 setI16x2OperationAction(ISD::SMIN, MVT::v2i16, Legal, Custom);
820 setI16x2OperationAction(ISD::SMAX, MVT::v2i16, Legal, Custom);
821 setI16x2OperationAction(ISD::UMIN, MVT::v2i16, Legal, Custom);
822 setI16x2OperationAction(ISD::UMAX, MVT::v2i16, Legal, Custom);
823 setI16x2OperationAction(ISD::CTPOP, MVT::v2i16, Legal, Expand);
824 setI16x2OperationAction(ISD::CTLZ, MVT::v2i16, Legal, Expand);
825
826 setI16x2OperationAction(ISD::ADD, MVT::v2i16, Legal, Custom);
827 setI16x2OperationAction(ISD::SUB, MVT::v2i16, Legal, Custom);
828 setI16x2OperationAction(ISD::MUL, MVT::v2i16, Legal, Custom);
829 setI16x2OperationAction(ISD::SHL, MVT::v2i16, Legal, Custom);
830 setI16x2OperationAction(ISD::SREM, MVT::v2i16, Legal, Custom);
831 setI16x2OperationAction(ISD::UREM, MVT::v2i16, Legal, Custom);
832
833 // Other arithmetic and logic ops are unsupported.
837 {MVT::v2i16, MVT::v2i32}, Expand);
838
839 // v2i32 is not supported for any arithmetic operations
844 MVT::v2i32, Expand);
845
850 if (STI.getPTXVersion() >= 43) {
855 }
856
858 setOperationAction(ISD::CTTZ, {MVT::v2i16, MVT::v2i32}, Expand);
861
862 // PTX does not directly support SELP of i1, so promote to i32 first
864
865 // PTX cannot multiply two i64s in a single instruction.
868
869 // We have some custom DAG combine patterns for these nodes
879
880 // setcc for f16x2 and bf16x2 needs special handling to prevent
881 // the legalizer's attempt to scalarize it due to v2i1 not being legal.
882 if (STI.allowFP16Math() || STI.hasBF16Math())
884
885 // Vector reduction operations. These may be turned into shuffle or tree
886 // reductions depending on what instructions are available for each type.
888 MVT EltVT = VT.getVectorElementType();
889 if (EltVT == MVT::f32 || EltVT == MVT::f64) {
892 VT, Custom);
893 }
894 }
895
896 // Promote fp16 arithmetic if fp16 hardware isn't available or the
897 // user passed --nvptx-no-fp16-math. The flag is useful because,
898 // although sm_53+ GPUs have some sort of FP16 support in
899 // hardware, only sm_53 and sm_60 have a full implementation. Others
900 // only have a token amount of hardware and are likely to run faster
901 // by using fp32 units instead.
902 for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB, ISD::FMA}) {
903 setFP16OperationAction(Op, MVT::f16, Legal, Promote);
904 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
905 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
906 // bf16 must be promoted to f32.
907 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
908 if (getOperationAction(Op, MVT::bf16) == Promote)
909 AddPromotedToType(Op, MVT::bf16, MVT::f32);
910 setOperationAction(Op, MVT::v2f32,
911 STI.hasF32x2Instructions() ? Legal : Expand);
912 }
913
914 // On SM80, we select add/mul/sub as fma to avoid promotion to float
915 for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB}) {
916 for (const auto &VT : {MVT::bf16, MVT::v2bf16}) {
917 if (!STI.hasNativeBF16Support(Op) && STI.hasNativeBF16Support(ISD::FMA)) {
919 }
920 }
921 }
922
923 // f16/f16x2 neg was introduced in PTX ISA 6.0, sm_53.
924 const bool IsFP16FP16x2NegAvailable = STI.getSmVersion() >= 53 &&
925 STI.getPTXVersion() >= 60 &&
926 STI.allowFP16Math();
927 for (const auto &VT : {MVT::f16, MVT::v2f16})
929 IsFP16FP16x2NegAvailable ? Legal : Expand);
930
931 setBF16OperationAction(ISD::FNEG, MVT::bf16, Legal, Expand);
932 setBF16OperationAction(ISD::FNEG, MVT::v2bf16, Legal, Expand);
933 setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
934 // (would be) Library functions.
935
936 // These map to conversion instructions for scalar FP types.
937 for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT,
939 setOperationAction(Op, MVT::f16, Legal);
940 setOperationAction(Op, MVT::f32, Legal);
941 setOperationAction(Op, MVT::f64, Legal);
942 setOperationAction(Op, MVT::v2f16, Expand);
943 setOperationAction(Op, MVT::v2bf16, Expand);
944 setOperationAction(Op, MVT::v2f32, Expand);
945 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
946 if (getOperationAction(Op, MVT::bf16) == Promote)
947 AddPromotedToType(Op, MVT::bf16, MVT::f32);
948 }
949
950 if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71) {
952 }
953 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
954 for (MVT VT : {MVT::bf16, MVT::f32, MVT::f64}) {
957 }
958 }
959
960 // Expand v2f32 = fp_extend
962 // Expand v2[b]f16 = fp_round v2f32
963 setOperationAction(ISD::FP_ROUND, {MVT::v2bf16, MVT::v2f16}, Expand);
964
965 // sm_80 only has conversions between f32 and bf16. Custom lower all other
966 // bf16 conversions.
967 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
968 for (MVT VT : {MVT::i1, MVT::i16, MVT::i32, MVT::i64}) {
971 VT, Custom);
972 }
975 MVT::bf16, Custom);
976 }
977
984 AddPromotedToType(ISD::FROUND, MVT::bf16, MVT::f32);
985
986 // 'Expand' implements FCOPYSIGN without calling an external library.
993
994 // These map to corresponding instructions for f32/f64. f16 must be
995 // promoted to f32. v2f16 is expanded to f16, which is then promoted
996 // to f32.
997 for (const auto &Op :
999 setOperationAction(Op, MVT::f16, Promote);
1000 setOperationAction(Op, MVT::f32, Legal);
1001 // only div/rem/sqrt are legal for f64
1002 if (Op == ISD::FDIV || Op == ISD::FREM || Op == ISD::FSQRT) {
1003 setOperationAction(Op, MVT::f64, Legal);
1004 }
1005 setOperationAction(Op, {MVT::v2f16, MVT::v2bf16, MVT::v2f32}, Expand);
1006 setOperationAction(Op, MVT::bf16, Promote);
1007 AddPromotedToType(Op, MVT::bf16, MVT::f32);
1008 }
1009 setOperationAction(ISD::FREM, {MVT::f32, MVT::f64}, Custom);
1010
1011 setOperationAction(ISD::FABS, {MVT::f32, MVT::f64}, Legal);
1012 setOperationAction(ISD::FABS, MVT::v2f32, Expand);
1013 if (STI.getPTXVersion() >= 65) {
1014 setFP16OperationAction(ISD::FABS, MVT::f16, Legal, Promote);
1015 setFP16OperationAction(ISD::FABS, MVT::v2f16, Legal, Expand);
1016 } else {
1018 setOperationAction(ISD::FABS, MVT::v2f16, Expand);
1019 }
1020 setBF16OperationAction(ISD::FABS, MVT::v2bf16, Legal, Expand);
1021 setBF16OperationAction(ISD::FABS, MVT::bf16, Legal, Promote);
1022 if (getOperationAction(ISD::FABS, MVT::bf16) == Promote)
1023 AddPromotedToType(ISD::FABS, MVT::bf16, MVT::f32);
1024
1025 for (const auto &Op :
1027 setOperationAction(Op, MVT::f32, Legal);
1028 setOperationAction(Op, MVT::f64, Legal);
1029 setFP16OperationAction(Op, MVT::f16, Legal, Promote);
1030 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
1031 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
1032 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
1033 if (getOperationAction(Op, MVT::bf16) == Promote)
1034 AddPromotedToType(Op, MVT::bf16, MVT::f32);
1035 setOperationAction(Op, MVT::v2f32, Expand);
1036 }
1037 bool SupportsF32MinMaxNaN =
1038 STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;
1039 for (const auto &Op : {ISD::FMINIMUM, ISD::FMAXIMUM}) {
1040 setOperationAction(Op, MVT::f32, SupportsF32MinMaxNaN ? Legal : Expand);
1041 setFP16OperationAction(Op, MVT::f16, Legal, Expand);
1042 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
1043 setBF16OperationAction(Op, MVT::bf16, Legal, Expand);
1044 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
1045 setOperationAction(Op, MVT::v2f32, Expand);
1046 }
1047
1048 // Custom lowering for inline asm with 128-bit operands
1051
1052 // FEXP2 support:
1053 // - f32
1054 // - f16/f16x2 (sm_70+, PTX 7.0+)
1055 // - bf16/bf16x2 (sm_90+, PTX 7.8+)
1056 // When f16/bf16 types aren't supported, they are promoted/expanded to f32.
1058 setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
1059 setFP16OperationAction(ISD::FEXP2, MVT::f16, Legal, Promote);
1060 setFP16OperationAction(ISD::FEXP2, MVT::v2f16, Legal, Expand);
1061 setBF16OperationAction(ISD::FEXP2, MVT::bf16, Legal, Promote);
1062 setBF16OperationAction(ISD::FEXP2, MVT::v2bf16, Legal, Expand);
1063
1064 // FLOG2 supports f32 only
1065 // f16/bf16 types aren't supported, but they are promoted/expanded to f32.
1066 if (UseApproxLog2F32) {
1068 setOperationPromotedToType(ISD::FLOG2, MVT::f16, MVT::f32);
1069 setOperationPromotedToType(ISD::FLOG2, MVT::bf16, MVT::f32);
1070 setOperationAction(ISD::FLOG2, {MVT::v2f16, MVT::v2bf16, MVT::v2f32},
1071 Expand);
1072 }
1073
1074 setOperationAction(ISD::ADDRSPACECAST, {MVT::i32, MVT::i64}, Custom);
1075
1076 setOperationAction(ISD::ATOMIC_LOAD_SUB, {MVT::i32, MVT::i64}, Expand);
1077
1078 // atom.b128 is legal in PTX but since we don't represent i128 as a legal
1079 // type, we need to custom lower it.
1081 Custom);
1082
1083 // Now deduce the information based on the above mentioned
1084 // actions
1085 computeRegisterProperties(STI.getRegisterInfo());
1086
1087 // PTX support for 16-bit CAS is emulated. Only use 32-bit and wider.
1088 setMinCmpXchgSizeInBits(STI.getMinCmpXchgSizeInBits());
1089 setMaxAtomicSizeInBitsSupported(STI.hasAtomSwap128() ? 128 : 64);
1091
1092 // Custom lowering for tcgen05.ld vector operands
1094 {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,
1095 MVT::v32i32, MVT::v64i32, MVT::v128i32, MVT::v2f32,
1096 MVT::v4f32, MVT::v8f32, MVT::v16f32, MVT::v32f32,
1097 MVT::v64f32, MVT::v128f32},
1098 Custom);
1099
1100 // Custom lowering for tcgen05.st vector operands
1102 {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,
1103 MVT::v32i32, MVT::v64i32, MVT::v128i32, MVT::Other},
1104 Custom);
1105
1106 // Enable custom lowering for the following:
1107 // * MVT::i128 - clusterlaunchcontrol
1108 // * MVT::i32 - prmt
1109 // * MVT::v4f32 - cvt_rs fp{4/6/8}x4 intrinsics
1110 // * MVT::Other - internal.addrspace.wrap
1112 {MVT::i32, MVT::i128, MVT::v4f32, MVT::Other}, Custom);
1113
1114 // Custom lowering for bswap
1115 setOperationAction(ISD::BSWAP, {MVT::i16, MVT::i32, MVT::i64, MVT::v2i16},
1116 Custom);
1117}
1118
1121 if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
1122 VT.getScalarType() == MVT::i1)
1123 return TypeSplitVector;
1125}
1126
1128 int Enabled, int &ExtraSteps,
1129 bool &UseOneConst,
1130 bool Reciprocal) const {
1133 return SDValue();
1134
1135 if (ExtraSteps == ReciprocalEstimate::Unspecified)
1136 ExtraSteps = 0;
1137
1138 SDLoc DL(Operand);
1139 EVT VT = Operand.getValueType();
1140 bool Ftz = useF32FTZ(DAG.getMachineFunction());
1141
1142 auto MakeIntrinsicCall = [&](Intrinsic::ID IID) {
1143 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
1144 DAG.getConstant(IID, DL, MVT::i32), Operand);
1145 };
1146
1147 // The sqrt and rsqrt refinement processes assume we always start out with an
1148 // approximation of the rsqrt. Therefore, if we're going to do any refinement
1149 // (i.e. ExtraSteps > 0), we must return an rsqrt. But if we're *not* doing
1150 // any refinement, we must return a regular sqrt.
1151 if (Reciprocal || ExtraSteps > 0) {
1152 if (VT == MVT::f32)
1153 return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
1154 : Intrinsic::nvvm_rsqrt_approx_f);
1155 else if (VT == MVT::f64)
1156 return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);
1157 else
1158 return SDValue();
1159 } else {
1160 if (VT == MVT::f32)
1161 return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
1162 : Intrinsic::nvvm_sqrt_approx_f);
1163 else {
1164 // There's no sqrt.approx.f64 instruction, so we emit
1165 // reciprocal(rsqrt(x)). This is faster than
1166 // select(x == 0, 0, x * rsqrt(x)). (In fact, it's faster than plain
1167 // x * rsqrt(x).)
1168 return DAG.getNode(
1170 DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
1171 MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
1172 }
1173 }
1174}
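// Editor's note: the intrinsics used above correspond to the PTX instructions
// rsqrt.approx[.ftz].f32, sqrt.approx[.ftz].f32, rsqrt.approx.f64, and
// rcp.approx.ftz.f64.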
1175
1177 const DataLayout &DL, Type *RetTy, const ArgListTy &Args,
1179 std::optional<unsigned> FirstVAArg, const CallBase &CB,
1180 unsigned UniqueCallSite) const {
1181 auto PtrVT = getPointerTy(DL);
1182
1183 std::string Prototype;
1184 raw_string_ostream O(Prototype);
1185 O << "prototype_" << UniqueCallSite << " : .callprototype ";
1186
1187 if (RetTy->isVoidTy()) {
1188 O << "()";
1189 } else {
1190 O << "(";
1191 if (shouldPassAsArray(RetTy)) {
1192 const Align RetAlign = getArgumentAlignment(&CB, RetTy, 0, DL);
1193 O << ".param .align " << RetAlign.value() << " .b8 _["
1194 << DL.getTypeAllocSize(RetTy) << "]";
1195 } else if (RetTy->isFloatingPointTy() || RetTy->isIntegerTy()) {
1196 unsigned size = 0;
1197 if (auto *ITy = dyn_cast<IntegerType>(RetTy)) {
1198 size = ITy->getBitWidth();
1199 } else {
1200 assert(RetTy->isFloatingPointTy() &&
1201 "Floating point type expected here");
1202 size = RetTy->getPrimitiveSizeInBits();
1203 }
1204 // PTX ABI requires all scalar return values to be at least 32
1205 // bits in size. fp16 normally uses .b16 as its storage type in
1206 // PTX, so its size must be adjusted here, too.
1208
1209 O << ".param .b" << size << " _";
1210 } else if (isa<PointerType>(RetTy)) {
1211 O << ".param .b" << PtrVT.getSizeInBits() << " _";
1212 } else {
1213 llvm_unreachable("Unknown return type");
1214 }
1215 O << ") ";
1216 }
1217 O << "_ (";
1218
1219 bool first = true;
1220
1221 const unsigned NumArgs = FirstVAArg.value_or(Args.size());
1222 auto AllOuts = ArrayRef(Outs);
1223 for (const unsigned I : llvm::seq(NumArgs)) {
1224 const auto ArgOuts =
1225 AllOuts.take_while([I](auto O) { return O.OrigArgIndex == I; });
1226 AllOuts = AllOuts.drop_front(ArgOuts.size());
1227
1228 Type *Ty = Args[I].Ty;
1229 if (!first) {
1230 O << ", ";
1231 }
1232 first = false;
1233
1234 if (ArgOuts[0].Flags.isByVal()) {
1235 // Indirect calls need strict ABI alignment so we disable optimizations by
1236 // not providing a function to optimize.
1237 Type *ETy = Args[I].IndirectType;
1238 Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1239 Align ParamByValAlign =
1240 getFunctionByValParamAlign(/*F=*/nullptr, ETy, InitialAlign, DL);
1241
1242 O << ".param .align " << ParamByValAlign.value() << " .b8 _["
1243 << ArgOuts[0].Flags.getByValSize() << "]";
1244 } else {
1245 if (shouldPassAsArray(Ty)) {
1246 Align ParamAlign =
1247 getArgumentAlignment(&CB, Ty, I + AttributeList::FirstArgIndex, DL);
1248 O << ".param .align " << ParamAlign.value() << " .b8 _["
1249 << DL.getTypeAllocSize(Ty) << "]";
1250 continue;
1251 }
1252 // i8 types in IR will be i16 types in SDAG
1253 assert((getValueType(DL, Ty) == ArgOuts[0].VT ||
1254 (getValueType(DL, Ty) == MVT::i8 && ArgOuts[0].VT == MVT::i16)) &&
1255 "type mismatch between callee prototype and arguments");
1256 // scalar type
1257 unsigned sz = 0;
1258 if (auto *ITy = dyn_cast<IntegerType>(Ty)) {
1259 sz = promoteScalarArgumentSize(ITy->getBitWidth());
1260 } else if (isa<PointerType>(Ty)) {
1261 sz = PtrVT.getSizeInBits();
1262 } else {
1263 sz = Ty->getPrimitiveSizeInBits();
1264 }
1265 O << ".param .b" << sz << " _";
1266 }
1267 }
1268
1269 if (FirstVAArg)
1270 O << (first ? "" : ",") << " .param .align "
1271 << STI.getMaxRequiredAlignment() << " .b8 _[]";
1272 O << ")";
1273 if (shouldEmitPTXNoReturn(&CB, *nvTM))
1274 O << " .noreturn";
1275 O << ";";
1276
1277 return Prototype;
1278}
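// Editorial example of the string built above (assuming a 64-bit target and
// call site 0): for a callee with C-level signature
//   float callee(int x, void *p);
// the emitted prototype is roughly
//   prototype_0 : .callprototype (.param .b32 _) _ (.param .b32 _, .param .b64 _);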
1279
1281 const Function *F, Type *Ty, unsigned Idx, const DataLayout &DL) const {
1282 return getAlign(*F, Idx).value_or(getFunctionParamOptimizedAlign(F, Ty, DL));
1283}
1284
1285Align NVPTXTargetLowering::getArgumentAlignment(const CallBase *CB, Type *Ty,
1286 unsigned Idx,
1287 const DataLayout &DL) const {
1288 if (!CB) {
1289 // No call-site information; fall back to the ABI type alignment.
1290 return DL.getABITypeAlign(Ty);
1291 }
1292
1293 const Function *DirectCallee = CB->getCalledFunction();
1294
1295 if (!DirectCallee) {
1296 // We don't have a direct function symbol, but that may be because of
1297 // constant cast instructions in the call.
1298
1299 // With bitcast'd call targets, the instruction will be the call
1300 if (const auto *CI = dyn_cast<CallInst>(CB)) {
1301 // Check if we have call alignment metadata
1302 if (MaybeAlign StackAlign = getAlign(*CI, Idx))
1303 return StackAlign.value();
1304 }
1305 DirectCallee = getMaybeBitcastedCallee(CB);
1306 }
1307
1308 // Check for function alignment information if we found that the
1309 // ultimate target is a Function
1310 if (DirectCallee)
1311 return getFunctionArgumentAlignment(DirectCallee, Ty, Idx, DL);
1312
1313 // Call is indirect, fall back to the ABI type alignment
1314 return DL.getABITypeAlign(Ty);
1315}
1316
1318 const GlobalAddressSDNode *Func) {
1319 if (!Func)
1320 return false;
1321 if (auto *CalleeFunc = dyn_cast<Function>(Func->getGlobal()))
1322 return CB->getFunctionType() != CalleeFunc->getFunctionType();
1323 return false;
1324}
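// Editorial example: a call site whose function type is i32 (i64) while the
// callee is declared as i32 (i32) returns true here, and LowerCall will then
// emit it as an indirect call with an explicit .callprototype.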
1325
1327 const DataLayout &DL,
1328 const TargetLowering &TL) {
1329 if (Ptr->getOpcode() == ISD::FrameIndex) {
1330 auto Ty = TL.getPointerTy(DL, ADDRESS_SPACE_LOCAL);
1331 Ptr = DAG.getAddrSpaceCast(SDLoc(), Ty, Ptr, ADDRESS_SPACE_GENERIC,
1333
1335 }
1336
1337 // Peel off an addrspacecast to generic and load directly from the specific
1338 // address space.
1339 if (Ptr->getOpcode() == ISD::ADDRSPACECAST) {
1340 const auto *ASC = cast<AddrSpaceCastSDNode>(Ptr);
1341 if (ASC->getDestAddressSpace() == ADDRESS_SPACE_GENERIC) {
1342 Ptr = ASC->getOperand(0);
1343 return MachinePointerInfo(ASC->getSrcAddressSpace());
1344 }
1345 }
1346
1347 return MachinePointerInfo();
1348}
1349
1351 if (Flags.isSExt())
1352 return ISD::SIGN_EXTEND;
1353 if (Flags.isZExt())
1354 return ISD::ZERO_EXTEND;
1355 return ISD::ANY_EXTEND;
1356}
1357
1359 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
1360 SDLoc dl) {
1361 const EVT ActualVT = V.getValueType();
1362 assert((ActualVT == ExpectedVT ||
1363 (ExpectedVT.isInteger() && ActualVT.isInteger())) &&
1364 "Non-integer argument type size mismatch");
1365 if (ExpectedVT.bitsGT(ActualVT))
1366 return DAG.getNode(getExtOpcode(Flags), dl, ExpectedVT, V);
1367 if (ExpectedVT.bitsLT(ActualVT))
1368 return DAG.getNode(ISD::TRUNCATE, dl, ExpectedVT, V);
1369
1370 return V;
1371}
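// Editorial example: an i8 IR value carried as i16 in the DAG that must be
// stored to a 32-bit parameter slot is sign-, zero-, or any-extended to i32
// according to the argument flags; a value wider than expected is truncated.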
1372
1374 SmallVectorImpl<SDValue> &InVals) const {
1375
1376 if (CLI.IsVarArg && (STI.getPTXVersion() < 60 || STI.getSmVersion() < 30))
1378 "Support for variadic functions (unsized array parameter) introduced "
1379 "in PTX ISA version 6.0 and requires target sm_30.");
1380
1381 SelectionDAG &DAG = CLI.DAG;
1382 SDLoc dl = CLI.DL;
1383 const SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1384 SDValue Callee = CLI.Callee;
1385 ArgListTy &Args = CLI.getArgs();
1386 Type *RetTy = CLI.RetTy;
1387 const CallBase *CB = CLI.CB;
1388 const DataLayout &DL = DAG.getDataLayout();
1389 LLVMContext &Ctx = *DAG.getContext();
1390
1391 const auto GetI32 = [&](const unsigned I) {
1392 return DAG.getConstant(I, dl, MVT::i32);
1393 };
1394
1395 const unsigned UniqueCallSite = GlobalUniqueCallSite++;
1396 const SDValue CallChain = CLI.Chain;
1397 const SDValue StartChain =
1398 DAG.getCALLSEQ_START(CallChain, UniqueCallSite, 0, dl);
1399 SDValue DeclareGlue = StartChain.getValue(1);
1400
1401 SmallVector<SDValue, 16> CallPrereqs{StartChain};
1402
1403 const auto MakeDeclareScalarParam = [&](SDValue Symbol, unsigned Size) {
1404 // PTX ABI requires integral types to be at least 32 bits in size. FP16 is
1405 // loaded/stored using i16, so it's handled here as well.
1406 const unsigned SizeBits = promoteScalarArgumentSize(Size * 8);
1407 SDValue Declare =
1408 DAG.getNode(NVPTXISD::DeclareScalarParam, dl, {MVT::Other, MVT::Glue},
1409 {StartChain, Symbol, GetI32(SizeBits), DeclareGlue});
1410 CallPrereqs.push_back(Declare);
1411 DeclareGlue = Declare.getValue(1);
1412 return Declare;
1413 };
1414
1415 const auto MakeDeclareArrayParam = [&](SDValue Symbol, Align Align,
1416 unsigned Size) {
1417 SDValue Declare = DAG.getNode(
1418 NVPTXISD::DeclareArrayParam, dl, {MVT::Other, MVT::Glue},
1419 {StartChain, Symbol, GetI32(Align.value()), GetI32(Size), DeclareGlue});
1420 CallPrereqs.push_back(Declare);
1421 DeclareGlue = Declare.getValue(1);
1422 return Declare;
1423 };
1424
1425 // Variadic arguments.
1426 //
1427 // Normally, for each argument, we declare a param scalar or a param
1428 // byte array in the .param space, and store the argument value to that
1429 // param scalar or array starting at offset 0.
1430 //
1431 // In the case of the first variadic argument, we declare a vararg byte array
1432 // with size 0. The exact size of this array isn't known at this point, so
1433 // it'll be patched later. All the variadic arguments will be stored to this
1434 // array at a certain offset (which gets tracked by 'VAOffset'). The offset is
1435 // initially set to 0, so it can be used for non-variadic arguments (which use
1436 // 0 offset) to simplify the code.
1437 //
1438 // After all variadic arguments are processed, 'VAOffset' holds the size of the
1439 // vararg byte array.
1440 assert((CLI.IsVarArg || CLI.Args.size() == CLI.NumFixedArgs) &&
1441 "Non-VarArg function with extra arguments");
1442
1443 const unsigned FirstVAArg = CLI.NumFixedArgs; // position of first variadic
1444 unsigned VAOffset = 0; // current offset in the param array
1445
1446 const SDValue VADeclareParam =
1447 CLI.Args.size() > FirstVAArg
1448 ? MakeDeclareArrayParam(getCallParamSymbol(DAG, FirstVAArg, MVT::i32),
1449 Align(STI.getMaxRequiredAlignment()), 0)
1450 : SDValue();
1451
1452 // Args.size() and Outs.size() need not match.
1453 // Outs.size() will be larger
1454 // * if there is an aggregate argument with multiple fields (each field
1455 // showing up separately in Outs)
1456 // * if there is a vector argument with more than typical vector-length
1457 // elements (generally if more than 4) where each vector element is
1458 // individually present in Outs.
1459 // So a different index should be used for indexing into Outs/OutVals.
1460 // See similar issue in LowerFormalArguments.
1461 auto AllOuts = ArrayRef(CLI.Outs);
1462 auto AllOutVals = ArrayRef(CLI.OutVals);
1463 assert(AllOuts.size() == AllOutVals.size() &&
1464 "Outs and OutVals must be the same size");
1465 // Declare the .param or .reg registers needed to pass values
1466 // to the function
1467 for (const auto E : llvm::enumerate(Args)) {
1468 const auto ArgI = E.index();
1469 const auto Arg = E.value();
1470 const auto ArgOuts =
1471 AllOuts.take_while([&](auto O) { return O.OrigArgIndex == ArgI; });
1472 const auto ArgOutVals = AllOutVals.take_front(ArgOuts.size());
1473 AllOuts = AllOuts.drop_front(ArgOuts.size());
1474 AllOutVals = AllOutVals.drop_front(ArgOuts.size());
1475
1476 const bool IsVAArg = (ArgI >= FirstVAArg);
1477 const bool IsByVal = Arg.IsByVal;
1478
1479 const SDValue ParamSymbol =
1480 getCallParamSymbol(DAG, IsVAArg ? FirstVAArg : ArgI, MVT::i32);
1481
1482 assert((!IsByVal || Arg.IndirectType) &&
1483 "byval arg must have indirect type");
1484 Type *ETy = (IsByVal ? Arg.IndirectType : Arg.Ty);
1485
1486 const Align ArgAlign = [&]() {
1487 if (IsByVal) {
1488 // The ByValAlign in the Outs[OIdx].Flags is always set at this point,
1489 // so we don't need to worry whether it's naturally aligned or not.
1490 // See TargetLowering::LowerCallTo().
1491 const Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1493 InitialAlign, DL);
1494 }
1495 return getArgumentAlignment(CB, Arg.Ty, ArgI + 1, DL);
1496 }();
1497
1498 const unsigned TySize = DL.getTypeAllocSize(ETy);
1499 assert((!IsByVal || TySize == ArgOuts[0].Flags.getByValSize()) &&
1500 "type size mismatch");
1501
1502 const SDValue ArgDeclare = [&]() {
1503 if (IsVAArg)
1504 return VADeclareParam;
1505
1506 if (IsByVal || shouldPassAsArray(Arg.Ty))
1507 return MakeDeclareArrayParam(ParamSymbol, ArgAlign, TySize);
1508
1509 assert(ArgOuts.size() == 1 && "We must pass only one value as non-array");
1510 assert((ArgOuts[0].VT.isInteger() || ArgOuts[0].VT.isFloatingPoint()) &&
1511 "Only int and float types are supported as non-array arguments");
1512
1513 return MakeDeclareScalarParam(ParamSymbol, TySize);
1514 }();
1515
1516 if (IsByVal) {
1517 assert(ArgOutVals.size() == 1 && "We must pass only one value as byval");
1518 SDValue SrcPtr = ArgOutVals[0];
1519 const auto PointerInfo = refinePtrAS(SrcPtr, DAG, DL, *this);
1520 const Align BaseSrcAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1521
1522 if (IsVAArg)
1523 VAOffset = alignTo(VAOffset, ArgAlign);
1524
1525 SmallVector<EVT, 4> ValueVTs, MemVTs;
1527 ComputeValueVTs(*this, DL, ETy, ValueVTs, &MemVTs, &Offsets);
1528
1529 unsigned J = 0;
1530 const auto VI = VectorizePTXValueVTs(MemVTs, Offsets, ArgAlign, IsVAArg);
1531 for (const unsigned NumElts : VI) {
1532 EVT LoadVT = getVectorizedVT(MemVTs[J], NumElts, Ctx);
1533 Align SrcAlign = commonAlignment(BaseSrcAlign, Offsets[J]);
1534 SDValue SrcAddr = DAG.getObjectPtrOffset(dl, SrcPtr, Offsets[J]);
1535 SDValue SrcLoad =
1536 DAG.getLoad(LoadVT, dl, CallChain, SrcAddr, PointerInfo, SrcAlign);
1537
1538 TypeSize ParamOffset = Offsets[J].getWithIncrement(VAOffset);
1539 Align ParamAlign = commonAlignment(ArgAlign, ParamOffset);
1540 SDValue ParamAddr =
1541 DAG.getObjectPtrOffset(dl, ParamSymbol, ParamOffset);
1542 SDValue StoreParam =
1543 DAG.getStore(ArgDeclare, dl, SrcLoad, ParamAddr,
1545 CallPrereqs.push_back(StoreParam);
1546
1547 J += NumElts;
1548 }
1549 if (IsVAArg)
1550 VAOffset += TySize;
1551 } else {
1554 ComputePTXValueVTs(*this, DL, Ctx, CLI.CallConv, Arg.Ty, VTs, Offsets,
1555 VAOffset);
1556 assert(VTs.size() == Offsets.size() && "Size mismatch");
1557 assert(VTs.size() == ArgOuts.size() && "Size mismatch");
1558
1559 // PTX Interoperability Guide 3.3(A): [Integer] Values shorter
1560 // than 32-bits are sign extended or zero extended, depending on
1561 // whether they are signed or unsigned types. This case applies
1562 // only to scalar parameters and not to aggregate values.
1563 const bool ExtendIntegerParam =
1564 Arg.Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Arg.Ty) < 32;
1565
1566 const auto GetStoredValue = [&](const unsigned I) {
1567 SDValue StVal = ArgOutVals[I];
1569 StVal.getValueType() &&
1570 "OutVal type should always be legal");
1571
1572 const EVT VTI = promoteScalarIntegerPTX(VTs[I]);
1573 const EVT StoreVT =
1574 ExtendIntegerParam ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
1575
1576 return correctParamType(StVal, StoreVT, ArgOuts[I].Flags, DAG, dl);
1577 };
1578
1579 unsigned J = 0;
1580 const auto VI = VectorizePTXValueVTs(VTs, Offsets, ArgAlign, IsVAArg);
1581 for (const unsigned NumElts : VI) {
1582 const EVT EltVT = promoteScalarIntegerPTX(VTs[J]);
1583
1584 unsigned Offset;
1585 if (IsVAArg) {
1586 // TODO: We may need to support vector types that can be passed
1587 // as scalars in variadic arguments.
1588 assert(NumElts == 1 &&
1589 "Vectorization should be disabled for vaargs.");
1590
1591 // Align each part of the variadic argument to its type.
1592 VAOffset = alignTo(VAOffset, DAG.getEVTAlign(EltVT));
1593 Offset = VAOffset;
1594
1595 const EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : EltVT;
1596 VAOffset += DL.getTypeAllocSize(TheStoreType.getTypeForEVT(Ctx));
1597 } else {
1598 assert(VAOffset == 0 && "VAOffset must be 0 for non-VA args");
1599 Offset = Offsets[J];
1600 }
1601
1602 SDValue Ptr =
1603 DAG.getObjectPtrOffset(dl, ParamSymbol, TypeSize::getFixed(Offset));
1604
1605 const MaybeAlign CurrentAlign = ExtendIntegerParam
1606 ? MaybeAlign(std::nullopt)
1607 : commonAlignment(ArgAlign, Offset);
1608
1609 SDValue Val =
1610 getBuildVectorizedValue(NumElts, dl, DAG, [&](unsigned K) {
1611 return GetStoredValue(J + K);
1612 });
1613
1614 SDValue StoreParam =
1615 DAG.getStore(ArgDeclare, dl, Val, Ptr,
1617 CallPrereqs.push_back(StoreParam);
1618
1619 J += NumElts;
1620 }
1621 }
1622 }
1623
1624 // Handle Result
1625 if (!Ins.empty()) {
1626 const SDValue RetSymbol = DAG.getExternalSymbol("retval0", MVT::i32);
1627 const unsigned ResultSize = DL.getTypeAllocSize(RetTy);
1628 if (shouldPassAsArray(RetTy)) {
1629 const Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);
1630 MakeDeclareArrayParam(RetSymbol, RetAlign, ResultSize);
1631 } else {
1632 MakeDeclareScalarParam(RetSymbol, ResultSize);
1633 }
1634 }
1635
1636 // Set the size of the vararg param byte array if the callee is a variadic
1637 // function and the variadic part is not empty.
1638 if (VADeclareParam) {
1639 SDValue DeclareParamOps[] = {VADeclareParam.getOperand(0),
1640 VADeclareParam.getOperand(1),
1641 VADeclareParam.getOperand(2), GetI32(VAOffset),
1642 VADeclareParam.getOperand(4)};
1643 DAG.MorphNodeTo(VADeclareParam.getNode(), VADeclareParam.getOpcode(),
1644 VADeclareParam->getVTList(), DeclareParamOps);
1645 }
1646
1647 const auto *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
1648 // If the type of the callsite does not match that of the function, convert
1649 // the callsite to an indirect call.
1650 const bool ConvertToIndirectCall = shouldConvertToIndirectCall(CB, Func);
1651
1652 // Both indirect calls and libcalls have nullptr Func. In order to distinguish
1653 // between them we must rely on the call site value which is valid for
1654 // indirect calls but is always null for libcalls.
1655 const bool IsIndirectCall = (!Func && CB) || ConvertToIndirectCall;
1656
1657 if (isa<ExternalSymbolSDNode>(Callee)) {
1658 Function* CalleeFunc = nullptr;
1659
1660 // Try to find the callee in the current module.
1661 Callee = DAG.getSymbolFunctionGlobalAddress(Callee, &CalleeFunc);
1662 assert(CalleeFunc != nullptr && "Libcall callee must be set.");
1663
1664 // Set the "libcall callee" attribute to indicate that the function
1665 // must always have a declaration.
1666 CalleeFunc->addFnAttr("nvptx-libcall-callee", "true");
1667 }
1668
1669 if (IsIndirectCall) {
1670 // This is the indirect function call case: PTX requires a prototype of the
1671 // form
1672 // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
1673 // to be emitted, and the label has to be used as the last arg of the call
1674 // instruction.
1675 // The prototype is embedded in a string and put as the operand for a
1676 // CallPrototype SDNode which will print out to the value of the string.
1677 const bool HasVAArgs = CLI.IsVarArg && (CLI.Args.size() > CLI.NumFixedArgs);
1678 std::string Proto =
1679 getPrototype(DL, RetTy, Args, CLI.Outs,
1680 HasVAArgs ? std::optional(FirstVAArg) : std::nullopt, *CB,
1681 UniqueCallSite);
1682 const char *ProtoStr = nvTM->getStrPool().save(Proto).data();
1683 const SDValue PrototypeDeclare = DAG.getNode(
1684 NVPTXISD::CallPrototype, dl, MVT::Other,
1685 {StartChain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32)});
1686 CallPrereqs.push_back(PrototypeDeclare);
1687 }
1688
1689 const unsigned Proto = IsIndirectCall ? UniqueCallSite : 0;
1690 const unsigned NumArgs =
1691 std::min<unsigned>(CLI.NumFixedArgs + 1, Args.size());
1692 /// CALL(Chain, IsConvergent, IsIndirectCall/IsUniform, NumReturns,
1693 /// NumParams, Callee, Proto)
1694 const SDValue CallToken = DAG.getTokenFactor(dl, CallPrereqs);
1695 const SDValue Call = DAG.getNode(
1696 NVPTXISD::CALL, dl, MVT::Other,
1697 {CallToken, GetI32(CLI.IsConvergent), GetI32(IsIndirectCall),
1698 GetI32(Ins.empty() ? 0 : 1), GetI32(NumArgs), Callee, GetI32(Proto)});
1699
1700 SmallVector<SDValue, 16> LoadChains{Call};
1701 SmallVector<SDValue, 16> ProxyRegOps;
1702 if (!Ins.empty()) {
1705 ComputePTXValueVTs(*this, DL, Ctx, CLI.CallConv, RetTy, VTs, Offsets);
1706 assert(VTs.size() == Ins.size() && "Bad value decomposition");
1707
1708 const Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);
1709 const SDValue RetSymbol = DAG.getExternalSymbol("retval0", MVT::i32);
1710
1711 // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
1712 // 32-bits are sign extended or zero extended, depending on whether
1713 // they are signed or unsigned types.
1714 const bool ExtendIntegerRetVal =
1715 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
1716
1717 unsigned I = 0;
1718 const auto VI = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
1719 for (const unsigned NumElts : VI) {
1720 const MaybeAlign CurrentAlign =
1721 ExtendIntegerRetVal ? MaybeAlign(std::nullopt)
1722 : commonAlignment(RetAlign, Offsets[I]);
1723
1724 const EVT VTI = promoteScalarIntegerPTX(VTs[I]);
1725 const EVT LoadVT =
1726 ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
1727 const EVT VecVT = getVectorizedVT(LoadVT, NumElts, Ctx);
1728 SDValue Ptr =
1729 DAG.getObjectPtrOffset(dl, RetSymbol, TypeSize::getFixed(Offsets[I]));
1730
1731 SDValue R =
1732 DAG.getLoad(VecVT, dl, Call, Ptr,
1734
1735 LoadChains.push_back(R.getValue(1));
1736 for (const unsigned J : llvm::seq(NumElts))
1737 ProxyRegOps.push_back(getExtractVectorizedValue(R, J, LoadVT, dl, DAG));
1738 I += NumElts;
1739 }
1740 }
1741
1742 const SDValue EndToken = DAG.getTokenFactor(dl, LoadChains);
1743 const SDValue CallEnd = DAG.getCALLSEQ_END(EndToken, UniqueCallSite,
1744 UniqueCallSite + 1, SDValue(), dl);
1745
1746 // Append ProxyReg instructions to the chain to make sure that `callseq_end`
1747 // will not get lost. Otherwise, during libcalls expansion, the nodes can become
1748 // dangling.
1749 for (const auto [I, Reg] : llvm::enumerate(ProxyRegOps)) {
1750 SDValue Proxy =
1751 DAG.getNode(NVPTXISD::ProxyReg, dl, Reg.getValueType(), {CallEnd, Reg});
1752 SDValue Ret = correctParamType(Proxy, Ins[I].VT, Ins[I].Flags, DAG, dl);
1753 InVals.push_back(Ret);
1754 }
1755
1756 // set IsTailCall to false for now, until we figure out how to express
1757 // tail call optimization in PTX
1758 CLI.IsTailCall = false;
1759 return CallEnd;
1760}
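// Editorial sketch (illustrative only) of the PTX call sequence this lowering
// ultimately produces for a simple direct call like 'int r = f(x)':
//   .param .b32 param0;
//   st.param.b32 [param0], %r1;
//   .param .b32 retval0;
//   call.uni (retval0), f, (param0);
//   ld.param.b32 %r2, [retval0];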
1761
1763 SelectionDAG &DAG) const {
1764
1765 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
1766 const Function &Fn = DAG.getMachineFunction().getFunction();
1767
1769 Fn,
1770 "Support for dynamic alloca introduced in PTX ISA version 7.3 and "
1771 "requires target sm_52.",
1772 SDLoc(Op).getDebugLoc()));
1773 auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()),
1774 Op.getOperand(0)};
1775 return DAG.getMergeValues(Ops, SDLoc());
1776 }
1777
1778 SDLoc DL(Op.getNode());
1779 SDValue Chain = Op.getOperand(0);
1780 SDValue Size = Op.getOperand(1);
1781 uint64_t Align = Op.getConstantOperandVal(2);
1782
1783 // The alignment on a ISD::DYNAMIC_STACKALLOC node may be 0 to indicate that
1784 // the default stack alignment should be used.
1785 if (Align == 0)
1787
1788 // The size for ptx alloca instruction is 64-bit for m64 and 32-bit for m32.
1789 const MVT LocalVT = getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_LOCAL);
1790
1791 SDValue Alloc =
1792 DAG.getNode(NVPTXISD::DYNAMIC_STACKALLOC, DL, {LocalVT, MVT::Other},
1793 {Chain, DAG.getZExtOrTrunc(Size, DL, LocalVT),
1794 DAG.getTargetConstant(Align, DL, MVT::i32)});
1795
1796 SDValue ASC = DAG.getAddrSpaceCast(
1798
1799 return DAG.getMergeValues({ASC, SDValue(Alloc.getNode(), 1)}, DL);
1800}
1801
1803 SelectionDAG &DAG) const {
1804 SDLoc DL(Op.getNode());
1805 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
1806 const Function &Fn = DAG.getMachineFunction().getFunction();
1807
1809 Fn,
1810 "Support for stackrestore requires PTX ISA version >= 7.3 and target "
1811 ">= sm_52.",
1812 DL.getDebugLoc()));
1813 return Op.getOperand(0);
1814 }
1815
1816 const MVT LocalVT = getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_LOCAL);
1817 SDValue Chain = Op.getOperand(0);
1818 SDValue Ptr = Op.getOperand(1);
1819 SDValue ASC = DAG.getAddrSpaceCast(DL, LocalVT, Ptr, ADDRESS_SPACE_GENERIC,
1821 return DAG.getNode(NVPTXISD::STACKRESTORE, DL, MVT::Other, {Chain, ASC});
1822}
1823
1825 SelectionDAG &DAG) const {
1826 SDLoc DL(Op.getNode());
1827 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
1828 const Function &Fn = DAG.getMachineFunction().getFunction();
1829
1831 Fn,
1832 "Support for stacksave requires PTX ISA version >= 7.3 and target >= "
1833 "sm_52.",
1834 DL.getDebugLoc()));
1835 auto Ops = {DAG.getConstant(0, DL, Op.getValueType()), Op.getOperand(0)};
1836 return DAG.getMergeValues(Ops, DL);
1837 }
1838
1839 const MVT LocalVT = getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_LOCAL);
1840 SDValue Chain = Op.getOperand(0);
1841 SDValue SS =
1842 DAG.getNode(NVPTXISD::STACKSAVE, DL, {LocalVT, MVT::Other}, Chain);
1843 SDValue ASC = DAG.getAddrSpaceCast(
1844 DL, Op.getValueType(), SS, ADDRESS_SPACE_LOCAL, ADDRESS_SPACE_GENERIC);
1845 return DAG.getMergeValues({ASC, SDValue(SS.getNode(), 1)}, DL);
1846}
1847
1848// By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
1849// (see LegalizeDAG.cpp). This is slow and uses local memory.
1850// We use extract/insert/build vector instead, just as LegalizeOp() did in llvm 2.5.
1851SDValue
1852NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
1853 SDNode *Node = Op.getNode();
1854 SDLoc dl(Node);
1856 unsigned NumOperands = Node->getNumOperands();
1857 for (unsigned i = 0; i < NumOperands; ++i) {
1858 SDValue SubOp = Node->getOperand(i);
1859 EVT VVT = SubOp.getNode()->getValueType(0);
1860 EVT EltVT = VVT.getVectorElementType();
1861 unsigned NumSubElem = VVT.getVectorNumElements();
1862 for (unsigned j = 0; j < NumSubElem; ++j) {
1863 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
1864 DAG.getIntPtrConstant(j, dl)));
1865 }
1866 }
1867 return DAG.getBuildVector(Node->getValueType(0), dl, Ops);
1868}
1869
1871 SelectionDAG &DAG,
1872 unsigned Mode = NVPTX::PTXPrmtMode::NONE) {
1873 assert(A.getValueType() == MVT::i32 && B.getValueType() == MVT::i32 &&
1874 Selector.getValueType() == MVT::i32 && "PRMT must have i32 operands");
1875 return DAG.getNode(NVPTXISD::PRMT, DL, MVT::i32,
1876 {A, B, Selector, DAG.getConstant(Mode, DL, MVT::i32)});
1877}
1878
1880 SelectionDAG &DAG,
1881 unsigned Mode = NVPTX::PTXPrmtMode::NONE) {
1882 return getPRMT(A, B, DAG.getConstant(Selector, DL, MVT::i32), DL, DAG, Mode);
1883}
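
// Note on PRMT semantics: in the default mode each nibble of Selector picks
// one of the eight bytes of the combined value {B, A} (values 0-3 select a
// byte of A, 4-7 a byte of B). For example, getPRMT(A, B, 0x0123, DL, DAG)
// produces byte-reversed A, and getPRMT(A, B, 0x5410, DL, DAG) concatenates
// the low halves of A and B.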
1884
1885/// Reduces the elements using the scalar operations provided. The operations
1886/// are sorted by decreasing number of inputs they take. The flags on the
1887/// original reduction operation will be propagated to each scalar operation.
1888/// Nearby elements are grouped in tree reduction, unlike the shuffle reduction
1889/// used in ExpandReductions and SelectionDAG.
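/// For example, with Ops = {{FMAXNUM3, 3}, {FMAXNUM, 2}} and eight elements
/// e0..e7, the successive levels are [max3(e0,e1,e2), max3(e3,e4,e5), e6, e7],
/// then [max3(m0,m1,e6), e7] (mN being the partial results of the previous
/// level), and finally [max(m2,e7)].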
1891 const SmallVector<SDValue> &Elements, EVT EltTy,
1892 ArrayRef<std::pair<unsigned /*NodeType*/, unsigned /*NumInputs*/>> Ops,
1893 const SDLoc &DL, const SDNodeFlags Flags, SelectionDAG &DAG) {
1894 // Build the reduction tree at each level, starting with all the elements.
1895 SmallVector<SDValue> Level = Elements;
1896
1897 unsigned OpIdx = 0;
1898 while (Level.size() > 1) {
1899 // Try to reduce this level using the current operator.
1900 const auto [Op, NumInputs] = Ops[OpIdx];
1901
1902 // Build the next level by partially reducing all elements.
1903 SmallVector<SDValue> ReducedLevel;
1904 unsigned I = 0, E = Level.size();
1905 for (; I + NumInputs <= E; I += NumInputs) {
1906 // Reduce elements in groups of [NumInputs], as much as possible.
1907 ReducedLevel.push_back(DAG.getNode(
1908 Op, DL, EltTy, ArrayRef<SDValue>(Level).slice(I, NumInputs), Flags));
1909 }
1910
1911 if (I < E) {
1912 // Handle leftover elements.
1913
1914 if (ReducedLevel.empty()) {
1915 // We didn't reduce anything at this level. We need to pick a smaller
1916 // operator.
1917 ++OpIdx;
1918 assert(OpIdx < Ops.size() && "no smaller operators for reduction");
1919 continue;
1920 }
1921
1922 // We reduced some things but there's still more left, meaning the
1923 // operator's number of inputs doesn't evenly divide this level size. Move
1924 // these elements to the next level.
1925 for (; I < E; ++I)
1926 ReducedLevel.push_back(Level[I]);
1927 }
1928
1929 // Process the next level.
1930 Level = ReducedLevel;
1931 }
1932
1933 return *Level.begin();
1934}
1935
1936// Get scalar reduction opcode
1937static ISD::NodeType getScalarOpcodeForReduction(unsigned ReductionOpcode) {
1938 switch (ReductionOpcode) {
1940 return ISD::FMAXNUM;
1942 return ISD::FMINNUM;
1944 return ISD::FMAXIMUM;
1946 return ISD::FMINIMUM;
1947 default:
1948 llvm_unreachable("unhandled reduction opcode");
1949 }
1950}
1951
1952/// Get 3-input scalar reduction opcode
1953static std::optional<unsigned>
1954getScalar3OpcodeForReduction(unsigned ReductionOpcode) {
1955 switch (ReductionOpcode) {
1957 return NVPTXISD::FMAXNUM3;
1959 return NVPTXISD::FMINNUM3;
1961 return NVPTXISD::FMAXIMUM3;
1963 return NVPTXISD::FMINIMUM3;
1964 default:
1965 return std::nullopt;
1966 }
1967}
1968
1969/// Lower reductions to either a sequence of operations or a tree if
1970/// reassociation is allowed. This method will use larger operations like
1971/// max3/min3 when the target supports them.
1972SDValue NVPTXTargetLowering::LowerVECREDUCE(SDValue Op,
1973 SelectionDAG &DAG) const {
1974 SDLoc DL(Op);
1975 const SDNodeFlags Flags = Op->getFlags();
1976 SDValue Vector = Op.getOperand(0);
1977
1978 const unsigned Opcode = Op->getOpcode();
1979 const EVT EltTy = Vector.getValueType().getVectorElementType();
1980
1981 // Whether we can use 3-input min/max when expanding the reduction.
1982 const bool CanUseMinMax3 =
1983 EltTy == MVT::f32 && STI.getSmVersion() >= 100 &&
1984 STI.getPTXVersion() >= 88 &&
1985 (Opcode == ISD::VECREDUCE_FMAX || Opcode == ISD::VECREDUCE_FMIN ||
1986 Opcode == ISD::VECREDUCE_FMAXIMUM || Opcode == ISD::VECREDUCE_FMINIMUM);
1987
1988 // A list of SDNode opcodes with equivalent semantics, sorted descending by
1989 // number of inputs they take.
1990 SmallVector<std::pair<unsigned /*Op*/, unsigned /*NumIn*/>, 2> ScalarOps;
1991
1992 if (auto Opcode3Elem = getScalar3OpcodeForReduction(Opcode);
1993 CanUseMinMax3 && Opcode3Elem)
1994 ScalarOps.push_back({*Opcode3Elem, 3});
1995 ScalarOps.push_back({getScalarOpcodeForReduction(Opcode), 2});
1996
1998 DAG.ExtractVectorElements(Vector, Elements);
1999
2000 return buildTreeReduction(Elements, EltTy, ScalarOps, DL, Flags, DAG);
2001}
2002
2003SDValue NVPTXTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
2004 // Handle bitcasting from v2i8 without hitting the default promotion
2005 // strategy which goes through stack memory.
2006 EVT FromVT = Op->getOperand(0)->getValueType(0);
2007 if (FromVT != MVT::v2i8) {
2008 return Op;
2009 }
2010
2011 // Pack vector elements into i16 and bitcast to final type
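  // e.g. bitcast of <2 x i8> <a, b> becomes ((zext b to i16) << 8) | (zext a),
  // which is then bitcast to the destination type.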
2012 SDLoc DL(Op);
2013 SDValue Vec0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8,
2014 Op->getOperand(0), DAG.getIntPtrConstant(0, DL));
2015 SDValue Vec1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8,
2016 Op->getOperand(0), DAG.getIntPtrConstant(1, DL));
2017 SDValue Extend0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i16, Vec0);
2018 SDValue Extend1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i16, Vec1);
2019 SDValue Const8 = DAG.getConstant(8, DL, MVT::i16);
2020 SDValue AsInt = DAG.getNode(
2021 ISD::OR, DL, MVT::i16,
2022 {Extend0, DAG.getNode(ISD::SHL, DL, MVT::i16, {Extend1, Const8})});
2023 EVT ToVT = Op->getValueType(0);
2024 return DAG.getBitcast(ToVT, AsInt);
2025}
2026
2027// We can init constant f16x2/v2i16/v4i8 with a single .b32 move. Normally it
2028// would get lowered as two constant loads and a vector-packing move.
2029// Instead we want just a constant move:
2030// mov.b32 %r2, 0x40003C00
2031SDValue NVPTXTargetLowering::LowerBUILD_VECTOR(SDValue Op,
2032 SelectionDAG &DAG) const {
2033 EVT VT = Op->getValueType(0);
2034 if (!(NVPTX::isPackedVectorTy(VT) && VT.is32BitVector()))
2035 return Op;
2036 SDLoc DL(Op);
2037
2038 if (!llvm::all_of(Op->ops(), [](SDValue Operand) {
2039 return Operand->isUndef() || isa<ConstantSDNode>(Operand) ||
2040 isa<ConstantFPSDNode>(Operand);
2041 })) {
2042 if (VT != MVT::v4i8)
2043 return Op;
2044 // Lower non-const v4i8 vector as byte-wise constructed i32, which allows us
2045 // to optimize calculation of constant parts.
2046 auto GetPRMT = [&](const SDValue Left, const SDValue Right, bool Cast,
2047 uint64_t SelectionValue) -> SDValue {
2048 SDValue L = Left;
2049 SDValue R = Right;
2050 if (Cast) {
2051 L = DAG.getAnyExtOrTrunc(L, DL, MVT::i32);
2052 R = DAG.getAnyExtOrTrunc(R, DL, MVT::i32);
2053 }
2054 return getPRMT(L, R, SelectionValue, DL, DAG);
2055 };
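    // With the default PRMT mode, selector 0x3340 places byte 0 of the first
    // operand into result byte 0 and byte 0 of the second operand into result
    // byte 1; selector 0x5410 then packs the two byte pairs, so the final i32
    // holds {op3, op2, op1, op0} from the most to the least significant byte.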
2056 auto PRMT__10 = GetPRMT(Op->getOperand(0), Op->getOperand(1), true, 0x3340);
2057 auto PRMT__32 = GetPRMT(Op->getOperand(2), Op->getOperand(3), true, 0x3340);
2058 auto PRMT3210 = GetPRMT(PRMT__10, PRMT__32, false, 0x5410);
2059 return DAG.getBitcast(VT, PRMT3210);
2060 }
2061
2062 // Get value or the Nth operand as an APInt(32). Undef values treated as 0.
2063 auto GetOperand = [](SDValue Op, int N) -> APInt {
2064 const SDValue &Operand = Op->getOperand(N);
2065 EVT VT = Op->getValueType(0);
2066 if (Operand->isUndef())
2067 return APInt(32, 0);
2068 APInt Value;
2069 if (VT == MVT::v2f16 || VT == MVT::v2bf16)
2070 Value = cast<ConstantFPSDNode>(Operand)->getValueAPF().bitcastToAPInt();
2071 else if (VT == MVT::v2i16 || VT == MVT::v4i8)
2072 Value = Operand->getAsAPIntVal();
2073 else
2074 llvm_unreachable("Unsupported type");
2075 // i8 values are carried around as i16, so we need to zero out the upper bits
2076 // so that they do not get in the way of combining the individual byte values.
2077 if (VT == MVT::v4i8)
2078 Value = Value.trunc(8);
2079 return Value.zext(32);
2080 };
2081
2082 // Construct a 32-bit constant by shifting into place smaller values
2083 // (elements of the vector type VT).
2084 // For example, if VT has 2 elements, then N == 2:
2085 // ShiftAmount = 32 / N = 16
2086 // Value |= Op0 (b16) << 0
2087 // Value |= Op1 (b16) << 16
2088 // If N == 4:
2089 // ShiftAmount = 32 / N = 8
2090 // Value |= Op0 (b8) << 0
2091 // Value |= Op1 (b8) << 8
2092 // Value |= Op2 (b8) << 16
2093 // Value |= Op3 (b8) << 24
2094 // ...etc
2095 APInt Value(32, 0);
2096 const unsigned NumElements = VT.getVectorNumElements();
2097 assert(32 % NumElements == 0 && "must evenly divide bit length");
2098 const unsigned ShiftAmount = 32 / NumElements;
2099 for (unsigned ElementNo : seq(NumElements))
2100 Value |= GetOperand(Op, ElementNo).shl(ElementNo * ShiftAmount);
2101 SDValue Const = DAG.getConstant(Value, DL, MVT::i32);
2102 return DAG.getNode(ISD::BITCAST, DL, Op->getValueType(0), Const);
2103}
2104
2105SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
2106 SelectionDAG &DAG) const {
2107 SDValue Index = Op->getOperand(1);
2108 SDValue Vector = Op->getOperand(0);
2109 SDLoc DL(Op);
2110 EVT VectorVT = Vector.getValueType();
2111
2112 if (VectorVT == MVT::v4i8) {
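    // The low selector nibble is the byte index into the i32-typed vector; the
    // remaining 0x777 nibbles select byte 7 of {0, Vector}, i.e. zero, so the
    // PRMT result is the requested byte zero-extended to 32 bits.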
2113 SDValue Selector = DAG.getNode(ISD::OR, DL, MVT::i32,
2114 DAG.getZExtOrTrunc(Index, DL, MVT::i32),
2115 DAG.getConstant(0x7770, DL, MVT::i32));
2116 SDValue PRMT = getPRMT(DAG.getBitcast(MVT::i32, Vector),
2117 DAG.getConstant(0, DL, MVT::i32), Selector, DL, DAG);
2118 SDValue Ext = DAG.getAnyExtOrTrunc(PRMT, DL, Op->getValueType(0));
2119 SDNodeFlags Flags;
2120 Flags.setNoSignedWrap(Ext.getScalarValueSizeInBits() > 8);
2121 Flags.setNoUnsignedWrap(Ext.getScalarValueSizeInBits() >= 8);
2122 Ext->setFlags(Flags);
2123 return Ext;
2124 }
2125
2126 // Constant index will be matched by tablegen.
2127 if (isa<ConstantSDNode>(Index.getNode()))
2128 return Op;
2129
2130 // Extract individual elements and select one of them.
2131 assert(NVPTX::isPackedVectorTy(VectorVT) &&
2132 VectorVT.getVectorNumElements() == 2 && "Unexpected vector type.");
2133 EVT EltVT = VectorVT.getVectorElementType();
2134
2135 SDLoc dl(Op.getNode());
2136 SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
2137 DAG.getIntPtrConstant(0, dl));
2138 SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
2139 DAG.getIntPtrConstant(1, dl));
2140 return DAG.getSelectCC(dl, Index, DAG.getIntPtrConstant(0, dl), E0, E1,
2142}
2143
2144SDValue NVPTXTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
2145 SelectionDAG &DAG) const {
2146 SDValue Vector = Op->getOperand(0);
2147 EVT VectorVT = Vector.getValueType();
2148
2149 if (VectorVT != MVT::v4i8)
2150 return Op;
2151 SDLoc DL(Op);
2152 SDValue Value = Op->getOperand(1);
2153 if (Value->isUndef())
2154 return Vector;
2155
2156 SDValue Index = Op->getOperand(2);
2157
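  // BFI (bit-field insert) overwrites the 8 bits at bit offset Index * 8 of
  // the i32-typed vector with the new byte value.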
2158 SDValue BFI =
2159 DAG.getNode(NVPTXISD::BFI, DL, MVT::i32,
2160 {DAG.getZExtOrTrunc(Value, DL, MVT::i32), Vector,
2161 DAG.getNode(ISD::MUL, DL, MVT::i32,
2162 DAG.getZExtOrTrunc(Index, DL, MVT::i32),
2163 DAG.getConstant(8, DL, MVT::i32)),
2164 DAG.getConstant(8, DL, MVT::i32)});
2165 return DAG.getNode(ISD::BITCAST, DL, Op->getValueType(0), BFI);
2166}
2167
2168SDValue NVPTXTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
2169 SelectionDAG &DAG) const {
2170 SDValue V1 = Op.getOperand(0);
2171 EVT VectorVT = V1.getValueType();
2172 if (VectorVT != MVT::v4i8 || Op.getValueType() != MVT::v4i8)
2173 return Op;
2174
2175 // Lower shuffle to PRMT instruction.
2176 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2177 SDValue V2 = Op.getOperand(1);
2178 uint32_t Selector = 0;
2179 for (auto I : llvm::enumerate(SVN->getMask())) {
2180 if (I.value() != -1) // -1 is a placeholder for undef.
2181 Selector |= (I.value() << (I.index() * 4));
2182 }
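  // e.g. the shuffle mask <0, 4, 1, 5> becomes selector 0x5140, which
  // interleaves the two low bytes of V1 with the two low bytes of V2.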
2183
2184 SDLoc DL(Op);
2185 SDValue PRMT = getPRMT(DAG.getBitcast(MVT::i32, V1),
2186 DAG.getBitcast(MVT::i32, V2), Selector, DL, DAG);
2187 return DAG.getBitcast(Op.getValueType(), PRMT);
2188}
2189/// LowerShiftRightParts - Lower SRL_PARTS, SRA_PARTS, which
2190/// 1) returns two i32 values and takes a 2 x i32 value to shift plus a shift
2191/// amount, or
2192/// 2) returns two i64 values and takes a 2 x i64 value to shift plus a shift
2193/// amount.
2194SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
2195 SelectionDAG &DAG) const {
2196 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
2197 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
2198
2199 EVT VT = Op.getValueType();
2200 unsigned VTBits = VT.getSizeInBits();
2201 SDLoc dl(Op);
2202 SDValue ShOpLo = Op.getOperand(0);
2203 SDValue ShOpHi = Op.getOperand(1);
2204 SDValue ShAmt = Op.getOperand(2);
2205 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
2206
2207 if (VTBits == 32 && STI.getSmVersion() >= 35) {
2208 // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
2209 // {dHi, dLo} = {aHi, aLo} >> Amt
2210 // dHi = aHi >> Amt
2211 // dLo = shf.r.clamp aLo, aHi, Amt
2212
2213 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
2214 SDValue Lo =
2215 DAG.getNode(NVPTXISD::FSHR_CLAMP, dl, VT, ShOpHi, ShOpLo, ShAmt);
2216
2217 SDValue Ops[2] = { Lo, Hi };
2218 return DAG.getMergeValues(Ops, dl);
2219 }
2220 else {
2221 // {dHi, dLo} = {aHi, aLo} >> Amt
2222 // - if (Amt>=size) then
2223 // dLo = aHi >> (Amt-size)
2224 // dHi = aHi >> Amt (this is either all 0 or all 1)
2225 // else
2226 // dLo = (aLo >>logic Amt) | (aHi << (size-Amt))
2227 // dHi = aHi >> Amt
2228
2229 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
2230 DAG.getConstant(VTBits, dl, MVT::i32),
2231 ShAmt);
2232 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
2233 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
2234 DAG.getConstant(VTBits, dl, MVT::i32));
2235 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
2236 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2237 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
2238
2239 SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
2240 DAG.getConstant(VTBits, dl, MVT::i32),
2241 ISD::SETGE);
2242 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
2243 SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
2244
2245 SDValue Ops[2] = { Lo, Hi };
2246 return DAG.getMergeValues(Ops, dl);
2247 }
2248}
2249
2250/// LowerShiftLeftParts - Lower SHL_PARTS, which
2251/// 1) returns two i32 values and takes a 2 x i32 value to shift plus a shift
2252/// amount, or
2253/// 2) returns two i64 values and takes a 2 x i64 value to shift plus a shift
2254/// amount.
2255SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
2256 SelectionDAG &DAG) const {
2257 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
2258 assert(Op.getOpcode() == ISD::SHL_PARTS);
2259
2260 EVT VT = Op.getValueType();
2261 unsigned VTBits = VT.getSizeInBits();
2262 SDLoc dl(Op);
2263 SDValue ShOpLo = Op.getOperand(0);
2264 SDValue ShOpHi = Op.getOperand(1);
2265 SDValue ShAmt = Op.getOperand(2);
2266
2267 if (VTBits == 32 && STI.getSmVersion() >= 35) {
2268 // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
2269 // {dHi, dLo} = {aHi, aLo} << Amt
2270 // dHi = shf.l.clamp aLo, aHi, Amt
2271 // dLo = aLo << Amt
2272
2273 SDValue Hi =
2274 DAG.getNode(NVPTXISD::FSHL_CLAMP, dl, VT, ShOpHi, ShOpLo, ShAmt);
2275 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
2276
2277 SDValue Ops[2] = { Lo, Hi };
2278 return DAG.getMergeValues(Ops, dl);
2279 }
2280 else {
2281 // {dHi, dLo} = {aHi, aLo} << Amt
2282 // - if (Amt>=size) then
2283 // dLo = aLo << Amt (all 0)
2284 //    dHi = aLo << (Amt-size)
2285 // else
2286 // dLo = aLo << Amt
2287 // dHi = (aHi << Amt) | (aLo >> (size-Amt))
2288
2289 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
2290 DAG.getConstant(VTBits, dl, MVT::i32),
2291 ShAmt);
2292 SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
2293 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
2294 DAG.getConstant(VTBits, dl, MVT::i32));
2295 SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
2296 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2297 SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
2298
2299 SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
2300 DAG.getConstant(VTBits, dl, MVT::i32),
2301 ISD::SETGE);
2302 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
2303 SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
2304
2305 SDValue Ops[2] = { Lo, Hi };
2306 return DAG.getMergeValues(Ops, dl);
2307 }
2308}
2309
2310/// If the types match, convert the generic copysign to the NVPTXISD version,
2311/// otherwise bail, ensuring that mismatched cases are properly expanded.
2312SDValue NVPTXTargetLowering::LowerFCOPYSIGN(SDValue Op,
2313 SelectionDAG &DAG) const {
2314 EVT VT = Op.getValueType();
2315 SDLoc DL(Op);
2316
2317 SDValue In1 = Op.getOperand(0);
2318 SDValue In2 = Op.getOperand(1);
2319 EVT SrcVT = In2.getValueType();
2320
2321 if (!SrcVT.bitsEq(VT))
2322 return SDValue();
2323
2324 return DAG.getNode(NVPTXISD::FCOPYSIGN, DL, VT, In1, In2);
2325}
2326
2327SDValue NVPTXTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
2328 EVT VT = Op.getValueType();
2329
2330 if (VT == MVT::f32)
2331 return LowerFROUND32(Op, DAG);
2332
2333 if (VT == MVT::f64)
2334 return LowerFROUND64(Op, DAG);
2335
2336 llvm_unreachable("unhandled type");
2337}
2338
2339// This is the rounding method used in CUDA libdevice, in C-like code:
2340// float roundf(float A)
2341// {
2342// float RoundedA = (float) (int) ( A > 0 ? (A + 0.5f) : (A - 0.5f));
2343// RoundedA = abs(A) > 0x1.0p23 ? A : RoundedA;
2344// return abs(A) < 0.5 ? (float)(int)A : RoundedA;
2345// }
2346SDValue NVPTXTargetLowering::LowerFROUND32(SDValue Op,
2347 SelectionDAG &DAG) const {
2348 SDLoc SL(Op);
2349 SDValue A = Op.getOperand(0);
2350 EVT VT = Op.getValueType();
2351
2352 SDValue AbsA = DAG.getNode(ISD::FABS, SL, VT, A);
2353
2354 // RoundedA = (float) (int) ( A > 0 ? (A + 0.5f) : (A - 0.5f))
2355 SDValue Bitcast = DAG.getNode(ISD::BITCAST, SL, MVT::i32, A);
2356 const unsigned SignBitMask = 0x80000000;
2357 SDValue Sign = DAG.getNode(ISD::AND, SL, MVT::i32, Bitcast,
2358 DAG.getConstant(SignBitMask, SL, MVT::i32));
2359 const unsigned PointFiveInBits = 0x3F000000;
2360 SDValue PointFiveWithSignRaw =
2361 DAG.getNode(ISD::OR, SL, MVT::i32, Sign,
2362 DAG.getConstant(PointFiveInBits, SL, MVT::i32));
2363 SDValue PointFiveWithSign =
2364 DAG.getNode(ISD::BITCAST, SL, VT, PointFiveWithSignRaw);
2365 SDValue AdjustedA = DAG.getNode(ISD::FADD, SL, VT, A, PointFiveWithSign);
2366 SDValue RoundedA = DAG.getNode(ISD::FTRUNC, SL, VT, AdjustedA);
2367
2368 // RoundedA = abs(A) > 0x1.0p23 ? A : RoundedA;
2369 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2370 SDValue IsLarge =
2371 DAG.getSetCC(SL, SetCCVT, AbsA, DAG.getConstantFP(pow(2.0, 23.0), SL, VT),
2372 ISD::SETOGT);
2373 RoundedA = DAG.getNode(ISD::SELECT, SL, VT, IsLarge, A, RoundedA);
2374
2375 // return abs(A) < 0.5 ? (float)(int)A : RoundedA;
2376 SDValue IsSmall = DAG.getSetCC(SL, SetCCVT, AbsA,
2377 DAG.getConstantFP(0.5, SL, VT), ISD::SETOLT);
2378 SDValue RoundedAForSmallA = DAG.getNode(ISD::FTRUNC, SL, VT, A);
2379 return DAG.getNode(ISD::SELECT, SL, VT, IsSmall, RoundedAForSmallA, RoundedA);
2380}
2381
2382// The implementation of round(double) is similar to that of round(float) in
2383// that they both separate the value range into three regions and use a method
2384// specific to the region to round the values. However, round(double) first
2385// calculates the round of the absolute value and then adds the sign back while
2386// round(float) directly rounds the value with sign.
2387SDValue NVPTXTargetLowering::LowerFROUND64(SDValue Op,
2388 SelectionDAG &DAG) const {
2389 SDLoc SL(Op);
2390 SDValue A = Op.getOperand(0);
2391 EVT VT = Op.getValueType();
2392
2393 SDValue AbsA = DAG.getNode(ISD::FABS, SL, VT, A);
2394
2395 // double RoundedA = (double) (int) (abs(A) + 0.5f);
2396 SDValue AdjustedA = DAG.getNode(ISD::FADD, SL, VT, AbsA,
2397 DAG.getConstantFP(0.5, SL, VT));
2398 SDValue RoundedA = DAG.getNode(ISD::FTRUNC, SL, VT, AdjustedA);
2399
2400 // RoundedA = abs(A) < 0.5 ? (double)0 : RoundedA;
2401 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2402 SDValue IsSmall = DAG.getSetCC(SL, SetCCVT, AbsA,
2403 DAG.getConstantFP(0.5, SL, VT), ISD::SETOLT);
2404 RoundedA = DAG.getNode(ISD::SELECT, SL, VT, IsSmall,
2405 DAG.getConstantFP(0, SL, VT),
2406 RoundedA);
2407
2408 // Add the sign back to RoundedA.
2409 RoundedA = DAG.getNode(ISD::FCOPYSIGN, SL, VT, RoundedA, A);
2410 DAG.getNode(ISD::FTRUNC, SL, VT, A);
2411
2412 // RoundedA = abs(A) > 0x1.0p52 ? A : RoundedA;
2413 SDValue IsLarge =
2414 DAG.getSetCC(SL, SetCCVT, AbsA, DAG.getConstantFP(pow(2.0, 52.0), SL, VT),
2415 ISD::SETOGT);
2416 return DAG.getNode(ISD::SELECT, SL, VT, IsLarge, A, RoundedA);
2417}
2418
2420 EVT VT = N->getValueType(0);
2421 EVT NVT = MVT::f32;
2422 if (VT.isVector()) {
2423 NVT = EVT::getVectorVT(*DAG.getContext(), NVT, VT.getVectorElementCount());
2424 }
2425 SDLoc DL(N);
2426 SDValue Tmp0 = DAG.getFPExtendOrRound(N->getOperand(0), DL, NVT);
2427 SDValue Tmp1 = DAG.getFPExtendOrRound(N->getOperand(1), DL, NVT);
2428 SDValue Res = DAG.getNode(N->getOpcode(), DL, NVT, Tmp0, Tmp1, N->getFlags());
2429 return DAG.getFPExtendOrRound(Res, DL, VT);
2430}
2431
2432SDValue NVPTXTargetLowering::PromoteBinOpIfF32FTZ(SDValue Op,
2433 SelectionDAG &DAG) const {
2434 if (useF32FTZ(DAG.getMachineFunction())) {
2435 return PromoteBinOpToF32(Op.getNode(), DAG);
2436 }
2437 return Op;
2438}
2439
2440SDValue NVPTXTargetLowering::LowerINT_TO_FP(SDValue Op,
2441 SelectionDAG &DAG) const {
2442 assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78);
2443
2444 if (Op.getValueType() == MVT::bf16) {
2445 SDLoc Loc(Op);
2446 return DAG.getNode(
2447 ISD::FP_ROUND, Loc, MVT::bf16,
2448 DAG.getNode(Op.getOpcode(), Loc, MVT::f32, Op.getOperand(0)),
2449 DAG.getIntPtrConstant(0, Loc, /*isTarget=*/true));
2450 }
2451
2452 // Everything else is considered legal.
2453 return Op;
2454}
2455
2456SDValue NVPTXTargetLowering::LowerFP_TO_INT(SDValue Op,
2457 SelectionDAG &DAG) const {
2458 assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78);
2459
2460 if (Op.getOperand(0).getValueType() == MVT::bf16) {
2461 SDLoc Loc(Op);
2462 return DAG.getNode(
2463 Op.getOpcode(), Loc, Op.getValueType(),
2464 DAG.getNode(ISD::FP_EXTEND, Loc, MVT::f32, Op.getOperand(0)));
2465 }
2466
2467 // Everything else is considered legal.
2468 return Op;
2469}
2470
2471SDValue NVPTXTargetLowering::LowerFP_ROUND(SDValue Op,
2472 SelectionDAG &DAG) const {
2473 EVT NarrowVT = Op.getValueType();
2474 SDValue Wide = Op.getOperand(0);
2475 EVT WideVT = Wide.getValueType();
2476 if (NarrowVT.getScalarType() == MVT::bf16) {
2477 const TargetLowering *TLI = STI.getTargetLowering();
2478 if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 70) {
2479 return TLI->expandFP_ROUND(Op.getNode(), DAG);
2480 }
2481 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
2482 // This combination was the first to support f32 -> bf16.
2483 if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70) {
2484 if (WideVT.getScalarType() == MVT::f32) {
2485 return Op;
2486 }
2487 if (WideVT.getScalarType() == MVT::f64) {
2488 SDLoc Loc(Op);
2489 // Round-inexact-to-odd f64 to f32, then do the final rounding using
2490 // the hardware f32 -> bf16 instruction.
2492 WideVT.changeElementType(*DAG.getContext(), MVT::f32), Wide, Loc,
2493 DAG);
2494 return DAG.getFPExtendOrRound(rod, Loc, NarrowVT);
2495 }
2496 }
2497 return TLI->expandFP_ROUND(Op.getNode(), DAG);
2498 }
2499 }
2500
2501 // Everything else is considered legal.
2502 return Op;
2503}
2504
2505SDValue NVPTXTargetLowering::LowerFP_EXTEND(SDValue Op,
2506 SelectionDAG &DAG) const {
2507 SDValue Narrow = Op.getOperand(0);
2508 EVT NarrowVT = Narrow.getValueType();
2509 EVT WideVT = Op.getValueType();
2510 if (NarrowVT.getScalarType() == MVT::bf16) {
2511 if (WideVT.getScalarType() == MVT::f32 &&
2512 (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71)) {
2513 SDLoc Loc(Op);
2514 return DAG.getNode(ISD::BF16_TO_FP, Loc, WideVT, Narrow);
2515 }
2516 if (WideVT.getScalarType() == MVT::f64 &&
2517 (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78)) {
2518 EVT F32 = NarrowVT.changeElementType(*DAG.getContext(), MVT::f32);
2519 SDLoc Loc(Op);
2520 if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 71) {
2521 Op = DAG.getNode(ISD::FP_EXTEND, Loc, F32, Narrow);
2522 } else {
2523 Op = DAG.getNode(ISD::BF16_TO_FP, Loc, F32, Narrow);
2524 }
2525 return DAG.getNode(ISD::FP_EXTEND, Loc, WideVT, Op);
2526 }
2527 }
2528
2529 // Everything else is considered legal.
2530 return Op;
2531}
2532
2534 SDLoc DL(Op);
2535 if (Op.getValueType() != MVT::v2i16)
2536 return Op;
2537 EVT EltVT = Op.getValueType().getVectorElementType();
2538 SmallVector<SDValue> VecElements;
2539 for (int I = 0, E = Op.getValueType().getVectorNumElements(); I < E; I++) {
2540 SmallVector<SDValue> ScalarArgs;
2541 llvm::transform(Op->ops(), std::back_inserter(ScalarArgs),
2542 [&](const SDUse &O) {
2543 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
2544 O.get(), DAG.getIntPtrConstant(I, DL));
2545 });
2546 VecElements.push_back(DAG.getNode(Op.getOpcode(), DL, EltVT, ScalarArgs));
2547 }
2548 SDValue V =
2549 DAG.getNode(ISD::BUILD_VECTOR, DL, Op.getValueType(), VecElements);
2550 return V;
2551}
2552
2554 SDNode *N = Op.getNode();
2555 SDLoc DL(N);
2557
2558 // split the vector argument
2559 for (size_t I = 0; I < N->getNumOperands(); I++) {
2560 SDValue Val = N->getOperand(I);
2561 EVT ValVT = Val.getValueType();
2562 if (ValVT.isVector()) {
2563 EVT EltVT = ValVT.getVectorElementType();
2564 for (unsigned J = 0, NElts = ValVT.getVectorNumElements(); J < NElts; J++)
2565 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
2566 DAG.getIntPtrConstant(J, DL)));
2567 } else
2568 Ops.push_back(Val);
2569 }
2570
2572 SDValue Tcgen05StNode =
2573 DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, N->getVTList(), Ops,
2574 MemSD->getMemoryVT(), MemSD->getMemOperand());
2575
2576 return Tcgen05StNode;
2577}
2578
2580 SDLoc DL(Op);
2581 SDValue Src = Op.getOperand(0);
2582 EVT VT = Op.getValueType();
2583
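  // Each case byte-swaps via PRMT: selector 0x7701 swaps the two low bytes
  // (the 0x77 nibbles zero the upper half, which is truncated away), 0x0123
  // reverses all four bytes of an i32, and 0x2301 swaps the bytes within each
  // 16-bit half.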
2584 switch (VT.getSimpleVT().SimpleTy) {
2585 case MVT::i16: {
2586 SDValue Extended = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src);
2587 SDValue Swapped =
2588 getPRMT(Extended, DAG.getConstant(0, DL, MVT::i32), 0x7701, DL, DAG);
2589 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Swapped);
2590 }
2591 case MVT::i32: {
2592 return getPRMT(Src, DAG.getConstant(0, DL, MVT::i32), 0x0123, DL, DAG);
2593 }
2594 case MVT::v2i16: {
2595 SDValue Converted = DAG.getBitcast(MVT::i32, Src);
2596 SDValue Swapped =
2597 getPRMT(Converted, DAG.getConstant(0, DL, MVT::i32), 0x2301, DL, DAG);
2598 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i16, Swapped);
2599 }
2600 case MVT::i64: {
2601 SDValue UnpackSrc =
2602 DAG.getNode(NVPTXISD::UNPACK_VECTOR, DL, {MVT::i32, MVT::i32}, Src);
2603 SDValue SwappedLow =
2604 getPRMT(UnpackSrc.getValue(0), DAG.getConstant(0, DL, MVT::i32), 0x0123,
2605 DL, DAG);
2606 SDValue SwappedHigh =
2607 getPRMT(UnpackSrc.getValue(1), DAG.getConstant(0, DL, MVT::i32), 0x0123,
2608 DL, DAG);
2609 return DAG.getNode(NVPTXISD::BUILD_VECTOR, DL, MVT::i64,
2610 {SwappedHigh, SwappedLow});
2611 }
2612 default:
2613 llvm_unreachable("unsupported type for bswap");
2614 }
2615}
2616
2617static unsigned getTcgen05MMADisableOutputLane(unsigned IID) {
2618 switch (IID) {
2619 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
2620 return NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG1;
2621 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
2622 return NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG2;
2623 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
2624 return NVPTXISD::TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2625 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
2626 return NVPTXISD::TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2627 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
2628 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1;
2629 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
2630 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2;
2631 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
2632 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2633 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
2634 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2635 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
2636 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2637 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
2638 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2639 case Intrinsic::
2640 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
2641 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2642 case Intrinsic::
2643 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
2644 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2645 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
2646 return NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG1;
2647 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
2648 return NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG2;
2649 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
2650 return NVPTXISD::TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2651 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
2652 return NVPTXISD::TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2653 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
2654 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1;
2655 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
2656 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2;
2657 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
2658 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2659 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
2660 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2661 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
2662 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2663 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
2664 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2665 case Intrinsic::
2666 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift:
2667 return NVPTXISD::
2668 TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2669 case Intrinsic::
2670 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift:
2671 return NVPTXISD::
2672 TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2673 };
2674 llvm_unreachable("unhandled tcgen05.mma.disable_output_lane intrinsic");
2675}
2676
2678 SDNode *N = Op.getNode();
2679 SDLoc DL(N);
2680 unsigned IID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2681
2683 // split the vector argument
2684 for (size_t I = 0; I < N->getNumOperands(); I++) {
2685 if (I == 1)
2686 continue; // skip IID
2687 SDValue Val = N->getOperand(I);
2688 EVT ValVT = Val.getValueType();
2689 if (ValVT.isVector()) {
2690 EVT EltVT = ValVT.getVectorElementType();
2691 for (unsigned J = 0, NElts = ValVT.getVectorNumElements(); J < NElts; J++)
2692 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
2693 DAG.getIntPtrConstant(J, DL)));
2694 } else
2695 Ops.push_back(Val);
2696 }
2697
2699 SDValue Tcgen05MMANode = DAG.getMemIntrinsicNode(
2700 getTcgen05MMADisableOutputLane(IID), DL, N->getVTList(), Ops,
2701 MemSD->getMemoryVT(), MemSD->getMemOperand());
2702
2703 return Tcgen05MMANode;
2704}
2705
2706// Lower vector return type of tcgen05.ld intrinsics
2707static std::optional<std::pair<SDValue, SDValue>>
2708lowerTcgen05Ld(SDNode *N, SelectionDAG &DAG, bool HasOffset = false) {
2709 SDLoc DL(N);
2710 EVT ResVT = N->getValueType(0);
2711 if (!ResVT.isVector())
2712 return {}; // already legalized.
2713
2714 const unsigned NumElts = ResVT.getVectorNumElements();
2715
2716 // Create the return type of the instructions
2717 SmallVector<EVT, 5> ListVTs;
2718 for (unsigned i = 0; i < NumElts; ++i)
2719 ListVTs.push_back(MVT::i32);
2720
2721 ListVTs.push_back(N->getValueType(1)); // Chain
2722
2723 SDVTList ResVTs = DAG.getVTList(ListVTs);
2724
2725 SmallVector<SDValue, 8> Ops{N->getOperand(0), N->getOperand(1),
2726 N->getOperand(2)};
2727
2728 if (HasOffset) {
2729 Ops.push_back(N->getOperand(3)); // offset
2730 Ops.push_back(N->getOperand(4)); // Pack flag
2731 } else
2732 Ops.push_back(N->getOperand(3)); // Pack flag
2733
2735 SDValue NewNode =
2737 MemSD->getMemoryVT(), MemSD->getMemOperand());
2738
2739 // split the vector result
2740 SmallVector<SDValue, 4> ScalarRes;
2741 for (unsigned i = 0; i < NumElts; ++i) {
2742 SDValue Res = NewNode.getValue(i);
2743 ScalarRes.push_back(Res);
2744 }
2745
2746 SDValue Chain = NewNode.getValue(NumElts);
2747 SDValue BuildVector = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
2748 return {{BuildVector, Chain}};
2749}
2750
2752 unsigned Val) {
2753 SDNode *N = Op.getNode();
2754 SDLoc DL(N);
2755
2756 const Function &Fn = DAG.getMachineFunction().getFunction();
2757
2758 unsigned AS = 0;
2759 if (auto *MemN = dyn_cast<MemIntrinsicSDNode>(N))
2760 AS = MemN->getAddressSpace();
2761 Type *PtrTy = PointerType::get(*DAG.getContext(), AS);
2763
2765 Fn,
2766 "Intrinsic " +
2767 Intrinsic::getName(N->getConstantOperandVal(1), {PtrTy}, M) +
2768 " with value " + Twine(Val) +
2769 " is not supported on the given target.",
2770 DL.getDebugLoc()));
2771 return Op.getOperand(0);
2772}
2773
2775 SDNode *N = Op.getNode();
2776 SDLoc DL(N);
2777
2778 // immediate argument representing elemtype
2779 unsigned Val = N->getConstantOperandVal(3);
2780
2782 Val))
2783 return reportInvalidTensormapReplaceUsage(Op, DAG, Val);
2784
2785 return Op;
2786}
2787
2789 SDNode *N = Op.getNode();
2790 SDLoc DL(N);
2791
2792 // immediate argument representing swizzle mode
2793 unsigned Val = N->getConstantOperandVal(3);
2794
2796 Val))
2797 return reportInvalidTensormapReplaceUsage(Op, DAG, Val);
2798
2799 return Op;
2800}
2801
2803 SDNode *N = Op.getNode();
2804 SDValue Intrin = N->getOperand(1);
2805
2806 // Get the intrinsic ID
2807 unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
2808 switch (IntrinNo) {
2809 default:
2810 break;
2811 case Intrinsic::nvvm_tcgen05_st_16x64b_x1:
2812 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:
2813 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:
2814 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:
2815 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:
2816 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:
2817 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:
2818 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:
2819 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:
2820 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:
2821 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:
2822 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:
2823 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:
2824 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:
2825 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:
2826 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:
2827 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:
2828 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:
2829 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:
2830 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:
2831 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x1:
2832 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2:
2833 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4:
2834 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8:
2835 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16:
2836 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32:
2837 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64:
2838 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128:
2839 case Intrinsic::nvvm_tcgen05_st_32x32b_x1:
2840 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:
2841 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:
2842 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:
2843 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:
2844 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:
2845 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:
2846 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
2847 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:
2848 return lowerTcgen05St(Op, DAG);
2849 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
2850 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
2851 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
2852 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
2853 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
2854 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
2855 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
2856 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
2857 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
2858 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
2859 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
2860 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
2861 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
2862 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
2863 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
2864 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
2865 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
2866 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
2867 case Intrinsic::
2868 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
2869 case Intrinsic::
2870 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
2871 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
2872 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
2873 case Intrinsic::
2874 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift:
2875 case Intrinsic::
2876 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift:
2878 case Intrinsic::nvvm_tensormap_replace_elemtype:
2879 return lowerTensormapReplaceElemtype(Op, DAG);
2880 case Intrinsic::nvvm_tensormap_replace_swizzle_mode:
2882 }
2883 return Op;
2884}
2885
2887 SelectionDAG &DAG) {
2888
2889 SDNode *N = Op.getNode();
2890 if (N->getOperand(1).getValueType() != MVT::i128) {
2891 // Return if the operand is already lowered.
2892 return SDValue();
2893 }
2894
2895 unsigned IID =
2896 cast<ConstantSDNode>(N->getOperand(0).getNode())->getZExtValue();
2897 auto Opcode = [&]() {
2898 switch (IID) {
2899 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:
2900 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_IS_CANCELED;
2901 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:
2902 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_X;
2903 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
2904 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Y;
2905 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:
2906 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Z;
2907 default:
2908 llvm_unreachable("unsupported/unhandled intrinsic");
2909 }
2910 }();
2911
2912 SDLoc DL(N);
2913 SDValue TryCancelResponse = N->getOperand(1);
2914 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TryCancelResponse);
2915 SDValue TryCancelResponse0 =
2916 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
2917 DAG.getIntPtrConstant(0, DL));
2918 SDValue TryCancelResponse1 =
2919 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
2920 DAG.getIntPtrConstant(1, DL));
2921
2922 return DAG.getNode(Opcode, DL, N->getVTList(),
2923 {TryCancelResponse0, TryCancelResponse1});
2924}
2925
2927 SDNode *N = Op.getNode();
2928 SDLoc DL(N);
2929 SDValue F32Vec = N->getOperand(1);
2930 SDValue RBits = N->getOperand(2);
2931
2932 unsigned IntrinsicID = N->getConstantOperandVal(0);
2933
2934 // Extract the 4 float elements from the vector
2936 for (unsigned i = 0; i < 4; ++i)
2937 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, F32Vec,
2938 DAG.getIntPtrConstant(i, DL)));
2939
2941
2942 auto [OpCode, RetTy, CvtModeFlag] =
2943 [&]() -> std::tuple<unsigned, MVT::SimpleValueType, uint32_t> {
2944 switch (IntrinsicID) {
2945 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:
2946 return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8,
2947 CvtMode::RS | CvtMode::RELU_FLAG};
2948 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:
2949 return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2950 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:
2951 return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8,
2952 CvtMode::RS | CvtMode::RELU_FLAG};
2953 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:
2954 return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2955 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:
2956 return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8,
2957 CvtMode::RS | CvtMode::RELU_FLAG};
2958 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:
2959 return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2960 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:
2961 return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8,
2962 CvtMode::RS | CvtMode::RELU_FLAG};
2963 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:
2964 return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2965 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:
2966 return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16,
2967 CvtMode::RS | CvtMode::RELU_FLAG};
2968 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:
2969 return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16, CvtMode::RS};
2970 default:
2971 llvm_unreachable("unsupported/unhandled intrinsic");
2972 }
2973 }();
2974
2975 Ops.push_back(RBits);
2976 Ops.push_back(DAG.getConstant(CvtModeFlag, DL, MVT::i32));
2977
2978 return DAG.getNode(OpCode, DL, RetTy, Ops);
2979}
2980
2982 const unsigned Mode = [&]() {
2983 switch (Op->getConstantOperandVal(0)) {
2984 case Intrinsic::nvvm_prmt:
2986 case Intrinsic::nvvm_prmt_b4e:
2988 case Intrinsic::nvvm_prmt_ecl:
2990 case Intrinsic::nvvm_prmt_ecr:
2992 case Intrinsic::nvvm_prmt_f4e:
2994 case Intrinsic::nvvm_prmt_rc16:
2996 case Intrinsic::nvvm_prmt_rc8:
2998 default:
2999 llvm_unreachable("unsupported/unhandled intrinsic");
3000 }
3001 }();
3002 SDLoc DL(Op);
3003 SDValue A = Op->getOperand(1);
3004 SDValue B = Op.getNumOperands() == 4 ? Op.getOperand(2)
3005 : DAG.getConstant(0, DL, MVT::i32);
3006 SDValue Selector = (Op->op_end() - 1)->get();
3007 return getPRMT(A, B, Selector, DL, DAG, Mode);
3008}
3009
3010#define TCGEN05_LD_RED_INTR(SHAPE, NUM, TYPE) \
3011 Intrinsic::nvvm_tcgen05_ld_red_##SHAPE##_x##NUM##_##TYPE
3012
3013#define TCGEN05_LD_RED_INST(SHAPE, NUM, TYPE) \
3014 NVPTXISD::TCGEN05_LD_RED_##SHAPE##_X##NUM##_##TYPE
3015
3016static unsigned getTcgen05LdRedID(Intrinsic::ID IID) {
3017 switch (IID) {
3018 case TCGEN05_LD_RED_INTR(32x32b, 2, f32):
3019 return TCGEN05_LD_RED_INST(32x32b, 2, F32);
3020 case TCGEN05_LD_RED_INTR(32x32b, 4, f32):
3021 return TCGEN05_LD_RED_INST(32x32b, 4, F32);
3022 case TCGEN05_LD_RED_INTR(32x32b, 8, f32):
3023 return TCGEN05_LD_RED_INST(32x32b, 8, F32);
3024 case TCGEN05_LD_RED_INTR(32x32b, 16, f32):
3025 return TCGEN05_LD_RED_INST(32x32b, 16, F32);
3026 case TCGEN05_LD_RED_INTR(32x32b, 32, f32):
3027 return TCGEN05_LD_RED_INST(32x32b, 32, F32);
3028 case TCGEN05_LD_RED_INTR(32x32b, 64, f32):
3029 return TCGEN05_LD_RED_INST(32x32b, 64, F32);
3030 case TCGEN05_LD_RED_INTR(32x32b, 128, f32):
3031 return TCGEN05_LD_RED_INST(32x32b, 128, F32);
3032 case TCGEN05_LD_RED_INTR(16x32bx2, 2, f32):
3033 return TCGEN05_LD_RED_INST(16x32bx2, 2, F32);
3034 case TCGEN05_LD_RED_INTR(16x32bx2, 4, f32):
3035 return TCGEN05_LD_RED_INST(16x32bx2, 4, F32);
3036 case TCGEN05_LD_RED_INTR(16x32bx2, 8, f32):
3037 return TCGEN05_LD_RED_INST(16x32bx2, 8, F32);
3038 case TCGEN05_LD_RED_INTR(16x32bx2, 16, f32):
3039 return TCGEN05_LD_RED_INST(16x32bx2, 16, F32);
3040 case TCGEN05_LD_RED_INTR(16x32bx2, 32, f32):
3041 return TCGEN05_LD_RED_INST(16x32bx2, 32, F32);
3042 case TCGEN05_LD_RED_INTR(16x32bx2, 64, f32):
3043 return TCGEN05_LD_RED_INST(16x32bx2, 64, F32);
3044 case TCGEN05_LD_RED_INTR(16x32bx2, 128, f32):
3045 return TCGEN05_LD_RED_INST(16x32bx2, 128, F32);
3046 case TCGEN05_LD_RED_INTR(32x32b, 2, i32):
3047 return TCGEN05_LD_RED_INST(32x32b, 2, I32);
3048 case TCGEN05_LD_RED_INTR(32x32b, 4, i32):
3049 return TCGEN05_LD_RED_INST(32x32b, 4, I32);
3050 case TCGEN05_LD_RED_INTR(32x32b, 8, i32):
3051 return TCGEN05_LD_RED_INST(32x32b, 8, I32);
3052 case TCGEN05_LD_RED_INTR(32x32b, 16, i32):
3053 return TCGEN05_LD_RED_INST(32x32b, 16, I32);
3054 case TCGEN05_LD_RED_INTR(32x32b, 32, i32):
3055 return TCGEN05_LD_RED_INST(32x32b, 32, I32);
3056 case TCGEN05_LD_RED_INTR(32x32b, 64, i32):
3057 return TCGEN05_LD_RED_INST(32x32b, 64, I32);
3058 case TCGEN05_LD_RED_INTR(32x32b, 128, i32):
3059 return TCGEN05_LD_RED_INST(32x32b, 128, I32);
3060 case TCGEN05_LD_RED_INTR(16x32bx2, 2, i32):
3061 return TCGEN05_LD_RED_INST(16x32bx2, 2, I32);
3062 case TCGEN05_LD_RED_INTR(16x32bx2, 4, i32):
3063 return TCGEN05_LD_RED_INST(16x32bx2, 4, I32);
3064 case TCGEN05_LD_RED_INTR(16x32bx2, 8, i32):
3065 return TCGEN05_LD_RED_INST(16x32bx2, 8, I32);
3066 case TCGEN05_LD_RED_INTR(16x32bx2, 16, i32):
3067 return TCGEN05_LD_RED_INST(16x32bx2, 16, I32);
3068 case TCGEN05_LD_RED_INTR(16x32bx2, 32, i32):
3069 return TCGEN05_LD_RED_INST(16x32bx2, 32, I32);
3070 case TCGEN05_LD_RED_INTR(16x32bx2, 64, i32):
3071 return TCGEN05_LD_RED_INST(16x32bx2, 64, I32);
3072 case TCGEN05_LD_RED_INTR(16x32bx2, 128, i32):
3073 return TCGEN05_LD_RED_INST(16x32bx2, 128, I32);
3074 default:
3075 llvm_unreachable("Invalid tcgen05.ld.red intrinsic ID");
3076 }
3077}
3078
3079// Lower vector return type of tcgen05.ld.red intrinsics
3080static std::optional<std::tuple<SDValue, SDValue, SDValue>>
3082 SDLoc DL(N);
3083 EVT ResVT = N->getValueType(0);
3084 if (!ResVT.isVector())
3085 return {}; // already legalized.
3086
3087 const unsigned NumElts = ResVT.getVectorNumElements();
3088
3089 // Create the return type of the instructions
3090 // +1 represents the reduction value
3091 SmallVector<EVT, 132> ListVTs{
3092 NumElts + 1,
3093 ResVT.getVectorElementType().isFloatingPoint() ? MVT::f32 : MVT::i32};
3094
3095 ListVTs.push_back(MVT::Other); // Chain
3096
3097 SDVTList ResVTs = DAG.getVTList(ListVTs);
3098
3099 // Prepare the Operands
3100 SmallVector<SDValue, 8> Ops{N->getOperand(0)}; // Chain
3101
3102 // skip IID at index 1
3103 for (unsigned i = 2; i < N->getNumOperands(); i++)
3104 Ops.push_back(N->getOperand(i));
3105
3106 unsigned IID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
3108 SDValue NewNode =
3109 DAG.getMemIntrinsicNode(getTcgen05LdRedID(IID), DL, ResVTs, Ops,
3110 MemSD->getMemoryVT(), MemSD->getMemOperand());
3111
3112 // Split vector result
3113 SmallVector<SDValue, 132> ScalarRes;
3114 for (unsigned i = 0; i < NumElts; ++i) {
3115 SDValue Res = NewNode.getValue(i);
3116 ScalarRes.push_back(Res);
3117 }
3118
3119 SDValue BuildVector = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
3120 SDValue RedResult = NewNode.getValue(NumElts);
3121 SDValue Chain = NewNode.getValue(NumElts + 1);
3122 return {{BuildVector, RedResult, Chain}};
3123}
3124
3126 switch (Op->getConstantOperandVal(1)) {
3127 default:
3128 return Op;
3129
3130 // These tcgen05 intrinsics return a v2i32, which is legal, so we have to
3131 // lower them through LowerOperation() instead of ReplaceNodeResults().
3132 case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:
3133 case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:
3134 case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:
3135 if (auto Res = lowerTcgen05Ld(Op.getNode(), DAG))
3136 return DAG.getMergeValues({Res->first, Res->second}, SDLoc(Op));
3137 return SDValue();
3138
3139 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2:
3140 if (auto Res = lowerTcgen05Ld(Op.getNode(), DAG, /*HasOffset=*/true))
3141 return DAG.getMergeValues({Res->first, Res->second}, SDLoc(Op));
3142 return SDValue();
3143
3144 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x2_f32:
3145 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x2_i32:
3146 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x2_f32:
3147 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x2_i32:
3148 if (auto Res = lowerTcgen05LdRed(Op.getNode(), DAG))
3149 return DAG.getMergeValues(
3150 {std::get<0>(*Res), std::get<1>(*Res), std::get<2>(*Res)}, SDLoc(Op));
3151 return SDValue();
3152 }
3153}
3154
3156 switch (Op->getConstantOperandVal(0)) {
3157 default:
3158 return Op;
3159 case Intrinsic::nvvm_prmt:
3160 case Intrinsic::nvvm_prmt_b4e:
3161 case Intrinsic::nvvm_prmt_ecl:
3162 case Intrinsic::nvvm_prmt_ecr:
3163 case Intrinsic::nvvm_prmt_f4e:
3164 case Intrinsic::nvvm_prmt_rc16:
3165 case Intrinsic::nvvm_prmt_rc8:
3166 return lowerPrmtIntrinsic(Op, DAG);
3167 case Intrinsic::nvvm_internal_addrspace_wrap:
3168 return Op.getOperand(1);
3169 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:
3170 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:
3171 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
3172 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:
3174 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:
3175 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:
3176 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:
3177 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:
3178 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:
3179 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:
3180 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:
3181 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:
3182 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:
3183 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:
3184 return lowerCvtRSIntrinsics(Op, DAG);
3185 }
3186}
3187
3188// In PTX, 64-bit CTLZ and CTPOP are supported, but they return a 32-bit value.
3189// Lower these into a node that returns an i32 result, which is then
3190// zero-extended back to i64.
3192 SDValue V = Op->getOperand(0);
3193 assert(V.getValueType() == MVT::i64 &&
3194 "Unexpected CTLZ/CTPOP type to legalize");
3195
3196 SDLoc DL(Op);
3197 SDValue CT = DAG.getNode(Op->getOpcode(), DL, MVT::i32, V);
3198 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, CT, SDNodeFlags::NonNeg);
3199}
3200
3202 unsigned Opcode, SelectionDAG &DAG) {
3203 assert(A.getValueType() == MVT::i64 && B.getValueType() == MVT::i64);
3204
3205 const auto *AmtConst = dyn_cast<ConstantSDNode>(ShiftAmount);
3206 if (!AmtConst)
3207 return SDValue();
3208 const auto Amt = AmtConst->getZExtValue() & 63;
3209
3210 SDValue UnpackA =
3211 DAG.getNode(NVPTXISD::UNPACK_VECTOR, DL, {MVT::i32, MVT::i32}, A);
3212 SDValue UnpackB =
3213 DAG.getNode(NVPTXISD::UNPACK_VECTOR, DL, {MVT::i32, MVT::i32}, B);
3214
3215 // The arch is little endian: 0 = low bits, 1 = high bits.
3216 SDValue ALo = UnpackA.getValue(0);
3217 SDValue AHi = UnpackA.getValue(1);
3218 SDValue BLo = UnpackB.getValue(0);
3219 SDValue BHi = UnpackB.getValue(1);
3220
3221 // The bitfield consists of { AHi : ALo : BHi : BLo }
3222 //
3223 // * FSHL, Amt < 32 - The window will contain { AHi : ALo : BHi }
3224 // * FSHL, Amt >= 32 - The window will contain { ALo : BHi : BLo }
3225 // * FSHR, Amt < 32 - The window will contain { ALo : BHi : BLo }
3226 // * FSHR, Amt >= 32 - The window will contain { AHi : ALo : BHi }
3227 //
3228 // Note that Amt = 0 and Amt = 32 are special cases where 32-bit funnel shifts
3229 // are not needed at all. Amt = 0 is a no-op producing either A or B depending
3230 // on the direction. Amt = 32 can be implemented by a packing and unpacking
3231 // move to select and arrange the 32-bit values. For simplicity, these cases
3232 // are not handled here explicitly and instead we rely on DAGCombiner to
3233 // remove the no-op funnel shifts we insert.
3234 auto [High, Mid, Low] = ((Opcode == ISD::FSHL) == (Amt < 32))
3235 ? std::make_tuple(AHi, ALo, BHi)
3236 : std::make_tuple(ALo, BHi, BLo);
3237
3238 SDValue NewAmt = DAG.getConstant(Amt & 31, DL, MVT::i32);
3239 SDValue RHi = DAG.getNode(Opcode, DL, MVT::i32, {High, Mid, NewAmt});
3240 SDValue RLo = DAG.getNode(Opcode, DL, MVT::i32, {Mid, Low, NewAmt});
3241
3242 return DAG.getNode(NVPTXISD::BUILD_VECTOR, DL, MVT::i64, {RLo, RHi});
3243}
3244
3246 return expandFSH64(Op->getOperand(0), Op->getOperand(1), Op->getOperand(2),
3247 SDLoc(Op), Op->getOpcode(), DAG);
3248}
3249
3251 unsigned Opcode = Op->getOpcode() == ISD::ROTL ? ISD::FSHL : ISD::FSHR;
3252 return expandFSH64(Op->getOperand(0), Op->getOperand(0), Op->getOperand(1),
3253 SDLoc(Op), Opcode, DAG);
3254}
3255
3257 // Lower (frem x, y) into (sub x, (mul (ftrunc (div x, y)) y)),
3258 // i.e. "poor man's fmod()". When y is infinite, x is returned. This matches
3259 // the semantics of LLVM's frem.
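// For example, frem(5.5, 2.0) computes 5.5 - ftrunc(5.5 / 2.0) * 2.0
// = 5.5 - 2.0 * 2.0 = 1.5.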
3260 SDLoc DL(Op);
3261 SDValue X = Op->getOperand(0);
3262 SDValue Y = Op->getOperand(1);
3263 EVT Ty = Op.getValueType();
3264 SDNodeFlags Flags = Op->getFlags();
3265
3266 SDValue Div = DAG.getNode(ISD::FDIV, DL, Ty, X, Y, Flags);
3267 SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, Ty, Div, Flags);
3268 SDValue Mul = DAG.getNode(ISD::FMUL, DL, Ty, Trunc, Y,
3270 SDValue Sub = DAG.getNode(ISD::FSUB, DL, Ty, X, Mul,
3272
3273 if (Flags.hasNoInfs())
3274 return Sub;
3275
3276 // If Y is infinite, return X
3277 SDValue AbsY = DAG.getNode(ISD::FABS, DL, Ty, Y);
3278 SDValue Inf =
3279 DAG.getConstantFP(APFloat::getInf(Ty.getFltSemantics()), DL, Ty);
3280 SDValue IsInf = DAG.getSetCC(DL, MVT::i1, AbsY, Inf, ISD::SETEQ);
3281 return DAG.getSelect(DL, Ty, IsInf, X, Sub);
3282}
3283
3285 assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");
3286
3287 SDValue Cond = Op->getOperand(0);
3288 SDValue TrueVal = Op->getOperand(1);
3289 SDValue FalseVal = Op->getOperand(2);
3290 SDLoc DL(Op);
3291
3292 // If both operands are truncated, we push the select through the truncates.
3293 if (TrueVal.getOpcode() == ISD::TRUNCATE &&
3294 FalseVal.getOpcode() == ISD::TRUNCATE) {
3295 TrueVal = TrueVal.getOperand(0);
3296 FalseVal = FalseVal.getOperand(0);
3297
3298 EVT VT = TrueVal.getSimpleValueType().bitsLE(FalseVal.getSimpleValueType())
3299 ? TrueVal.getValueType()
3300 : FalseVal.getValueType();
3301 TrueVal = DAG.getAnyExtOrTrunc(TrueVal, DL, VT);
3302 FalseVal = DAG.getAnyExtOrTrunc(FalseVal, DL, VT);
3303 SDValue Select = DAG.getSelect(DL, VT, Cond, TrueVal, FalseVal);
3304 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Select);
3305 }
3306
3307 // Otherwise, expand the select into a series of logical operations. These
3308 // often can be folded into other operations either by us or ptxas.
3309 TrueVal = DAG.getFreeze(TrueVal);
3310 FalseVal = DAG.getFreeze(FalseVal);
3311 SDValue And1 = DAG.getNode(ISD::AND, DL, MVT::i1, Cond, TrueVal);
3312 SDValue NotCond = DAG.getNOT(DL, Cond, MVT::i1);
3313 SDValue And2 = DAG.getNode(ISD::AND, DL, MVT::i1, NotCond, FalseVal);
3314 SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i1, And1, And2);
3315 return Or;
3316}
3317
3319 SDNode *N = Op.getNode();
3320
3321 SDValue Chain = N->getOperand(0);
3322 SDValue Val = N->getOperand(1);
3323 SDValue BasePtr = N->getOperand(2);
3324 SDValue Offset = N->getOperand(3);
3325 SDValue Mask = N->getOperand(4);
3326
3327 SDLoc DL(N);
3328 EVT ValVT = Val.getValueType();
3329 MemSDNode *MemSD = cast<MemSDNode>(N);
3330 assert(ValVT.isVector() && "Masked vector store must have vector type");
3331 assert(MemSD->getAlign() >= DAG.getEVTAlign(ValVT) &&
3332 "Unexpected alignment for masked store");
3333
3334 unsigned Opcode = 0;
3335 switch (ValVT.getSimpleVT().SimpleTy) {
3336 default:
3337 llvm_unreachable("Unexpected masked vector store type");
3338 case MVT::v4i64:
3339 case MVT::v4f64: {
3340 Opcode = NVPTXISD::StoreV4;
3341 break;
3342 }
3343 case MVT::v8i32:
3344 case MVT::v8f32: {
3345 Opcode = NVPTXISD::StoreV8;
3346 break;
3347 }
3348 }
3349
3351
3352 // Construct the new SDNode. First operand is the chain.
3353 Ops.push_back(Chain);
3354
3355 // The next N operands are the values to store. Encode the mask into the
3356 // values using the sentinel register 0 to represent a masked-off element.
3357 assert(Mask.getValueType().isVector() &&
3358 Mask.getValueType().getVectorElementType() == MVT::i1 &&
3359 "Mask must be a vector of i1");
3360 assert(Mask.getOpcode() == ISD::BUILD_VECTOR &&
3361 "Mask expected to be a BUILD_VECTOR");
3362 assert(Mask.getValueType().getVectorNumElements() ==
3363 ValVT.getVectorNumElements() &&
3364 "Mask size must be the same as the vector size");
3365 for (auto [I, Op] : enumerate(Mask->ops())) {
3366 // Mask elements must be constants.
3367 if (Op.getNode()->getAsZExtVal() == 0) {
3368 // Append a sentinel register 0 to the Ops vector to represent a
3369 // masked-off element; this will be handled in tablegen.
3371 ValVT.getVectorElementType()));
3372 } else {
3373 // Extract the element from the vector to store
3374 SDValue ExtVal =
3376 Val, DAG.getIntPtrConstant(I, DL));
3377 Ops.push_back(ExtVal);
3378 }
3379 }
3380
3381 // Next, the pointer operand.
3382 Ops.push_back(BasePtr);
3383
3384 // Finally, the offset operand. We expect this to always be undef, and it will
3385 // be ignored in lowering, but to mirror the handling of the other vector
3386 // store instructions we include it in the new SDNode.
3387 assert(Offset.getOpcode() == ISD::UNDEF &&
3388 "Offset operand expected to be undef");
3389 Ops.push_back(Offset);
3390
3391 SDValue NewSt =
3392 DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
3393 MemSD->getMemoryVT(), MemSD->getMemOperand());
3394
3395 return NewSt;
3396}
3397
3398SDValue
3400 switch (Op.getOpcode()) {
3401 case ISD::RETURNADDR:
3402 return SDValue();
3403 case ISD::FRAMEADDR:
3404 return SDValue();
3405 case ISD::ADDRSPACECAST:
3406 return LowerADDRSPACECAST(Op, DAG);
3408 return lowerIntrinsicWChain(Op, DAG);
3410 return lowerIntrinsicWOChain(Op, DAG);
3412 return lowerIntrinsicVoid(Op, DAG);
3413 case ISD::BUILD_VECTOR:
3414 return LowerBUILD_VECTOR(Op, DAG);
3415 case ISD::BITCAST:
3416 return LowerBITCAST(Op, DAG);
3418 return Op;
3420 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
3422 return LowerINSERT_VECTOR_ELT(Op, DAG);
3424 return LowerVECTOR_SHUFFLE(Op, DAG);
3426 return LowerCONCAT_VECTORS(Op, DAG);
3431 return LowerVECREDUCE(Op, DAG);
3432 case ISD::STORE:
3433 return LowerSTORE(Op, DAG);
3434 case ISD::MSTORE: {
3435 assert(STI.has256BitVectorLoadStore(
3436 cast<MemSDNode>(Op.getNode())->getAddressSpace()) &&
3437 "Masked store vector not supported on subtarget.");
3438 return lowerMSTORE(Op, DAG);
3439 }
3440 case ISD::LOAD:
3441 return LowerLOAD(Op, DAG);
3442 case ISD::MLOAD:
3443 return LowerMLOAD(Op, DAG);
3444 case ISD::SHL_PARTS:
3445 return LowerShiftLeftParts(Op, DAG);
3446 case ISD::SRA_PARTS:
3447 case ISD::SRL_PARTS:
3448 return LowerShiftRightParts(Op, DAG);
3449 case ISD::SELECT:
3450 return lowerSELECT(Op, DAG);
3451 case ISD::FROUND:
3452 return LowerFROUND(Op, DAG);
3453 case ISD::FCOPYSIGN:
3454 return LowerFCOPYSIGN(Op, DAG);
3455 case ISD::SINT_TO_FP:
3456 case ISD::UINT_TO_FP:
3457 return LowerINT_TO_FP(Op, DAG);
3458 case ISD::FP_TO_SINT:
3459 case ISD::FP_TO_UINT:
3460 return LowerFP_TO_INT(Op, DAG);
3461 case ISD::FP_ROUND:
3462 return LowerFP_ROUND(Op, DAG);
3463 case ISD::FP_EXTEND:
3464 return LowerFP_EXTEND(Op, DAG);
3465 case ISD::VAARG:
3466 return LowerVAARG(Op, DAG);
3467 case ISD::VASTART:
3468 return LowerVASTART(Op, DAG);
3469 case ISD::FSHL:
3470 case ISD::FSHR:
3471 return lowerFSH(Op, DAG);
3472 case ISD::ROTL:
3473 case ISD::ROTR:
3474 return lowerROT(Op, DAG);
3475 case ISD::ABS:
3476 case ISD::SMIN:
3477 case ISD::SMAX:
3478 case ISD::UMIN:
3479 case ISD::UMAX:
3480 case ISD::ADD:
3481 case ISD::SUB:
3482 case ISD::MUL:
3483 case ISD::SHL:
3484 case ISD::SREM:
3485 case ISD::UREM:
3486 return LowerVectorArith(Op, DAG);
3488 return LowerDYNAMIC_STACKALLOC(Op, DAG);
3489 case ISD::STACKRESTORE:
3490 return LowerSTACKRESTORE(Op, DAG);
3491 case ISD::STACKSAVE:
3492 return LowerSTACKSAVE(Op, DAG);
3493 case ISD::CopyToReg:
3494 return LowerCopyToReg_128(Op, DAG);
3495 case ISD::FADD:
3496 case ISD::FSUB:
3497 case ISD::FMUL:
3498 // Used only for bf16 on SM80, where we select fma for non-ftz operation
3499 return PromoteBinOpIfF32FTZ(Op, DAG);
3500 case ISD::CTPOP:
3501 case ISD::CTLZ:
3502 return lowerCTLZCTPOP(Op, DAG);
3503 case ISD::FREM:
3504 return lowerFREM(Op, DAG);
3505 case ISD::BSWAP:
3506 return lowerBSWAP(Op, DAG);
3507 default:
3508 llvm_unreachable("Custom lowering not defined for operation");
3509 }
3510}
3511
3512// This will prevent AsmPrinter from trying to print the jump tables itself.
3516
3517SDValue NVPTXTargetLowering::LowerADDRSPACECAST(SDValue Op,
3518 SelectionDAG &DAG) const {
3520 unsigned SrcAS = N->getSrcAddressSpace();
3521 unsigned DestAS = N->getDestAddressSpace();
3522 if (SrcAS != llvm::ADDRESS_SPACE_GENERIC &&
3523 DestAS != llvm::ADDRESS_SPACE_GENERIC) {
3524 // Shared and SharedCluster can be converted to each other through generic
3525 // space
3526 if ((SrcAS == llvm::ADDRESS_SPACE_SHARED &&
3529 DestAS == llvm::ADDRESS_SPACE_SHARED)) {
3530 SDLoc DL(Op.getNode());
3531 const MVT GenericVT =
3533 SDValue GenericConversion = DAG.getAddrSpaceCast(
3534 DL, GenericVT, Op.getOperand(0), SrcAS, ADDRESS_SPACE_GENERIC);
3535 SDValue SharedClusterConversion =
3536 DAG.getAddrSpaceCast(DL, Op.getValueType(), GenericConversion,
3537 ADDRESS_SPACE_GENERIC, DestAS);
3538 return SharedClusterConversion;
3539 }
3540
3541 return DAG.getUNDEF(Op.getValueType());
3542 }
3543
3544 return Op;
3545}
3546
3547 // This function is almost a copy of SelectionDAG::expandVAArg(). The only
3548 // difference is that this one produces loads from the local address space.
3549SDValue NVPTXTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3550 const TargetLowering *TLI = STI.getTargetLowering();
3551 SDLoc DL(Op);
3552
3553 SDNode *Node = Op.getNode();
3554 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3555 EVT VT = Node->getValueType(0);
3556 auto *Ty = VT.getTypeForEVT(*DAG.getContext());
3557 SDValue Tmp1 = Node->getOperand(0);
3558 SDValue Tmp2 = Node->getOperand(1);
3559 const MaybeAlign MA(Node->getConstantOperandVal(3));
3560
3561 SDValue VAListLoad = DAG.getLoad(TLI->getPointerTy(DAG.getDataLayout()), DL,
3562 Tmp1, Tmp2, MachinePointerInfo(V));
3563 SDValue VAList = VAListLoad;
3564
3565 if (MA && *MA > TLI->getMinStackArgumentAlignment()) {
3566 VAList = DAG.getNode(
3567 ISD::ADD, DL, VAList.getValueType(), VAList,
3568 DAG.getConstant(MA->value() - 1, DL, VAList.getValueType()));
3569
3570 VAList = DAG.getNode(ISD::AND, DL, VAList.getValueType(), VAList,
3571 DAG.getSignedConstant(-(int64_t)MA->value(), DL,
3572 VAList.getValueType()));
3573 }
3574
3575 // Increment the pointer, VAList, to the next vaarg
3576 Tmp1 = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
3578 DL, VAList.getValueType()));
3579
3580 // Store the incremented VAList to the legalized pointer
3581 Tmp1 = DAG.getStore(VAListLoad.getValue(1), DL, Tmp1, Tmp2,
3582 MachinePointerInfo(V));
3583
3584 const Value *SrcV = Constant::getNullValue(
3586
3587 // Load the actual argument out of the pointer VAList
3588 return DAG.getLoad(VT, DL, Tmp1, VAList, MachinePointerInfo(SrcV));
3589}
3590
3591SDValue NVPTXTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3592 const TargetLowering *TLI = STI.getTargetLowering();
3593 SDLoc DL(Op);
3594 EVT PtrVT = TLI->getPointerTy(DAG.getDataLayout());
3595
3596 // Store the address of unsized array <function>_vararg[] in the ap object.
3597 SDValue VAReg = getParamSymbol(DAG, /* vararg */ -1, PtrVT);
3598
3599 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3600 return DAG.getStore(Op.getOperand(0), DL, VAReg, Op.getOperand(1),
3601 MachinePointerInfo(SV));
3602}
3603
3604static std::pair<MemSDNode *, uint32_t>
3606 const NVPTXSubtarget &STI) {
3607 SDValue Chain = N->getOperand(0);
3608 SDValue BasePtr = N->getOperand(1);
3609 SDValue Mask = N->getOperand(3);
3610 [[maybe_unused]] SDValue Passthru = N->getOperand(4);
3611
3612 SDLoc DL(N);
3613 EVT ResVT = N->getValueType(0);
3614 assert(ResVT.isVector() && "Masked vector load must have vector type");
3615 // While we only expect poison passthru vectors as an input to the backend,
3616 // when the legalization framework splits a poison vector in half, it creates
3617 // two undef vectors, so we can technically expect those too.
3618 assert((Passthru.getOpcode() == ISD::POISON ||
3619 Passthru.getOpcode() == ISD::UNDEF) &&
3620 "Passthru operand expected to be poison or undef");
3621
3622 // Extract the mask and convert it to a uint32_t representing the used bytes
3623 // of the entire vector load
3624 uint32_t UsedBytesMask = 0;
3625 uint32_t ElementSizeInBits = ResVT.getVectorElementType().getSizeInBits();
3626 assert(ElementSizeInBits % 8 == 0 && "Unexpected element size");
3627 uint32_t ElementSizeInBytes = ElementSizeInBits / 8;
3628 uint32_t ElementMask = (1u << ElementSizeInBytes) - 1u;
3629
3630 for (SDValue Op : reverse(Mask->ops())) {
3631 // This shift is only needed on every iteration after the first, but on
3632 // the first iteration UsedBytesMask is still 0, so the extra shift is a
3633 // no-op.
3634 UsedBytesMask <<= ElementSizeInBytes;
3635
3636 // Mask elements must be constants.
3637 if (Op->getAsZExtVal() != 0)
3638 UsedBytesMask |= ElementMask;
3639 }
3640
3641 assert(UsedBytesMask != 0 && UsedBytesMask != UINT32_MAX &&
3642 "Unexpected masked load with elements masked all on or all off");
3643
3644 // Create a new load sd node to be handled normally by ReplaceLoadVector.
3645 MemSDNode *NewLD = cast<MemSDNode>(
3646 DAG.getLoad(ResVT, DL, Chain, BasePtr, N->getMemOperand()).getNode());
3647
3648 // If our subtarget does not support the used bytes mask pragma, "drop" the
3649 // mask by setting it to UINT32_MAX
3650 if (!STI.hasUsedBytesMaskPragma())
3651 UsedBytesMask = UINT32_MAX;
3652
3653 return {NewLD, UsedBytesMask};
3654}
3655
3656/// replaceLoadVector - Convert vector loads into multi-output scalar loads.
3657static std::optional<std::pair<SDValue, SDValue>>
3660 const EVT ResVT = LD->getValueType(0);
3661 const EVT MemVT = LD->getMemoryVT();
3662
3663 // If we're doing sign/zero extension as part of the load, avoid lowering to
3664 // a LoadV node. TODO: consider relaxing this restriction.
3665 if (ResVT != MemVT)
3666 return std::nullopt;
3667
3668 const auto NumEltsAndEltVT =
3669 getVectorLoweringShape(ResVT, STI, LD->getAddressSpace());
3670 if (!NumEltsAndEltVT)
3671 return std::nullopt;
3672 const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
3673
3674 Align Alignment = LD->getAlign();
3675 const auto &TD = DAG.getDataLayout();
3676 Align PrefAlign = TD.getPrefTypeAlign(MemVT.getTypeForEVT(*DAG.getContext()));
3677 if (Alignment < PrefAlign) {
3678 // This load is not sufficiently aligned, so bail out and let this vector
3679 // load be scalarized. Note that we may still be able to emit smaller
3680 // vector loads. For example, if we are loading a <4 x float> with an
3681 // alignment of 8, this check will fail but the legalizer will try again
3682 // with 2 x <2 x float>, which will succeed with an alignment of 8.
3683 return std::nullopt;
3684 }
3685
3686 // If we have a masked load, convert it to a normal load now
3687 std::optional<uint32_t> UsedBytesMask = std::nullopt;
3688 if (LD->getOpcode() == ISD::MLOAD)
3689 std::tie(LD, UsedBytesMask) =
3691
3692 // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
3693 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
3694 // loaded type to i16 and propagate the "real" type as the memory type.
3695 const MVT LoadEltVT = (EltVT.getSizeInBits() < 16) ? MVT::i16 : EltVT;
3696
3697 unsigned Opcode;
3698 switch (NumElts) {
3699 default:
3700 return std::nullopt;
3701 case 2:
3702 Opcode = NVPTXISD::LoadV2;
3703 break;
3704 case 4:
3705 Opcode = NVPTXISD::LoadV4;
3706 break;
3707 case 8:
3708 Opcode = NVPTXISD::LoadV8;
3709 break;
3710 }
3711 auto ListVTs = SmallVector<EVT, 9>(NumElts, LoadEltVT);
3712 ListVTs.push_back(MVT::Other);
3713 SDVTList LdResVTs = DAG.getVTList(ListVTs);
3714
3715 SDLoc DL(LD);
3716
3717 // Copy regular operands
3718 SmallVector<SDValue, 8> OtherOps(LD->ops());
3719
3720 OtherOps.push_back(
3721 DAG.getConstant(UsedBytesMask.value_or(UINT32_MAX), DL, MVT::i32));
3722
3723 // The select routine does not have access to the LoadSDNode instance, so
3724 // pass along the extension information
3725 OtherOps.push_back(
3726 DAG.getIntPtrConstant(cast<LoadSDNode>(LD)->getExtensionType(), DL));
3727
3728 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps, MemVT,
3729 LD->getMemOperand());
3730
3731 SmallVector<SDValue> ScalarRes;
3732 if (EltVT.isVector()) {
3734 assert(NumElts * EltVT.getVectorNumElements() ==
3735 ResVT.getVectorNumElements());
3736 // Generate EXTRACT_VECTOR_ELTs to split v2[i,f,bf]16/v4i8 subvectors back
3737 // into individual elements.
3738 for (const unsigned I : llvm::seq(NumElts)) {
3739 SDValue SubVector = NewLD.getValue(I);
3740 DAG.ExtractVectorElements(SubVector, ScalarRes);
3741 }
3742 } else {
3743 for (const unsigned I : llvm::seq(NumElts)) {
3744 SDValue Res = NewLD.getValue(I);
3745 if (LoadEltVT != EltVT)
3746 Res = DAG.getNode(ISD::TRUNCATE, DL, EltVT, Res);
3747 ScalarRes.push_back(Res);
3748 }
3749 }
3750
3751 SDValue LoadChain = NewLD.getValue(NumElts);
3752
3753 const MVT BuildVecVT =
3754 MVT::getVectorVT(EltVT.getScalarType(), ScalarRes.size());
3755 SDValue BuildVec = DAG.getBuildVector(BuildVecVT, DL, ScalarRes);
3756 SDValue LoadValue = DAG.getBitcast(ResVT, BuildVec);
3757
3758 return {{LoadValue, LoadChain}};
3759}
3760
3763 const NVPTXSubtarget &STI) {
3764 if (auto Res = replaceLoadVector(N, DAG, STI))
3765 Results.append({Res->first, Res->second});
3766}
3767
3769 const NVPTXSubtarget &STI) {
3770 if (auto Res = replaceLoadVector(N, DAG, STI))
3771 return DAG.getMergeValues({Res->first, Res->second}, SDLoc(N));
3772 return SDValue();
3773}
3774
3775// v = ld i1* addr
3776// =>
3777// v1 = ld i8* addr (-> i16)
3778// v = trunc i16 to i1
3780 SDLoc dl(LD);
3781 assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
3782 assert(LD->getValueType(0) == MVT::i1 && "Custom lowering for i1 load only");
3783 SDValue newLD = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i16, LD->getChain(),
3784 LD->getBasePtr(), LD->getPointerInfo(),
3785 MVT::i8, LD->getAlign(),
3786 LD->getMemOperand()->getFlags());
3787 SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
3788 // The legalizer (the caller) is expecting two values from the legalized
3789 // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
3790 // in LegalizeDAG.cpp which also uses MergeValues.
3791 return DAG.getMergeValues({result, LD->getChain()}, dl);
3792}
3793
3794SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
3795 LoadSDNode *LD = cast<LoadSDNode>(Op);
3796
3797 if (Op.getValueType() == MVT::i1)
3798 return lowerLOADi1(LD, DAG);
3799
3800 // To improve CodeGen we'll legalize any-extend loads to zext loads. This is
3801 // how they'll be lowered in ISel anyway, and by doing this a little earlier
3802 // we allow for more DAG combine opportunities.
3803 if (LD->getExtensionType() == ISD::EXTLOAD) {
3804 assert(LD->getValueType(0).isInteger() && LD->getMemoryVT().isInteger() &&
3805 "Unexpected fpext-load");
3806 return DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(Op), Op.getValueType(),
3807 LD->getChain(), LD->getBasePtr(), LD->getMemoryVT(),
3808 LD->getMemOperand());
3809 }
3810
3811 llvm_unreachable("Unexpected custom lowering for load");
3812}
3813
3814SDValue NVPTXTargetLowering::LowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
3815 // v2f16/v2bf16/v2i16/v4i8 are legal, so we can't rely on legalizer to handle
3816 // masked loads of these types and have to handle them here.
3817 // v2f32 also needs to be handled here if the subtarget has f32x2
3818 // instructions, making it legal.
3819 //
3820 // Note: misaligned masked loads should never reach this point
3821 // because the override of isLegalMaskedLoad in NVPTXTargetTransformInfo.cpp
3822 // will validate alignment. Therefore, we do not need to special case handle
3823 // them here.
3824 EVT VT = Op.getValueType();
3825 if (NVPTX::isPackedVectorTy(VT)) {
3827 cast<MemSDNode>(Op.getNode()), DAG, STI);
3828 MemSDNode *LD = std::get<0>(Result);
3829 uint32_t UsedBytesMask = std::get<1>(Result);
3830
3831 SDLoc DL(LD);
3832
3833 // Copy regular operands
3834 SmallVector<SDValue, 8> OtherOps(LD->ops());
3835
3836 OtherOps.push_back(DAG.getConstant(UsedBytesMask, DL, MVT::i32));
3837
3838 // We currently are not lowering extending loads, but pass the extension
3839 // type anyway as later handling expects it.
3840 OtherOps.push_back(
3841 DAG.getIntPtrConstant(cast<LoadSDNode>(LD)->getExtensionType(), DL));
3842 SDValue NewLD =
3843 DAG.getMemIntrinsicNode(NVPTXISD::MLoad, DL, LD->getVTList(), OtherOps,
3844 LD->getMemoryVT(), LD->getMemOperand());
3845 return NewLD;
3846 }
3847 return SDValue();
3848}
3849
3851 const NVPTXSubtarget &STI) {
3852 MemSDNode *N = cast<MemSDNode>(Op.getNode());
3853 SDValue Val = N->getOperand(1);
3854 SDLoc DL(N);
3855 const EVT ValVT = Val.getValueType();
3856 const EVT MemVT = N->getMemoryVT();
3857
3858 // If we're truncating as part of the store, avoid lowering to a StoreV node.
3859 // TODO: consider relaxing this restriction.
3860 if (ValVT != MemVT)
3861 return SDValue();
3862
3863 const auto NumEltsAndEltVT =
3864 getVectorLoweringShape(ValVT, STI, N->getAddressSpace());
3865 if (!NumEltsAndEltVT)
3866 return SDValue();
3867 const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
3868
3869 const DataLayout &TD = DAG.getDataLayout();
3870
3871 Align Alignment = N->getAlign();
3872 Align PrefAlign = TD.getPrefTypeAlign(ValVT.getTypeForEVT(*DAG.getContext()));
3873 if (Alignment < PrefAlign) {
3874 // This store is not sufficiently aligned, so bail out and let this vector
3875 // store be scalarized. Note that we may still be able to emit smaller
3876 // vector stores. For example, if we are storing a <4 x float> with an
3877 // alignment of 8, this check will fail but the legalizer will try again
3878 // with 2 x <2 x float>, which will succeed with an alignment of 8.
3879 return SDValue();
3880 }
3881
3882 unsigned Opcode;
3883 switch (NumElts) {
3884 default:
3885 return SDValue();
3886 case 2:
3887 Opcode = NVPTXISD::StoreV2;
3888 break;
3889 case 4:
3890 Opcode = NVPTXISD::StoreV4;
3891 break;
3892 case 8:
3893 Opcode = NVPTXISD::StoreV8;
3894 break;
3895 }
3896
3898
3899 // First is the chain
3900 Ops.push_back(N->getOperand(0));
3901
3902 // Then the split values
3903 if (EltVT.isVector()) {
3905 assert(NumElts * EltVT.getVectorNumElements() ==
3906 ValVT.getVectorNumElements());
3907 // Combine individual elements into v2[i,f,bf]16/v4i8 subvectors to be
3908 // stored as b32s
3909 const unsigned NumEltsPerSubVector = EltVT.getVectorNumElements();
3910 for (const unsigned I : llvm::seq(NumElts)) {
3911 SmallVector<SDValue, 4> SubVectorElts;
3912 DAG.ExtractVectorElements(Val, SubVectorElts, I * NumEltsPerSubVector,
3913 NumEltsPerSubVector);
3914 Ops.push_back(DAG.getBuildVector(EltVT, DL, SubVectorElts));
3915 }
3916 } else {
3917 SDValue V = DAG.getBitcast(MVT::getVectorVT(EltVT, NumElts), Val);
3918 for (const unsigned I : llvm::seq(NumElts)) {
3919 SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, V,
3920 DAG.getIntPtrConstant(I, DL));
3921
3922 // Since StoreV2 is a target node, we cannot rely on DAG type
3923 // legalization. Therefore, we must ensure the type is legal. For i1 and
3924 // i8, we set the stored type to i16 and propagate the "real" type as the
3925 // memory type.
3926 if (EltVT.getSizeInBits() < 16)
3927 ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
3928 Ops.push_back(ExtVal);
3929 }
3930 }
3931
3932 // Then any remaining arguments
3933 Ops.append(N->op_begin() + 2, N->op_end());
3934
3935 SDValue NewSt =
3936 DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
3937 N->getMemoryVT(), N->getMemOperand());
3938
3939 // return DCI.CombineTo(N, NewSt, true);
3940 return NewSt;
3941}
3942
3943SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
3944 StoreSDNode *Store = cast<StoreSDNode>(Op);
3945 EVT VT = Store->getMemoryVT();
3946
3947 if (VT == MVT::i1)
3948 return LowerSTOREi1(Op, DAG);
3949
3950 // Lower stores of any other vector type, including v2f32, which we break
3951 // apart since it is not a widely-supported type.
3952 return lowerSTOREVector(Op, DAG, STI);
3953}
3954
3955// st i1 v, addr
3956// =>
3957// v1 = zxt v to i16
3958// st.u8 i16, addr
3959SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
3960 SDNode *Node = Op.getNode();
3961 SDLoc dl(Node);
3962 StoreSDNode *ST = cast<StoreSDNode>(Node);
3963 SDValue Tmp1 = ST->getChain();
3964 SDValue Tmp2 = ST->getBasePtr();
3965 SDValue Tmp3 = ST->getValue();
3966 assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
3967 Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
3968 SDValue Result =
3969 DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
3970 ST->getAlign(), ST->getMemOperand()->getFlags());
3971 return Result;
3972}
3973
3974SDValue NVPTXTargetLowering::LowerCopyToReg_128(SDValue Op,
3975 SelectionDAG &DAG) const {
3976 // Change the CopyToReg to take two 64-bit operands instead of a 128-bit
3977 // operand so that it can survive legalization.
3978
3979 assert(Op.getOperand(1).getValueType() == MVT::i128 &&
3980 "Custom lowering for 128-bit CopyToReg only");
3981
3982 SDNode *Node = Op.getNode();
3983 SDLoc DL(Node);
3984
3985 SDValue Cast = DAG.getBitcast(MVT::v2i64, Op->getOperand(2));
3986 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
3987 DAG.getIntPtrConstant(0, DL));
3988 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
3989 DAG.getIntPtrConstant(1, DL));
3990
3992 SmallVector<EVT, 3> ResultsType(Node->values());
3993
3994 NewOps[0] = Op->getOperand(0); // Chain
3995 NewOps[1] = Op->getOperand(1); // Dst Reg
3996 NewOps[2] = Lo; // Lower 64-bit
3997 NewOps[3] = Hi; // Higher 64-bit
3998 if (Op.getNumOperands() == 4)
3999 NewOps[4] = Op->getOperand(3); // Glue if exists
4000
4001 return DAG.getNode(ISD::CopyToReg, DL, ResultsType, NewOps);
4002}
4003
4004unsigned NVPTXTargetLowering::getNumRegisters(
4005 LLVMContext &Context, EVT VT,
4006 std::optional<MVT> RegisterVT = std::nullopt) const {
4007 if (VT == MVT::i128 && RegisterVT == MVT::i128)
4008 return 1;
4009 return TargetLoweringBase::getNumRegisters(Context, VT, RegisterVT);
4010}
4011
4012bool NVPTXTargetLowering::splitValueIntoRegisterParts(
4013 SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
4014 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
4015 if (Val.getValueType() == MVT::i128 && NumParts == 1) {
4016 Parts[0] = Val;
4017 return true;
4018 }
4019 return false;
4020}
4021
4022 // This creates a target external symbol for a function parameter.
4023 // The name of the symbol is composed from its index and the function name.
4024 // A negative index corresponds to the special parameter (unsized array) used
4025 // for passing variable arguments.
4026SDValue NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int I,
4027 EVT T) const {
4028 StringRef SavedStr = nvTM->getStrPool().save(
4030 return DAG.getExternalSymbol(SavedStr.data(), T);
4031}
4032
4033SDValue NVPTXTargetLowering::getCallParamSymbol(SelectionDAG &DAG, int I,
4034 EVT T) const {
4035 const StringRef SavedStr = nvTM->getStrPool().save("param" + Twine(I));
4036 return DAG.getExternalSymbol(SavedStr.data(), T);
4037}
4038
4040 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4041 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4042 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4043 const DataLayout &DL = DAG.getDataLayout();
4044 LLVMContext &Ctx = *DAG.getContext();
4045 auto PtrVT = getPointerTy(DAG.getDataLayout());
4046
4047 const Function &F = DAG.getMachineFunction().getFunction();
4048
4049 SDValue Root = DAG.getRoot();
4050 SmallVector<SDValue, 16> OutChains;
4051
4052 // The number of IR arguments (F.args()) and Ins.size() need not match.
4053 // Ins.size() will be larger
4054 // * if there is an aggregate argument with multiple fields (each field
4055 // showing up separately in Ins)
4056 // * if there is a vector argument with more than typical vector-length
4057 // elements (generally more than 4), where each vector element is
4058 // individually present in Ins.
4059 // So a different index should be used for indexing into Ins.
4060 // See the similar issue in LowerCall.
4061
4062 auto AllIns = ArrayRef(Ins);
4063 for (const auto &Arg : F.args()) {
4064 const auto ArgIns = AllIns.take_while(
4065 [&](auto I) { return I.OrigArgIndex == Arg.getArgNo(); });
4066 AllIns = AllIns.drop_front(ArgIns.size());
4067
4068 Type *Ty = Arg.getType();
4069
4070 if (ArgIns.empty())
4071 report_fatal_error("Empty parameter types are not supported");
4072
4073 if (Arg.use_empty()) {
4074 // argument is dead
4075 for (const auto &In : ArgIns) {
4076 assert(!In.Used && "Arg.use_empty() is true but Arg is used?");
4077 InVals.push_back(DAG.getUNDEF(In.VT));
4078 }
4079 continue;
4080 }
4081
4082 SDValue ArgSymbol = getParamSymbol(DAG, Arg.getArgNo(), PtrVT);
4083
4084 // In the following cases, assign newly created nodes an IR order of
4085 // "ArgNo + 1". The SDNodes for params have to appear in the same order as
4086 // their order of appearance in the original function, and "ArgNo + 1"
4087 // preserves that order.
4088 if (Arg.hasByValAttr()) {
4089 // Param has ByVal attribute
4090 // Return MoveParam(param symbol).
4091 // Ideally, the param symbol can be returned directly,
4092 // but when SDNode builder decides to use it in a CopyToReg(),
4093 // machine instruction fails because TargetExternalSymbol
4094 // (not lowered) is target dependent, and CopyToReg assumes
4095 // the source is lowered.
4096 assert(ArgIns.size() == 1 && "ByVal argument must be a pointer");
4097 const auto &ByvalIn = ArgIns[0];
4098 assert(getValueType(DL, Ty) == ByvalIn.VT &&
4099 "Ins type did not match function type");
4100 assert(ByvalIn.VT == PtrVT && "ByVal argument must be a pointer");
4101
4102 SDValue P;
4103 if (isKernelFunction(F)) {
4104 P = ArgSymbol;
4105 P.getNode()->setIROrder(Arg.getArgNo() + 1);
4106 } else {
4107 P = DAG.getNode(NVPTXISD::MoveParam, dl, ByvalIn.VT, ArgSymbol);
4108 P.getNode()->setIROrder(Arg.getArgNo() + 1);
4109 P = DAG.getAddrSpaceCast(dl, ByvalIn.VT, P, ADDRESS_SPACE_LOCAL,
4111 }
4112 InVals.push_back(P);
4113 } else {
4116 ComputePTXValueVTs(*this, DL, Ctx, CallConv, Ty, VTs, Offsets);
4117 assert(VTs.size() == ArgIns.size() && "Size mismatch");
4118 assert(VTs.size() == Offsets.size() && "Size mismatch");
4119
4120 const Align ArgAlign = getFunctionArgumentAlignment(
4121 &F, Ty, Arg.getArgNo() + AttributeList::FirstArgIndex, DL);
4122
4123 unsigned I = 0;
4124 const auto VI = VectorizePTXValueVTs(VTs, Offsets, ArgAlign);
4125 for (const unsigned NumElts : VI) {
4126 // i1 is loaded/stored as i8
4127 const EVT LoadVT = VTs[I] == MVT::i1 ? MVT::i8 : VTs[I];
4128 const EVT VecVT = getVectorizedVT(LoadVT, NumElts, Ctx);
4129
4130 SDValue VecAddr = DAG.getObjectPtrOffset(
4131 dl, ArgSymbol, TypeSize::getFixed(Offsets[I]));
4132
4133 const Align PartAlign = commonAlignment(ArgAlign, Offsets[I]);
4134 SDValue P =
4135 DAG.getLoad(VecVT, dl, Root, VecAddr,
4139 P.getNode()->setIROrder(Arg.getArgNo() + 1);
4140 for (const unsigned J : llvm::seq(NumElts)) {
4141 SDValue Elt = getExtractVectorizedValue(P, J, LoadVT, dl, DAG);
4142
4143 Elt = correctParamType(Elt, ArgIns[I + J].VT, ArgIns[I + J].Flags,
4144 DAG, dl);
4145 InVals.push_back(Elt);
4146 }
4147 I += NumElts;
4148 }
4149 }
4150 }
4151
4152 if (!OutChains.empty())
4153 DAG.setRoot(DAG.getTokenFactor(dl, OutChains));
4154
4155 return Chain;
4156}
4157
4158SDValue
4160 bool isVarArg,
4162 const SmallVectorImpl<SDValue> &OutVals,
4163 const SDLoc &dl, SelectionDAG &DAG) const {
4164 const Function &F = DAG.getMachineFunction().getFunction();
4165 Type *RetTy = F.getReturnType();
4166
4167 if (RetTy->isVoidTy()) {
4168 assert(OutVals.empty() && Outs.empty() && "Return value expected for void");
4169 return DAG.getNode(NVPTXISD::RET_GLUE, dl, MVT::Other, Chain);
4170 }
4171
4172 const DataLayout &DL = DAG.getDataLayout();
4173 LLVMContext &Ctx = *DAG.getContext();
4174
4175 const SDValue RetSymbol = DAG.getExternalSymbol("func_retval0", MVT::i32);
4176 const auto RetAlign = getFunctionParamOptimizedAlign(&F, RetTy, DL);
4177
4178 // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
4179 // 32-bits are sign extended or zero extended, depending on whether
4180 // they are signed or unsigned types.
4181 const bool ExtendIntegerRetVal =
4182 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
4183
4186 ComputePTXValueVTs(*this, DL, Ctx, CallConv, RetTy, VTs, Offsets);
4187 assert(VTs.size() == OutVals.size() && "Bad return value decomposition");
4188
4189 const auto GetRetVal = [&](unsigned I) -> SDValue {
4190 SDValue RetVal = OutVals[I];
4192 RetVal.getValueType() &&
4193 "OutVal type should always be legal");
4194
4195 const EVT VTI = promoteScalarIntegerPTX(VTs[I]);
4196 const EVT StoreVT =
4197 ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
4198 return correctParamType(RetVal, StoreVT, Outs[I].Flags, DAG, dl);
4199 };
4200
4201 unsigned I = 0;
4202 const auto VI = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
4203 for (const unsigned NumElts : VI) {
4204 const MaybeAlign CurrentAlign = ExtendIntegerRetVal
4205 ? MaybeAlign(std::nullopt)
4206 : commonAlignment(RetAlign, Offsets[I]);
4207
4209 NumElts, dl, DAG, [&](unsigned K) { return GetRetVal(I + K); });
4210
4211 SDValue Ptr =
4212 DAG.getObjectPtrOffset(dl, RetSymbol, TypeSize::getFixed(Offsets[I]));
4213
4214 Chain = DAG.getStore(Chain, dl, Val, Ptr,
4216
4217 I += NumElts;
4218 }
4219
4220 return DAG.getNode(NVPTXISD::RET_GLUE, dl, MVT::Other, Chain);
4221}
4222
4224 SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
4225 SelectionDAG &DAG) const {
4226 if (Constraint.size() > 1)
4227 return;
4229}
4230
4231 // llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
4232 // TgtMemIntrinsic because we need information that is only available in the
4233 // "Value" type of the destination pointer, in particular its address space
4234 // information.
4237 const CallBase &I,
4238 MachineFunction &MF,
4239 unsigned Intrinsic) const {
4240 switch (Intrinsic) {
4241 default:
4242 return false;
4243 case Intrinsic::nvvm_match_all_sync_i32p:
4244 case Intrinsic::nvvm_match_all_sync_i64p:
4245 Info.opc = ISD::INTRINSIC_W_CHAIN;
4246 // memVT is bogus. These intrinsics have IntrInaccessibleMemOnly attribute
4247 // in order to model data exchange with other threads, but perform no real
4248 // memory accesses.
4249 Info.memVT = MVT::i1;
4250
4251 // Our result depends on both our and other thread's arguments.
4253 return true;
4254 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col:
4255 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row:
4256 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride:
4257 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride:
4258 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col:
4259 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row:
4260 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col_stride:
4261 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row_stride:
4262 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col:
4263 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row:
4264 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col_stride:
4265 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row_stride:
4266 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col:
4267 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row:
4268 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col_stride:
4269 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row_stride:
4270 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col:
4271 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row:
4272 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col_stride:
4273 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row_stride:
4274 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col:
4275 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row:
4276 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col_stride:
4277 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row_stride: {
4278 Info.opc = ISD::INTRINSIC_W_CHAIN;
4279 Info.memVT = MVT::v8f16;
4280 Info.ptrVal = I.getArgOperand(0);
4281 Info.offset = 0;
4282 Info.flags = MachineMemOperand::MOLoad;
4283 Info.align = Align(16);
4284 return true;
4285 }
4286 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col:
4287 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col_stride:
4288 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col_stride:
4289 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col:
4290 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row:
4291 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row_stride:
4292 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row_stride:
4293 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row:
4294 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col:
4295 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col_stride:
4296 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row:
4297 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row_stride:
4298 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col:
4299 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col_stride:
4300 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col_stride:
4301 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col:
4302 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row:
4303 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row_stride:
4304 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row_stride:
4305 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row:
4306 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col:
4307 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col_stride:
4308 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row:
4309 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row_stride: {
4310 Info.opc = ISD::INTRINSIC_W_CHAIN;
4311 Info.memVT = MVT::v2i32;
4312 Info.ptrVal = I.getArgOperand(0);
4313 Info.offset = 0;
4314 Info.flags = MachineMemOperand::MOLoad;
4315 Info.align = Align(8);
4316 return true;
4317 }
4318
4319 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col:
4320 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col_stride:
4321 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col_stride:
4322 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col:
4323 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row:
4324 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row_stride:
4325 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row_stride:
4326 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row:
4327 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col:
4328 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col_stride:
4329 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row:
4330 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row_stride:
4331 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col:
4332 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col_stride:
4333 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row:
4334 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row_stride:
4335
4336 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col:
4337 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col_stride:
4338 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col_stride:
4339 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col:
4340 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row:
4341 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row_stride:
4342 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row_stride:
4343 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row:
4344 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col:
4345 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col_stride:
4346 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row:
4347 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row_stride:
4348 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col:
4349 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col_stride:
4350 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row:
4351 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row_stride:
4352 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_b16:
4353 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_trans_b16:
4354 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8:
4355 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b4x16_p64:
4356 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b6x16_p32:
4357 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b4x16_p64:
4358 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b6x16_p32: {
4359 Info.opc = ISD::INTRINSIC_W_CHAIN;
4360 Info.memVT = MVT::v4i32;
4361 Info.ptrVal = I.getArgOperand(0);
4362 Info.offset = 0;
4363 Info.flags = MachineMemOperand::MOLoad;
4364 Info.align = Align(16);
4365 return true;
4366 }
4367
4368 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col:
4369 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col_stride:
4370 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col_stride:
4371 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col:
4372 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row:
4373 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row_stride:
4374 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row_stride:
4375 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row:
4376
4377 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col:
4378 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col_stride:
4379 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col_stride:
4380 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col:
4381 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row:
4382 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row_stride:
4383 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row_stride:
4384 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row:
4385 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row:
4386 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row_stride:
4387 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col:
4388 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col_stride:
4389 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row:
4390 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row_stride:
4391 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row_stride:
4392 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row:
4393 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col:
4394 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col_stride:
4395 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col_stride:
4396 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col:
4397 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_b16:
4398 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_trans_b16:
4399 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b4x16_p64:
4400 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b6x16_p32: {
4401 Info.opc = ISD::INTRINSIC_W_CHAIN;
4402 Info.memVT = MVT::i32;
4403 Info.ptrVal = I.getArgOperand(0);
4404 Info.offset = 0;
4405 Info.flags = MachineMemOperand::MOLoad;
4406 Info.align = Align(4);
4407 return true;
4408 }
4409
4410 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col:
4411 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row:
4412 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col_stride:
4413 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row_stride:
4414 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col:
4415 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row:
4416 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col_stride:
4417 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row_stride:
4418 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col:
4419 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row:
4420 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col_stride:
4421 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row_stride: {
4422 Info.opc = ISD::INTRINSIC_W_CHAIN;
4423 Info.memVT = MVT::v4f16;
4424 Info.ptrVal = I.getArgOperand(0);
4425 Info.offset = 0;
4426 Info.flags = MachineMemOperand::MOLoad;
4427 Info.align = Align(16);
4428 return true;
4429 }
4430
4431 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col:
4432 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row:
4433 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col_stride:
4434 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row_stride:
4435 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col:
4436 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row:
4437 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col_stride:
4438 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row_stride:
4439 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col:
4440 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row:
4441 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col_stride:
4442 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row_stride:
4443 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col:
4444 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row:
4445 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col_stride:
4446 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row_stride: {
4447 Info.opc = ISD::INTRINSIC_W_CHAIN;
4448 Info.memVT = MVT::v8f32;
4449 Info.ptrVal = I.getArgOperand(0);
4450 Info.offset = 0;
4451 Info.flags = MachineMemOperand::MOLoad;
4452 Info.align = Align(16);
4453 return true;
4454 }
4455
4456 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col:
4457 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col_stride:
4458 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row:
4459 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row_stride:
4460
4461 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col:
4462 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col_stride:
4463 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row:
4464 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row_stride:
4465
4466 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col:
4467 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col_stride:
4468 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row:
4469 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row_stride:
4470 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col:
4471 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col_stride:
4472 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row:
4473 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row_stride:
4474 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col:
4475 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col_stride:
4476 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row:
4477 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row_stride: {
4478 Info.opc = ISD::INTRINSIC_W_CHAIN;
4479 Info.memVT = MVT::v8i32;
4480 Info.ptrVal = I.getArgOperand(0);
4481 Info.offset = 0;
4482 Info.flags = MachineMemOperand::MOLoad;
4483 Info.align = Align(16);
4484 return true;
4485 }
4486
4487 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col:
4488 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col_stride:
4489 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row:
4490 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row_stride:
4491 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col:
4492 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col_stride:
4493 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row:
4494 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row_stride:
4495 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_b16:
4496 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_trans_b16:
4497 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8:
4498 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b4x16_p64:
4499 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b6x16_p32:
4500 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b4x16_p64:
4501 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b6x16_p32: {
4502 Info.opc = ISD::INTRINSIC_W_CHAIN;
4503 Info.memVT = MVT::v2i32;
4504 Info.ptrVal = I.getArgOperand(0);
4505 Info.offset = 0;
4506 Info.flags = MachineMemOperand::MOLoad;
4507 Info.align = Align(8);
4508 return true;
4509 }
4510
4511 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col:
4512 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col_stride:
4513 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row:
4514 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row_stride:
4515
4516 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col:
4517 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col_stride:
4518 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row:
4519 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row_stride: {
4520 Info.opc = ISD::INTRINSIC_W_CHAIN;
4521 Info.memVT = MVT::f64;
4522 Info.ptrVal = I.getArgOperand(0);
4523 Info.offset = 0;
4524 Info.flags = MachineMemOperand::MOLoad;
4525 Info.align = Align(8);
4526 return true;
4527 }
4528
4529 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col:
4530 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col_stride:
4531 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row:
4532 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row_stride: {
4533 Info.opc = ISD::INTRINSIC_W_CHAIN;
4534 Info.memVT = MVT::v2f64;
4535 Info.ptrVal = I.getArgOperand(0);
4536 Info.offset = 0;
4537 Info.flags = MachineMemOperand::MOLoad;
4538 Info.align = Align(16);
4539 return true;
4540 }
4541
4542 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col:
4543 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row:
4544 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col_stride:
4545 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride:
4546 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col:
4547 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row:
4548 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col_stride:
4549 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row_stride:
4550 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col:
4551 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row:
4552 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col_stride:
4553 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row_stride: {
4554 Info.opc = ISD::INTRINSIC_VOID;
4555 Info.memVT = MVT::v4f16;
4556 Info.ptrVal = I.getArgOperand(0);
4557 Info.offset = 0;
4558 Info.flags = MachineMemOperand::MOStore;
4559 Info.align = Align(16);
4560 return true;
4561 }
4562
4563 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col:
4564 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row:
4565 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col_stride:
4566 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride:
4567 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col:
4568 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row:
4569 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col_stride:
4570 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row_stride:
4571 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col:
4572 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row:
4573 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col_stride:
4574 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row_stride:
4575 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col:
4576 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row:
4577 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col_stride:
4578 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row_stride: {
4579 Info.opc = ISD::INTRINSIC_VOID;
4580 Info.memVT = MVT::v8f32;
4581 Info.ptrVal = I.getArgOperand(0);
4582 Info.offset = 0;
4583 Info.flags = MachineMemOperand::MOStore;
4584 Info.align = Align(16);
4585 return true;
4586 }
4587
4588 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col:
4589 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col_stride:
4590 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row:
4591 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row_stride:
4592 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col:
4593 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col_stride:
4594 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row:
4595 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row_stride:
4596 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col:
4597 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col_stride:
4598 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row:
4599 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row_stride: {
4600 Info.opc = ISD::INTRINSIC_VOID;
4601 Info.memVT = MVT::v8i32;
4602 Info.ptrVal = I.getArgOperand(0);
4603 Info.offset = 0;
4604 Info.flags = MachineMemOperand::MOStore;
4605 Info.align = Align(16);
4606 return true;
4607 }
4608
4609 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col:
4610 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col_stride:
4611 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row:
4612 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row_stride:
4613 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col:
4614 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col_stride:
4615 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row:
4616 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row_stride:
4617 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_b16:
4618 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_trans_b16:
4619 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x2_trans_b8: {
4620 Info.opc = ISD::INTRINSIC_VOID;
4621 Info.memVT = MVT::v2i32;
4622 Info.ptrVal = I.getArgOperand(0);
4623 Info.offset = 0;
4624 Info.flags = MachineMemOperand::MOStore;
4625 Info.align = Align(8);
4626 return true;
4627 }
4628
4629 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col:
4630 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col_stride:
4631 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row:
4632 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row_stride: {
4633 Info.opc = ISD::INTRINSIC_VOID;
4634 Info.memVT = MVT::v2f64;
4635 Info.ptrVal = I.getArgOperand(0);
4636 Info.offset = 0;
4637 Info.flags = MachineMemOperand::MOStore;
4638 Info.align = Align(16);
4639 return true;
4640 }
4641
4642 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_b16:
4643 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_trans_b16:
4644 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x1_trans_b8: {
4645 Info.opc = ISD::INTRINSIC_VOID;
4646 Info.memVT = MVT::i32;
4647 Info.ptrVal = I.getArgOperand(0);
4648 Info.offset = 0;
4649 Info.flags = MachineMemOperand::MOStore;
4650 Info.align = Align(4);
4651 return true;
4652 }
4653
4654 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_b16:
4655 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_trans_b16:
4656 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x4_trans_b8: {
4657 Info.opc = ISD::INTRINSIC_VOID;
4658 Info.memVT = MVT::v4i32;
4659 Info.ptrVal = I.getArgOperand(0);
4660 Info.offset = 0;
4661 Info.flags = MachineMemOperand::MOStore;
4662 Info.align = Align(16);
4663 return true;
4664 }
4665
4666 case Intrinsic::nvvm_atomic_add_gen_f_cta:
4667 case Intrinsic::nvvm_atomic_add_gen_f_sys:
4668 case Intrinsic::nvvm_atomic_add_gen_i_cta:
4669 case Intrinsic::nvvm_atomic_add_gen_i_sys:
4670 case Intrinsic::nvvm_atomic_and_gen_i_cta:
4671 case Intrinsic::nvvm_atomic_and_gen_i_sys:
4672 case Intrinsic::nvvm_atomic_cas_gen_i_cta:
4673 case Intrinsic::nvvm_atomic_cas_gen_i_sys:
4674 case Intrinsic::nvvm_atomic_dec_gen_i_cta:
4675 case Intrinsic::nvvm_atomic_dec_gen_i_sys:
4676 case Intrinsic::nvvm_atomic_inc_gen_i_cta:
4677 case Intrinsic::nvvm_atomic_inc_gen_i_sys:
4678 case Intrinsic::nvvm_atomic_max_gen_i_cta:
4679 case Intrinsic::nvvm_atomic_max_gen_i_sys:
4680 case Intrinsic::nvvm_atomic_min_gen_i_cta:
4681 case Intrinsic::nvvm_atomic_min_gen_i_sys:
4682 case Intrinsic::nvvm_atomic_or_gen_i_cta:
4683 case Intrinsic::nvvm_atomic_or_gen_i_sys:
4684 case Intrinsic::nvvm_atomic_exch_gen_i_cta:
4685 case Intrinsic::nvvm_atomic_exch_gen_i_sys:
4686 case Intrinsic::nvvm_atomic_xor_gen_i_cta:
4687 case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
4688 auto &DL = I.getDataLayout();
4689 Info.opc = ISD::INTRINSIC_W_CHAIN;
4690 Info.memVT = getValueType(DL, I.getType());
4691 Info.ptrVal = I.getArgOperand(0);
4692 Info.offset = 0;
4694 Info.align.reset();
4695 return true;
4696 }
4697
4698 case Intrinsic::nvvm_prefetch_tensormap: {
4699 auto &DL = I.getDataLayout();
4700 Info.opc = ISD::INTRINSIC_VOID;
4701 Info.memVT = getPointerTy(DL);
4702 Info.ptrVal = I.getArgOperand(0);
4703 Info.offset = 0;
4704 Info.flags =
4706 Info.align.reset();
4707 return true;
4708 }
4709
4710 case Intrinsic::nvvm_tensormap_replace_global_address:
4711 case Intrinsic::nvvm_tensormap_replace_global_stride: {
4712 Info.opc = ISD::INTRINSIC_VOID;
4713 Info.memVT = MVT::i64;
4714 Info.ptrVal = I.getArgOperand(0);
4715 Info.offset = 0;
4716 Info.flags = MachineMemOperand::MOStore;
4717 Info.align.reset();
4718 return true;
4719 }
4720
4721 case Intrinsic::nvvm_tensormap_replace_rank:
4722 case Intrinsic::nvvm_tensormap_replace_box_dim:
4723 case Intrinsic::nvvm_tensormap_replace_global_dim:
4724 case Intrinsic::nvvm_tensormap_replace_element_stride:
4725 case Intrinsic::nvvm_tensormap_replace_elemtype:
4726 case Intrinsic::nvvm_tensormap_replace_interleave_layout:
4727 case Intrinsic::nvvm_tensormap_replace_swizzle_mode:
4728 case Intrinsic::nvvm_tensormap_replace_swizzle_atomicity:
4729 case Intrinsic::nvvm_tensormap_replace_fill_mode: {
4730 Info.opc = ISD::INTRINSIC_VOID;
4731 Info.memVT = MVT::i32;
4732 Info.ptrVal = I.getArgOperand(0);
4733 Info.offset = 0;
4734 Info.flags = MachineMemOperand::MOStore;
4735 Info.align.reset();
4736 return true;
4737 }
4738
4739 case Intrinsic::nvvm_ldu_global_i:
4740 case Intrinsic::nvvm_ldu_global_f:
4741 case Intrinsic::nvvm_ldu_global_p: {
4742 Info.opc = ISD::INTRINSIC_W_CHAIN;
4743 Info.memVT = getValueType(I.getDataLayout(), I.getType());
4744 Info.ptrVal = I.getArgOperand(0);
4745 Info.offset = 0;
4746 Info.flags = MachineMemOperand::MOLoad;
4747 Info.align = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
4748
4749 return true;
4750 }
4751 case Intrinsic::nvvm_tex_1d_v4f32_s32:
4752 case Intrinsic::nvvm_tex_1d_v4f32_f32:
4753 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
4754 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
4755 case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
4756 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
4757 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
4758 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
4759 case Intrinsic::nvvm_tex_2d_v4f32_s32:
4760 case Intrinsic::nvvm_tex_2d_v4f32_f32:
4761 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
4762 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
4763 case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
4764 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
4765 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
4766 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
4767 case Intrinsic::nvvm_tex_3d_v4f32_s32:
4768 case Intrinsic::nvvm_tex_3d_v4f32_f32:
4769 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
4770 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
4771 case Intrinsic::nvvm_tex_cube_v4f32_f32:
4772 case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
4773 case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
4774 case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
4775 case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
4776 case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
4777 case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
4778 case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
4779 case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
4780 case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
4781 case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
4782 case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
4783 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
4784 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
4785 case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
4786 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
4787 case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
4788 case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
4789 case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
4790 case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
4791 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
4792 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
4793 case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
4794 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
4795 case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
4796 case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
4797 case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
4798 case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
4799 case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
4800 case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
4801 case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
4802 case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
4803 case Intrinsic::nvvm_tex_unified_cube_grad_v4f32_f32:
4804 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4f32_f32:
4805 case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
4806 case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
4807 case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
4808 case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
4809 Info.opc = ISD::INTRINSIC_W_CHAIN;
4810 Info.memVT = MVT::v4f32;
4811 Info.ptrVal = nullptr;
4812 Info.offset = 0;
4813 Info.flags = MachineMemOperand::MOLoad;
4814 Info.align = Align(16);
4815 return true;
4816
4817 case Intrinsic::nvvm_tex_1d_v4s32_s32:
4818 case Intrinsic::nvvm_tex_1d_v4s32_f32:
4819 case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
4820 case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
4821 case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
4822 case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
4823 case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
4824 case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
4825 case Intrinsic::nvvm_tex_2d_v4s32_s32:
4826 case Intrinsic::nvvm_tex_2d_v4s32_f32:
4827 case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
4828 case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
4829 case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
4830 case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
4831 case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
4832 case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
4833 case Intrinsic::nvvm_tex_3d_v4s32_s32:
4834 case Intrinsic::nvvm_tex_3d_v4s32_f32:
4835 case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
4836 case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
4837 case Intrinsic::nvvm_tex_cube_v4s32_f32:
4838 case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
4839 case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
4840 case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
4841 case Intrinsic::nvvm_tex_cube_v4u32_f32:
4842 case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
4843 case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
4844 case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
4845 case Intrinsic::nvvm_tex_1d_v4u32_s32:
4846 case Intrinsic::nvvm_tex_1d_v4u32_f32:
4847 case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
4848 case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
4849 case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
4850 case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
4851 case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
4852 case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
4853 case Intrinsic::nvvm_tex_2d_v4u32_s32:
4854 case Intrinsic::nvvm_tex_2d_v4u32_f32:
4855 case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
4856 case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
4857 case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
4858 case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
4859 case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
4860 case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
4861 case Intrinsic::nvvm_tex_3d_v4u32_s32:
4862 case Intrinsic::nvvm_tex_3d_v4u32_f32:
4863 case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
4864 case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
4865 case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
4866 case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
4867 case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
4868 case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
4869 case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
4870 case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
4871 case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
4872 case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
4873 case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
4874 case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
4875 case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
4876 case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
4877 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
4878 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
4879 case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
4880 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
4881 case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
4882 case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
4883 case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
4884 case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
4885 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
4886 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
4887 case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
4888 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
4889 case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
4890 case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
4891 case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
4892 case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
4893 case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
4894 case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
4895 case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
4896 case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
4897 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
4898 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
4899 case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
4900 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
4901 case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
4902 case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
4903 case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
4904 case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
4905 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
4906 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
4907 case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
4908 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
4909 case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
4910 case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
4911 case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
4912 case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
4913 case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
4914 case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
4915 case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
4916 case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
4917 case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
4918 case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
4919 case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
4920 case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
4921 case Intrinsic::nvvm_tex_unified_cube_grad_v4s32_f32:
4922 case Intrinsic::nvvm_tex_unified_cube_grad_v4u32_f32:
4923 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4s32_f32:
4924 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4u32_f32:
4925 case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
4926 case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
4927 case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
4928 case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
4929 case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
4930 case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
4931 case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
4932 case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
4933 Info.opc = ISD::INTRINSIC_W_CHAIN;
4934 Info.memVT = MVT::v4i32;
4935 Info.ptrVal = nullptr;
4936 Info.offset = 0;
4937 Info.flags = MachineMemOperand::MOLoad;
4938 Info.align = Align(16);
4939 return true;
4940
4941 case Intrinsic::nvvm_suld_1d_i8_clamp:
4942 case Intrinsic::nvvm_suld_1d_v2i8_clamp:
4943 case Intrinsic::nvvm_suld_1d_v4i8_clamp:
4944 case Intrinsic::nvvm_suld_1d_array_i8_clamp:
4945 case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
4946 case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
4947 case Intrinsic::nvvm_suld_2d_i8_clamp:
4948 case Intrinsic::nvvm_suld_2d_v2i8_clamp:
4949 case Intrinsic::nvvm_suld_2d_v4i8_clamp:
4950 case Intrinsic::nvvm_suld_2d_array_i8_clamp:
4951 case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
4952 case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
4953 case Intrinsic::nvvm_suld_3d_i8_clamp:
4954 case Intrinsic::nvvm_suld_3d_v2i8_clamp:
4955 case Intrinsic::nvvm_suld_3d_v4i8_clamp:
4956 case Intrinsic::nvvm_suld_1d_i8_trap:
4957 case Intrinsic::nvvm_suld_1d_v2i8_trap:
4958 case Intrinsic::nvvm_suld_1d_v4i8_trap:
4959 case Intrinsic::nvvm_suld_1d_array_i8_trap:
4960 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
4961 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
4962 case Intrinsic::nvvm_suld_2d_i8_trap:
4963 case Intrinsic::nvvm_suld_2d_v2i8_trap:
4964 case Intrinsic::nvvm_suld_2d_v4i8_trap:
4965 case Intrinsic::nvvm_suld_2d_array_i8_trap:
4966 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
4967 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
4968 case Intrinsic::nvvm_suld_3d_i8_trap:
4969 case Intrinsic::nvvm_suld_3d_v2i8_trap:
4970 case Intrinsic::nvvm_suld_3d_v4i8_trap:
4971 case Intrinsic::nvvm_suld_1d_i8_zero:
4972 case Intrinsic::nvvm_suld_1d_v2i8_zero:
4973 case Intrinsic::nvvm_suld_1d_v4i8_zero:
4974 case Intrinsic::nvvm_suld_1d_array_i8_zero:
4975 case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
4976 case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
4977 case Intrinsic::nvvm_suld_2d_i8_zero:
4978 case Intrinsic::nvvm_suld_2d_v2i8_zero:
4979 case Intrinsic::nvvm_suld_2d_v4i8_zero:
4980 case Intrinsic::nvvm_suld_2d_array_i8_zero:
4981 case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
4982 case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
4983 case Intrinsic::nvvm_suld_3d_i8_zero:
4984 case Intrinsic::nvvm_suld_3d_v2i8_zero:
4985 case Intrinsic::nvvm_suld_3d_v4i8_zero:
4986 Info.opc = ISD::INTRINSIC_W_CHAIN;
4987 Info.memVT = MVT::i8;
4988 Info.ptrVal = nullptr;
4989 Info.offset = 0;
4990 Info.flags = MachineMemOperand::MOLoad;
4991 Info.align = Align(16);
4992 return true;
4993
4994 case Intrinsic::nvvm_suld_1d_i16_clamp:
4995 case Intrinsic::nvvm_suld_1d_v2i16_clamp:
4996 case Intrinsic::nvvm_suld_1d_v4i16_clamp:
4997 case Intrinsic::nvvm_suld_1d_array_i16_clamp:
4998 case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
4999 case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
5000 case Intrinsic::nvvm_suld_2d_i16_clamp:
5001 case Intrinsic::nvvm_suld_2d_v2i16_clamp:
5002 case Intrinsic::nvvm_suld_2d_v4i16_clamp:
5003 case Intrinsic::nvvm_suld_2d_array_i16_clamp:
5004 case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
5005 case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
5006 case Intrinsic::nvvm_suld_3d_i16_clamp:
5007 case Intrinsic::nvvm_suld_3d_v2i16_clamp:
5008 case Intrinsic::nvvm_suld_3d_v4i16_clamp:
5009 case Intrinsic::nvvm_suld_1d_i16_trap:
5010 case Intrinsic::nvvm_suld_1d_v2i16_trap:
5011 case Intrinsic::nvvm_suld_1d_v4i16_trap:
5012 case Intrinsic::nvvm_suld_1d_array_i16_trap:
5013 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
5014 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
5015 case Intrinsic::nvvm_suld_2d_i16_trap:
5016 case Intrinsic::nvvm_suld_2d_v2i16_trap:
5017 case Intrinsic::nvvm_suld_2d_v4i16_trap:
5018 case Intrinsic::nvvm_suld_2d_array_i16_trap:
5019 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
5020 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
5021 case Intrinsic::nvvm_suld_3d_i16_trap:
5022 case Intrinsic::nvvm_suld_3d_v2i16_trap:
5023 case Intrinsic::nvvm_suld_3d_v4i16_trap:
5024 case Intrinsic::nvvm_suld_1d_i16_zero:
5025 case Intrinsic::nvvm_suld_1d_v2i16_zero:
5026 case Intrinsic::nvvm_suld_1d_v4i16_zero:
5027 case Intrinsic::nvvm_suld_1d_array_i16_zero:
5028 case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
5029 case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
5030 case Intrinsic::nvvm_suld_2d_i16_zero:
5031 case Intrinsic::nvvm_suld_2d_v2i16_zero:
5032 case Intrinsic::nvvm_suld_2d_v4i16_zero:
5033 case Intrinsic::nvvm_suld_2d_array_i16_zero:
5034 case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
5035 case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
5036 case Intrinsic::nvvm_suld_3d_i16_zero:
5037 case Intrinsic::nvvm_suld_3d_v2i16_zero:
5038 case Intrinsic::nvvm_suld_3d_v4i16_zero:
5039 Info.opc = ISD::INTRINSIC_W_CHAIN;
5040 Info.memVT = MVT::i16;
5041 Info.ptrVal = nullptr;
5042 Info.offset = 0;
5043 Info.flags = MachineMemOperand::MOLoad;
5044 Info.align = Align(16);
5045 return true;
5046
5047 case Intrinsic::nvvm_suld_1d_i32_clamp:
5048 case Intrinsic::nvvm_suld_1d_v2i32_clamp:
5049 case Intrinsic::nvvm_suld_1d_v4i32_clamp:
5050 case Intrinsic::nvvm_suld_1d_array_i32_clamp:
5051 case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
5052 case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
5053 case Intrinsic::nvvm_suld_2d_i32_clamp:
5054 case Intrinsic::nvvm_suld_2d_v2i32_clamp:
5055 case Intrinsic::nvvm_suld_2d_v4i32_clamp:
5056 case Intrinsic::nvvm_suld_2d_array_i32_clamp:
5057 case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
5058 case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
5059 case Intrinsic::nvvm_suld_3d_i32_clamp:
5060 case Intrinsic::nvvm_suld_3d_v2i32_clamp:
5061 case Intrinsic::nvvm_suld_3d_v4i32_clamp:
5062 case Intrinsic::nvvm_suld_1d_i32_trap:
5063 case Intrinsic::nvvm_suld_1d_v2i32_trap:
5064 case Intrinsic::nvvm_suld_1d_v4i32_trap:
5065 case Intrinsic::nvvm_suld_1d_array_i32_trap:
5066 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
5067 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
5068 case Intrinsic::nvvm_suld_2d_i32_trap:
5069 case Intrinsic::nvvm_suld_2d_v2i32_trap:
5070 case Intrinsic::nvvm_suld_2d_v4i32_trap:
5071 case Intrinsic::nvvm_suld_2d_array_i32_trap:
5072 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
5073 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
5074 case Intrinsic::nvvm_suld_3d_i32_trap:
5075 case Intrinsic::nvvm_suld_3d_v2i32_trap:
5076 case Intrinsic::nvvm_suld_3d_v4i32_trap:
5077 case Intrinsic::nvvm_suld_1d_i32_zero:
5078 case Intrinsic::nvvm_suld_1d_v2i32_zero:
5079 case Intrinsic::nvvm_suld_1d_v4i32_zero:
5080 case Intrinsic::nvvm_suld_1d_array_i32_zero:
5081 case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
5082 case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
5083 case Intrinsic::nvvm_suld_2d_i32_zero:
5084 case Intrinsic::nvvm_suld_2d_v2i32_zero:
5085 case Intrinsic::nvvm_suld_2d_v4i32_zero:
5086 case Intrinsic::nvvm_suld_2d_array_i32_zero:
5087 case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
5088 case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
5089 case Intrinsic::nvvm_suld_3d_i32_zero:
5090 case Intrinsic::nvvm_suld_3d_v2i32_zero:
5091 case Intrinsic::nvvm_suld_3d_v4i32_zero:
5092 Info.opc = ISD::INTRINSIC_W_CHAIN;
5093 Info.memVT = MVT::i32;
5094 Info.ptrVal = nullptr;
5095 Info.offset = 0;
5096 Info.flags = MachineMemOperand::MOLoad;
5097 Info.align = Align(16);
5098 return true;
5099
5100 case Intrinsic::nvvm_suld_1d_i64_clamp:
5101 case Intrinsic::nvvm_suld_1d_v2i64_clamp:
5102 case Intrinsic::nvvm_suld_1d_array_i64_clamp:
5103 case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
5104 case Intrinsic::nvvm_suld_2d_i64_clamp:
5105 case Intrinsic::nvvm_suld_2d_v2i64_clamp:
5106 case Intrinsic::nvvm_suld_2d_array_i64_clamp:
5107 case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
5108 case Intrinsic::nvvm_suld_3d_i64_clamp:
5109 case Intrinsic::nvvm_suld_3d_v2i64_clamp:
5110 case Intrinsic::nvvm_suld_1d_i64_trap:
5111 case Intrinsic::nvvm_suld_1d_v2i64_trap:
5112 case Intrinsic::nvvm_suld_1d_array_i64_trap:
5113 case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
5114 case Intrinsic::nvvm_suld_2d_i64_trap:
5115 case Intrinsic::nvvm_suld_2d_v2i64_trap:
5116 case Intrinsic::nvvm_suld_2d_array_i64_trap:
5117 case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
5118 case Intrinsic::nvvm_suld_3d_i64_trap:
5119 case Intrinsic::nvvm_suld_3d_v2i64_trap:
5120 case Intrinsic::nvvm_suld_1d_i64_zero:
5121 case Intrinsic::nvvm_suld_1d_v2i64_zero:
5122 case Intrinsic::nvvm_suld_1d_array_i64_zero:
5123 case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
5124 case Intrinsic::nvvm_suld_2d_i64_zero:
5125 case Intrinsic::nvvm_suld_2d_v2i64_zero:
5126 case Intrinsic::nvvm_suld_2d_array_i64_zero:
5127 case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
5128 case Intrinsic::nvvm_suld_3d_i64_zero:
5129 case Intrinsic::nvvm_suld_3d_v2i64_zero:
5130 Info.opc = ISD::INTRINSIC_W_CHAIN;
5131 Info.memVT = MVT::i64;
5132 Info.ptrVal = nullptr;
5133 Info.offset = 0;
5134 Info.flags = MachineMemOperand::MOLoad;
5135 Info.align = Align(16);
5136 return true;
5137
5138 case Intrinsic::nvvm_tcgen05_ld_16x64b_x1:
5139 case Intrinsic::nvvm_tcgen05_ld_32x32b_x1:
5140 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x1: {
5141 Info.opc = ISD::INTRINSIC_W_CHAIN;
5142 Info.memVT = MVT::v1i32;
5143 Info.ptrVal = I.getArgOperand(0);
5144 Info.offset = 0;
5145 Info.flags = MachineMemOperand::MOLoad;
5146 Info.align.reset();
5147 return true;
5148 }
5149
5150 case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:
5151 case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:
5152 case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:
5153 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2:
5154 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x2_i32:
5155 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x2_i32: {
5156 Info.opc = ISD::INTRINSIC_W_CHAIN;
5157 Info.memVT = MVT::v2i32;
5158 Info.ptrVal = I.getArgOperand(0);
5159 Info.offset = 0;
5160 Info.flags = MachineMemOperand::MOLoad;
5161 Info.align.reset();
5162 return true;
5163 }
5164
5165 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x2_f32:
5166 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x2_f32: {
5167 Info.opc = ISD::INTRINSIC_W_CHAIN;
5168 Info.memVT = MVT::v2f32;
5169 Info.ptrVal = I.getArgOperand(0);
5170 Info.offset = 0;
5171 Info.flags = MachineMemOperand::MOLoad;
5172 Info.align.reset();
5173 return true;
5174 }
5175
5176 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:
5177 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:
5178 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:
5179 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:
5180 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4:
5181 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x4_i32:
5182 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x4_i32: {
5183 Info.opc = ISD::INTRINSIC_W_CHAIN;
5184 Info.memVT = MVT::v4i32;
5185 Info.ptrVal = I.getArgOperand(0);
5186 Info.offset = 0;
5187 Info.flags = MachineMemOperand::MOLoad;
5188 Info.align.reset();
5189 return true;
5190 }
5191
5192 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x4_f32:
5193 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x4_f32: {
5194 Info.opc = ISD::INTRINSIC_W_CHAIN;
5195 Info.memVT = MVT::v4f32;
5196 Info.ptrVal = I.getArgOperand(0);
5197 Info.offset = 0;
5198 Info.flags = MachineMemOperand::MOLoad;
5199 Info.align.reset();
5200 return true;
5201 }
5202
5203 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:
5204 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:
5205 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:
5206 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:
5207 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8:
5208 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x8_i32:
5209 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x8_i32: {
5210 Info.opc = ISD::INTRINSIC_W_CHAIN;
5211 Info.memVT = MVT::v8i32;
5212 Info.ptrVal = I.getArgOperand(0);
5213 Info.offset = 0;
5214 Info.flags = MachineMemOperand::MOLoad;
5215 Info.align.reset();
5216 return true;
5217 }
5218
5219 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x8_f32:
5220 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x8_f32: {
5221 Info.opc = ISD::INTRINSIC_W_CHAIN;
5222 Info.memVT = MVT::v8f32;
5223 Info.ptrVal = I.getArgOperand(0);
5224 Info.offset = 0;
5225 Info.flags = MachineMemOperand::MOLoad;
5226 Info.align.reset();
5227 return true;
5228 }
5229
5230 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:
5231 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:
5232 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:
5233 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:
5234 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16:
5235 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x16_i32:
5236 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x16_i32: {
5237 Info.opc = ISD::INTRINSIC_W_CHAIN;
5238 Info.memVT = MVT::v16i32;
5239 Info.ptrVal = I.getArgOperand(0);
5240 Info.offset = 0;
5241 Info.flags = MachineMemOperand::MOLoad;
5242 Info.align.reset();
5243 return true;
5244 }
5245
5246 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x16_f32:
5247 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x16_f32: {
5248 Info.opc = ISD::INTRINSIC_W_CHAIN;
5249 Info.memVT = MVT::v16f32;
5250 Info.ptrVal = I.getArgOperand(0);
5251 Info.offset = 0;
5252 Info.flags = MachineMemOperand::MOLoad;
5253 Info.align.reset();
5254 return true;
5255 }
5256
5257 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:
5258 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:
5259 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:
5260 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:
5261 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32:
5262 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x32_i32:
5263 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x32_i32: {
5264 Info.opc = ISD::INTRINSIC_W_CHAIN;
5265 Info.memVT = MVT::v32i32;
5266 Info.ptrVal = I.getArgOperand(0);
5267 Info.offset = 0;
5268 Info.flags = MachineMemOperand::MOLoad;
5269 Info.align.reset();
5270 return true;
5271 }
5272
5273 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x32_f32:
5274 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x32_f32: {
5275 Info.opc = ISD::INTRINSIC_W_CHAIN;
5276 Info.memVT = MVT::v32f32;
5277 Info.ptrVal = I.getArgOperand(0);
5278 Info.offset = 0;
5279 Info.flags = MachineMemOperand::MOLoad;
5280 Info.align.reset();
5281 return true;
5282 }
5283
5284 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:
5285 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:
5286 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:
5287 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:
5288 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64:
5289 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x64_i32:
5290 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x64_i32: {
5291 Info.opc = ISD::INTRINSIC_W_CHAIN;
5292 Info.memVT = MVT::v64i32;
5293 Info.ptrVal = I.getArgOperand(0);
5294 Info.offset = 0;
5295 Info.flags = MachineMemOperand::MOLoad;
5296 Info.align.reset();
5297 return true;
5298 }
5299
5300 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x64_f32:
5301 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x64_f32: {
5302 Info.opc = ISD::INTRINSIC_W_CHAIN;
5303 Info.memVT = MVT::v64f32;
5304 Info.ptrVal = I.getArgOperand(0);
5305 Info.offset = 0;
5306 Info.flags = MachineMemOperand::MOLoad;
5307 Info.align.reset();
5308 return true;
5309 }
5310
5311 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:
5312 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:
5313 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:
5314 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:
5315 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128:
5316 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x128_i32:
5317 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x128_i32: {
5318 Info.opc = ISD::INTRINSIC_W_CHAIN;
5319 Info.memVT = MVT::v128i32;
5320 Info.ptrVal = I.getArgOperand(0);
5321 Info.offset = 0;
5322 Info.flags = MachineMemOperand::MOLoad;
5323 Info.align.reset();
5324 return true;
5325 }
5326
5327 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x128_f32:
5328 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x128_f32: {
5329 Info.opc = ISD::INTRINSIC_W_CHAIN;
5330 Info.memVT = MVT::v128f32;
5331 Info.ptrVal = I.getArgOperand(0);
5332 Info.offset = 0;
5333 Info.flags = MachineMemOperand::MOLoad;
5334 Info.align.reset();
5335 return true;
5336 }
5337
5338 case Intrinsic::nvvm_tcgen05_st_16x64b_x1:
5339 case Intrinsic::nvvm_tcgen05_st_32x32b_x1:
5340 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x1: {
5341 Info.opc = ISD::INTRINSIC_VOID;
5342 Info.memVT = MVT::i32;
5343 Info.ptrVal = I.getArgOperand(0);
5344 Info.offset = 0;
5345 Info.flags = MachineMemOperand::MOStore;
5346 Info.align.reset();
5347 return true;
5348 }
5349
5350 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:
5351 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:
5352 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:
5353 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2: {
5354 Info.opc = ISD::INTRINSIC_VOID;
5355 Info.memVT = MVT::v2i32;
5356 Info.ptrVal = I.getArgOperand(0);
5357 Info.offset = 0;
5358 Info.flags = MachineMemOperand::MOStore;
5359 Info.align.reset();
5360 return true;
5361 }
5362
5363 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:
5364 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:
5365 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:
5366 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:
5367 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4: {
5368 Info.opc = ISD::INTRINSIC_VOID;
5369 Info.memVT = MVT::v4i32;
5370 Info.ptrVal = I.getArgOperand(0);
5371 Info.offset = 0;
5372 Info.flags = MachineMemOperand::MOStore;
5373 Info.align.reset();
5374 return true;
5375 }
5376
5377 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:
5378 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:
5379 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:
5380 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:
5381 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8: {
5382 Info.opc = ISD::INTRINSIC_VOID;
5383 Info.memVT = MVT::v8i32;
5384 Info.ptrVal = I.getArgOperand(0);
5385 Info.offset = 0;
5386 Info.flags = MachineMemOperand::MOStore;
5387 Info.align.reset();
5388 return true;
5389 }
5390
5391 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:
5392 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:
5393 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:
5394 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:
5395 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16: {
5396 Info.opc = ISD::INTRINSIC_VOID;
5397 Info.memVT = MVT::v16i32;
5398 Info.ptrVal = I.getArgOperand(0);
5399 Info.offset = 0;
5400 Info.flags = MachineMemOperand::MOStore;
5401 Info.align.reset();
5402 return true;
5403 }
5404
5405 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:
5406 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:
5407 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:
5408 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:
5409 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32: {
5410 Info.opc = ISD::INTRINSIC_VOID;
5411 Info.memVT = MVT::v32i32;
5412 Info.ptrVal = I.getArgOperand(0);
5413 Info.offset = 0;
5414 Info.flags = MachineMemOperand::MOStore;
5415 Info.align.reset();
5416 return true;
5417 }
5418
5419 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:
5420 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:
5421 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:
5422 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
5423 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64: {
5424 Info.opc = ISD::INTRINSIC_VOID;
5425 Info.memVT = MVT::v64i32;
5426 Info.ptrVal = I.getArgOperand(0);
5427 Info.offset = 0;
5428 Info.flags = MachineMemOperand::MOStore;
5429 Info.align.reset();
5430 return true;
5431 }
5432
5433 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:
5434 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:
5435 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:
5436 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:
5437 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128: {
5438 Info.opc = ISD::INTRINSIC_VOID;
5439 Info.memVT = MVT::v128i32;
5440 Info.ptrVal = I.getArgOperand(0);
5441 Info.offset = 0;
5442 Info.flags = MachineMemOperand::MOStore;
5443 Info.align.reset();
5444 return true;
5445 }
5446 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
5447 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
5448 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
5449 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
5450 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
5451 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
5452 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
5453 case Intrinsic::
5454 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
5455 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
5456 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
5457 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
5458 case Intrinsic::
5459 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift: {
5460 // We are reading and writing back to TMem
5461 Info.opc = ISD::INTRINSIC_VOID;
5462 Info.memVT = MVT::v4i32;
5463 Info.ptrVal = I.getArgOperand(0);
5464 Info.offset = 0;
5465 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
5466 Info.align = Align(16);
5467 return true;
5468 }
5469
5470 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
5471 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
5472 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
5473 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
5474 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
5475 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
5476 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
5477 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
5478 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
5479 case Intrinsic::
5480 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
5481 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
5482 case Intrinsic::
5483 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift: {
5484 // We are reading and writing back to TMem
5485 Info.opc = ISD::INTRINSIC_VOID;
5486 Info.memVT = MVT::v8i32;
5487 Info.ptrVal = I.getArgOperand(0);
5488 Info.offset = 0;
5489 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
5490 Info.align = Align(16);
5491 return true;
5492 }
5493 }
5494 return false;
5495}
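// The IntrinsicInfo filled in above is what the SelectionDAG builder uses to
// give the intrinsic node a MachineMemOperand, so the memVT, flags, and
// alignment chosen here determine how later passes may alias-analyze,
// reorder, or merge these memory accesses.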
5496
5497/// getFunctionParamOptimizedAlign - since function arguments are passed via
5498/// .param space, we may want to increase their alignment in a way that
5499/// ensures that we can effectively vectorize their loads & stores. We can
5500/// increase alignment only if the function has internal or private
5501/// linkage, as for other linkage types callers may already rely on the default
5502/// alignment. To allow using 128-bit vectorized loads/stores, this function
5503/// ensures that alignment is 16 or greater.
5504Align NVPTXTargetLowering::getFunctionParamOptimizedAlign(
5505 const Function *F, Type *ArgTy, const DataLayout &DL) const {
5506 // Capping the alignment to 128 bytes as that is the maximum alignment
5507 // supported by PTX.
5508 const Align ABITypeAlign = std::min(Align(128), DL.getABITypeAlign(ArgTy));
5509
5510 // If a function has linkage different from internal or private, we
5511 // must use default ABI alignment as external users rely on it. Same
5512 // for a function that may be called from a function pointer.
5513 if (!F || !F->hasLocalLinkage() ||
5514 F->hasAddressTaken(/*Users=*/nullptr,
5515 /*IgnoreCallbackUses=*/false,
5516 /*IgnoreAssumeLikeCalls=*/true,
5517 /*IgnoreLLVMUsed=*/true))
5518 return ABITypeAlign;
5519
5520 assert(!isKernelFunction(*F) && "Expect kernels to have non-local linkage");
5521 return std::max(Align(16), ABITypeAlign);
5522}
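// Illustrative example: a device function with local linkage taking a struct
// of four floats (ABI alignment 4) gets its .param alignment raised to 16,
// which can allow the parameter to be read with a single 128-bit vectorized
// load instead of four scalar loads.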
5523
5524/// Helper for computing alignment of a device function byval parameter.
5525Align NVPTXTargetLowering::getFunctionByValParamAlign(
5526 const Function *F, Type *ArgTy, Align InitialAlign,
5527 const DataLayout &DL) const {
5528 Align ArgAlign = InitialAlign;
5529 // Try to increase alignment to enhance vectorization options.
5530 if (F)
5531 ArgAlign = std::max(ArgAlign, getFunctionParamOptimizedAlign(F, ArgTy, DL));
5532
5533 // Old ptxas versions have a bug. When PTX code takes the address of a
5534 // byval parameter with alignment < 4, ptxas generates code to
5535 // spill the argument into memory. Alas, on sm_50+ ptxas generates
5536 // SASS code that fails with a misaligned access. To work around
5537 // the problem, make sure that we align byval parameters to at
5538 // least 4 bytes. This bug seems to be fixed at least starting from
5539 // ptxas > 9.0.
5540 // TODO: remove this after verifying the bug is not reproduced
5541 // on non-deprecated ptxas versions.
5542 if (ForceMinByValParamAlign)
5543 ArgAlign = std::max(ArgAlign, Align(4));
5544
5545 return ArgAlign;
5546}
5547
5548// Helper for getting a function parameter name. The name is composed from
5549// its index and the function name. A negative index corresponds to the
5550// special parameter (unsized array) used for passing variable arguments.
5551std::string NVPTXTargetLowering::getParamName(const Function *F,
5552 int Idx) const {
5553 std::string ParamName;
5554 raw_string_ostream ParamStr(ParamName);
5555
5556 ParamStr << getTargetMachine().getSymbol(F)->getName();
5557 if (Idx < 0)
5558 ParamStr << "_vararg";
5559 else
5560 ParamStr << "_param_" << Idx;
5561
5562 return ParamName;
5563}
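// For example, parameter 1 of a function whose PTX symbol is "foo" is named
// "foo_param_1", and the special vararg parameter (Idx < 0) is "foo_vararg".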
5564
5565/// isLegalAddressingMode - Return true if the addressing mode represented
5566/// by AM is legal for this target, for a load/store of the specified type.
5567/// Used to guide target specific optimizations, like loop strength reduction
5568/// (LoopStrengthReduce.cpp) and memory optimization for address mode
5569/// (CodeGenPrepare.cpp)
5570bool NVPTXTargetLowering::isLegalAddressingMode(const DataLayout &DL,
5571 const AddrMode &AM, Type *Ty,
5572 unsigned AS, Instruction *I) const {
5573 // AddrMode - This represents an addressing mode of:
5574 // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
5575 //
5576 // The legal address modes are
5577 // - [avar]
5578 // - [areg]
5579 // - [areg+immoff]
5580 // - [immAddr]
5581
5582 // immoff must fit in a signed 32-bit int
5583 if (!APInt(64, AM.BaseOffs).isSignedIntN(32))
5584 return false;
5585
5586 if (AM.BaseGV)
5587 return !AM.BaseOffs && !AM.HasBaseReg && !AM.Scale;
5588
5589 switch (AM.Scale) {
5590 case 0: // "r", "r+i" or "i" is allowed
5591 break;
5592 case 1:
5593 if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
5594 return false;
5595 // Otherwise we have r+i.
5596 break;
5597 default:
5598 // No scale > 1 is allowed
5599 return false;
5600 }
5601 return true;
5602}
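// Illustrative examples of the rules above: "[gvar]" (a bare global), "[reg]",
// and "[reg+42]" (base register plus a 32-bit immediate) are all accepted,
// while "[reg+reg]" or any scaled index such as "[reg+4*reg]" is rejected,
// because PTX addressing has no register-plus-register or scaled form.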
5603
5604//===----------------------------------------------------------------------===//
5605// NVPTX Inline Assembly Support
5606//===----------------------------------------------------------------------===//
5607
5608/// getConstraintType - Given a constraint letter, return the type of
5609/// constraint it is for this target.
5610NVPTXTargetLowering::ConstraintType
5611NVPTXTargetLowering::getConstraintType(StringRef Constraint) const {
5612 if (Constraint.size() == 1) {
5613 switch (Constraint[0]) {
5614 default:
5615 break;
5616 case 'b':
5617 case 'r':
5618 case 'h':
5619 case 'c':
5620 case 'l':
5621 case 'f':
5622 case 'd':
5623 case 'q':
5624 case '0':
5625 case 'N':
5626 return C_RegisterClass;
5627 }
5628 }
5629 return TargetLowering::getConstraintType(Constraint);
5630}
5631
5632std::pair<unsigned, const TargetRegisterClass *>
5633NVPTXTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
5634 StringRef Constraint,
5635 MVT VT) const {
5636 if (Constraint.size() == 1) {
5637 switch (Constraint[0]) {
5638 case 'b':
5639 return std::make_pair(0U, &NVPTX::B1RegClass);
5640 case 'c':
5641 case 'h':
5642 return std::make_pair(0U, &NVPTX::B16RegClass);
5643 case 'r':
5644 case 'f':
5645 return std::make_pair(0U, &NVPTX::B32RegClass);
5646 case 'l':
5647 case 'N':
5648 case 'd':
5649 return std::make_pair(0U, &NVPTX::B64RegClass);
5650 case 'q': {
5651 if (STI.getSmVersion() < 70)
5652 report_fatal_error("Inline asm with 128 bit operands is only "
5653 "supported for sm_70 and higher!");
5654 return std::make_pair(0U, &NVPTX::B128RegClass);
5655 }
5656 }
5657 }
5658 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
5659}
5660
5661//===----------------------------------------------------------------------===//
5662// NVPTX DAG Combining
5663//===----------------------------------------------------------------------===//
5664
5665bool NVPTXTargetLowering::allowFMA(MachineFunction &MF,
5666 CodeGenOptLevel OptLevel) const {
5667 // Always honor command-line argument
5668 if (FMAContractLevelOpt.getNumOccurrences() > 0)
5669 return FMAContractLevelOpt > 0;
5670
5671 // Do not contract if we're not optimizing the code.
5672 if (OptLevel == CodeGenOptLevel::None)
5673 return false;
5674
5675 // Honor TargetOptions flags that explicitly say fusion is okay.
5676 if (MF.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast)
5677 return true;
5678
5679 return false;
5680}
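// In practice: -nvptx-fma-level=0 disables contraction, -nvptx-fma-level=1/2
// force it on, and otherwise (at -O1 and above) contraction is allowed only
// when the fast FP-op-fusion TargetOption (e.g. -ffp-contract=fast) is set.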
5681
5682static bool isConstZero(const SDValue &Operand) {
5683 const auto *Const = dyn_cast<ConstantSDNode>(Operand);
5684 return Const && Const->getZExtValue() == 0;
5685}
5686
5687/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
5688/// operands N0 and N1. This is a helper for PerformADDCombine that is
5689/// called with the default operands, and if that fails, with commuted
5690/// operands.
5691static SDValue
5692PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
5693 TargetLowering::DAGCombinerInfo &DCI) {
5694 EVT VT = N0.getValueType();
5695
5696 // Since integer multiply-add costs the same as integer multiply
5697 // but is more costly than integer add, do the fusion only when
5698 // the mul is only used in the add.
5699 // TODO: this may not be true for later architectures, consider relaxing this
5700 if (!N0.getNode()->hasOneUse())
5701 return SDValue();
5702
5703 // fold (add (select cond, 0, (mul a, b)), c)
5704 // -> (select cond, c, (add (mul a, b), c))
5705 //
5706 if (N0.getOpcode() == ISD::SELECT) {
5707 unsigned ZeroOpNum;
5708 if (isConstZero(N0->getOperand(1)))
5709 ZeroOpNum = 1;
5710 else if (isConstZero(N0->getOperand(2)))
5711 ZeroOpNum = 2;
5712 else
5713 return SDValue();
5714
5715 SDValue M = N0->getOperand((ZeroOpNum == 1) ? 2 : 1);
5716 if (M->getOpcode() != ISD::MUL || !M.getNode()->hasOneUse())
5717 return SDValue();
5718
5719 SDLoc DL(N);
5720 SDValue Mul =
5721 DCI.DAG.getNode(ISD::MUL, DL, VT, M->getOperand(0), M->getOperand(1));
5722 SDValue MAD = DCI.DAG.getNode(ISD::ADD, DL, VT, Mul, N1);
5723 return DCI.DAG.getSelect(SDLoc(N), VT, N0->getOperand(0),
5724 ((ZeroOpNum == 1) ? N1 : MAD),
5725 ((ZeroOpNum == 1) ? MAD : N1));
5726 }
5727
5728 return SDValue();
5729}
5730
5731static SDValue
5732PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
5733 TargetLowering::DAGCombinerInfo &DCI,
5734 CodeGenOptLevel OptLevel) {
5735 EVT VT = N0.getValueType();
5736 if (N0.getOpcode() == ISD::FMUL) {
5737 const auto *TLI = static_cast<const NVPTXTargetLowering *>(
5738 &DCI.DAG.getTargetLoweringInfo());
5739 if (!(TLI->allowFMA(DCI.DAG.getMachineFunction(), OptLevel) ||
5740 (N->getFlags().hasAllowContract() &&
5741 N0->getFlags().hasAllowContract())))
5742 return SDValue();
5743
5744 // For floating point:
5745 // Do the fusion only when the mul has fewer than 5 uses and all of
5746 // them are adds.
5747 // The heuristic is that if a use is not an add, then that use
5748 // cannot be fused into an fma, so the mul is still needed anyway.
5749 // If there are more than 4 uses, even if they are all adds, fusing
5750 // them will increase register pressure.
5751 //
5752 int numUses = 0;
5753 int nonAddCount = 0;
5754 for (const SDNode *User : N0.getNode()->users()) {
5755 numUses++;
5756 if (User->getOpcode() != ISD::FADD)
5757 ++nonAddCount;
5758 if (numUses >= 5)
5759 return SDValue();
5760 }
5761 if (nonAddCount) {
5762 int orderNo = N->getIROrder();
5763 int orderNo2 = N0.getNode()->getIROrder();
5764 // A simple heuristic for estimating potential register pressure: the
5765 // difference in IR order approximates the distance between the def and
5766 // its use, and a longer distance is more likely to create register
5767 // pressure.
5768 if (orderNo - orderNo2 < 500)
5769 return SDValue();
5770
5771 // Now, check if at least one of the FMUL's operands is live beyond the
5772 // node N, which guarantees that the FMA will not increase register
5773 // pressure at node N.
5774 bool opIsLive = false;
5775 const SDNode *left = N0.getOperand(0).getNode();
5776 const SDNode *right = N0.getOperand(1).getNode();
5777
5778 if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
5779 opIsLive = true;
5780
5781 if (!opIsLive)
5782 for (const SDNode *User : left->users()) {
5783 int orderNo3 = User->getIROrder();
5784 if (orderNo3 > orderNo) {
5785 opIsLive = true;
5786 break;
5787 }
5788 }
5789
5790 if (!opIsLive)
5791 for (const SDNode *User : right->users()) {
5792 int orderNo3 = User->getIROrder();
5793 if (orderNo3 > orderNo) {
5794 opIsLive = true;
5795 break;
5796 }
5797 }
5798
5799 if (!opIsLive)
5800 return SDValue();
5801 }
5802
5803 return DCI.DAG.getNode(ISD::FMA, SDLoc(N), VT, N0.getOperand(0),
5804 N0.getOperand(1), N1);
5805 }
5806
5807 return SDValue();
5808}
5809
5810/// Fold unpacking movs into a load by increasing the number of return values.
5811///
5812/// ex:
5813/// L: v2f16,ch = load <p>
5814/// a: f16 = extractelt L:0, 0
5815/// b: f16 = extractelt L:0, 1
5816/// use(a, b)
5817///
5818/// ...is turned into...
5819///
5820/// L: f16,f16,ch = LoadV2 <p>
5821/// use(L:0, L:1)
5822static SDValue
5823combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
5824 // Don't run this optimization before the legalizer
5825 if (!DCI.isAfterLegalizeDAG())
5826 return SDValue();
5827
5828 EVT ElementVT = N->getValueType(0);
5829 // Avoid non-packed types and v4i8
5830 if (!NVPTX::isPackedVectorTy(ElementVT) || ElementVT == MVT::v4i8)
5831 return SDValue();
5832
5833 // Check whether all outputs are either used by an extractelt or are
5834 // glue/chain nodes
5835 if (!all_of(N->uses(), [&](SDUse &U) {
5836 // Skip glue, chain nodes
5837 if (U.getValueType() == MVT::Glue || U.getValueType() == MVT::Other)
5838 return true;
5839 if (U.getUser()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
5840 if (N->getOpcode() != ISD::LOAD)
5841 return true;
5842 // Since this is an ISD::LOAD, check all extractelts are used. If
5843 // any are not used, we don't want to defeat another optimization that
5844 // will narrow the load.
5845 //
5846 // For example:
5847 //
5848 // L: v2f16,ch = load <p>
5849 // e0: f16 = extractelt L:0, 0
5850 // e1: f16 = extractelt L:0, 1 <-- unused
5851 // store e0
5852 //
5853 // Can be optimized by DAGCombiner to:
5854 //
5855 // L: f16,ch = load <p>
5856 // store L:0
5857 return !U.getUser()->use_empty();
5858 }
5859
5860 // Otherwise, this use prevents us from splitting a value.
5861 return false;
5862 }))
5863 return SDValue();
5864
5865 auto *LD = cast<MemSDNode>(N);
5866 SDLoc DL(LD);
5867
5868 // the new opcode after we double the number of operands
5869 unsigned Opcode;
5870 SmallVector<SDValue> Operands(LD->ops());
5871 unsigned OldNumOutputs; // non-glue, non-chain outputs
5872 switch (LD->getOpcode()) {
5873 case ISD::LOAD:
5874 OldNumOutputs = 1;
5875 // Any packed type is legal, so the legalizer will not have lowered
5876 // ISD::LOAD -> NVPTXISD::Load (unless it's under-aligned). We have to do it
5877 // here.
5878 Opcode = NVPTXISD::LoadV2;
5879 // append a "full" used bytes mask operand right before the extension type
5880 // operand, signifying that all bytes are used.
5881 Operands.push_back(DCI.DAG.getConstant(UINT32_MAX, DL, MVT::i32));
5882 Operands.push_back(DCI.DAG.getIntPtrConstant(
5883 cast<LoadSDNode>(LD)->getExtensionType(), DL));
5884 break;
5885 case NVPTXISD::LoadV2:
5886 OldNumOutputs = 2;
5887 Opcode = NVPTXISD::LoadV4;
5888 break;
5889 case NVPTXISD::LoadV4:
5890 // V8 is only supported for f32/i32. Don't forget, we're not changing the
5891 // load size here. This is already a 256-bit load.
5892 if (ElementVT != MVT::v2f32 && ElementVT != MVT::v2i32)
5893 return SDValue();
5894 OldNumOutputs = 4;
5895 Opcode = NVPTXISD::LoadV8;
5896 break;
5897 case NVPTXISD::LoadV8:
5898 // PTX doesn't support the next doubling of outputs
5899 return SDValue();
5900 }
5901
5902 // the non-glue, non-chain outputs in the new load
5903 const unsigned NewNumOutputs = OldNumOutputs * 2;
5904 SmallVector<EVT> NewVTs(NewNumOutputs, ElementVT.getVectorElementType());
5905 // add remaining chain and glue values
5906 NewVTs.append(LD->value_begin() + OldNumOutputs, LD->value_end());
5907
5908 // Create the new load
5909 SDValue NewLoad = DCI.DAG.getMemIntrinsicNode(
5910 Opcode, DL, DCI.DAG.getVTList(NewVTs), Operands, LD->getMemoryVT(),
5911 LD->getMemOperand());
5912
5913 // Now we use a combination of BUILD_VECTORs and a MERGE_VALUES node to keep
5914 // the outputs the same. These nodes will be optimized away in later
5915 // DAGCombiner iterations.
5916 SmallVector<SDValue> Results;
5917 for (unsigned I : seq(OldNumOutputs))
5918 Results.push_back(DCI.DAG.getBuildVector(
5919 ElementVT, DL, {NewLoad.getValue(I * 2), NewLoad.getValue(I * 2 + 1)}));
5920 // Add remaining chain and glue nodes
5921 for (unsigned I : seq(NewLoad->getNumValues() - NewNumOutputs))
5922 Results.push_back(NewLoad.getValue(NewNumOutputs + I));
5923
5924 return DCI.DAG.getMergeValues(Results, DL);
5925}
5926
5927/// Fold packing movs into a store.
5928///
5929/// ex:
5930/// v1: v2f16 = BUILD_VECTOR a:f16, b:f16
5931/// v2: v2f16 = BUILD_VECTOR c:f16, d:f16
5932/// StoreV2 v1, v2
5933///
5934/// ...is turned into...
5935///
5936/// StoreV4 a, b, c, d
5937static SDValue combinePackingMovIntoStore(SDNode *N,
5938 TargetLowering::DAGCombinerInfo &DCI,
5939 unsigned Front, unsigned Back) {
5940 // We want to run this as late as possible since other optimizations may
5941 // eliminate the BUILD_VECTORs.
5942 if (!DCI.isAfterLegalizeDAG())
5943 return SDValue();
5944
5945 // Get the type of the operands being stored.
5946 EVT ElementVT = N->getOperand(Front).getValueType();
5947
5948 // Avoid non-packed types and v4i8
5949 if (!NVPTX::isPackedVectorTy(ElementVT) || ElementVT == MVT::v4i8)
5950 return SDValue();
5951
5952 auto *ST = cast<MemSDNode>(N);
5953
5954 // The new opcode after we double the number of operands.
5955 unsigned Opcode;
5956 switch (N->getOpcode()) {
5957 case ISD::STORE:
5958 // Any packed type is legal, so the legalizer will not have lowered
5959 // ISD::STORE -> NVPTXISD::Store (unless it's under-aligned). We have to do
5960 // it here.
5961 Opcode = NVPTXISD::StoreV2;
5962 break;
5963 case NVPTXISD::StoreV2:
5964 Opcode = NVPTXISD::StoreV4;
5965 break;
5966 case NVPTXISD::StoreV4:
5967 // V8 is only supported for f32/i32. Don't forget, we're not changing the
5968 // store size here. This is already a 256-bit store.
5969 if (ElementVT != MVT::v2f32 && ElementVT != MVT::v2i32)
5970 return SDValue();
5971 Opcode = NVPTXISD::StoreV8;
5972 break;
5973 case NVPTXISD::StoreV8:
5974 // PTX doesn't support the next doubling of operands
5975 return SDValue();
5976 default:
5977 llvm_unreachable("Unhandled store opcode");
5978 }
5979
5980 // Scan the operands; if they're all BUILD_VECTORs, gather their elements
5981 // into the new operand list.
5982 SmallVector<SDValue, 4> Operands(N->ops().take_front(Front));
5983 for (SDValue BV : N->ops().drop_front(Front).drop_back(Back)) {
5984 if (BV.getOpcode() != ISD::BUILD_VECTOR)
5985 return SDValue();
5986
5987 // If the operand has multiple uses, this optimization can increase register
5988 // pressure.
5989 if (!BV.hasOneUse())
5990 return SDValue();
5991
5992 // DAGCombiner visits nodes bottom-up. Check the BUILD_VECTOR operands for
5993 // any signs they may be folded by some other pattern or rule.
5994 for (SDValue Op : BV->ops()) {
5995 // Peek through bitcasts
5996 if (Op.getOpcode() == ISD::BITCAST)
5997 Op = Op.getOperand(0);
5998
5999 // This may be folded into a PRMT.
6000 if (Op.getValueType() == MVT::i16 && Op.getOpcode() == ISD::TRUNCATE &&
6001 Op->getOperand(0).getValueType() == MVT::i32)
6002 return SDValue();
6003
6004 // This may be folded into cvt.bf16x2
6005 if (Op.getOpcode() == ISD::FP_ROUND)
6006 return SDValue();
6007 }
6008 Operands.append({BV.getOperand(0), BV.getOperand(1)});
6009 }
6010 Operands.append(N->op_end() - Back, N->op_end());
6011
6012 // Now we replace the store
6013 return DCI.DAG.getMemIntrinsicNode(Opcode, SDLoc(N), N->getVTList(), Operands,
6014 ST->getMemoryVT(), ST->getMemOperand());
6015}
6016
6018 const NVPTXSubtarget &STI) {
6019
6020 if (DCI.isBeforeLegalize() && N->getOpcode() == ISD::STORE) {
6021 // Here is our chance to custom lower a store with a non-simple type.
6022 // Unfortunately, we can't do this in the legalizer because there is no
6023 // way to setOperationAction for a non-simple type.
6024 auto *ST = cast<StoreSDNode>(N);
6025 if (!ST->getValue().getValueType().isSimple())
6026 return lowerSTOREVector(SDValue(ST, 0), DCI.DAG, STI);
6027 }
6028
6029 return combinePackingMovIntoStore(N, DCI, 1, 2);
6030}
6031
6033 const NVPTXSubtarget &STI) {
6034 if (DCI.isBeforeLegalize() && N->getOpcode() == ISD::LOAD) {
6035 // Here is our chance to custom lower a load with a non-simple type.
6036 // Unfortunately, we can't do this in the legalizer because there is no
6037 // way to setOperationAction for a non-simple type.
6038 if (!N->getValueType(0).isSimple())
6039 return lowerLoadVector(N, DCI.DAG, STI);
6040 }
6041
6042 return combineUnpackingMovIntoLoad(N, DCI);
6043}
6044
6045/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
6046///
6047static SDValue PerformADDCombine(SDNode *N,
6048 TargetLowering::DAGCombinerInfo &DCI,
6049 CodeGenOptLevel OptLevel) {
6050 if (OptLevel == CodeGenOptLevel::None)
6051 return SDValue();
6052
6053 SDValue N0 = N->getOperand(0);
6054 SDValue N1 = N->getOperand(1);
6055
6056 // Skip vector types and any scalar type other than i32
6057 EVT VT = N0.getValueType();
6058 if (VT.isVector() || VT != MVT::i32)
6059 return SDValue();
6060
6061 // First try with the default operand order.
6062 if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI))
6063 return Result;
6064
6065 // If that didn't work, try again with the operands commuted.
6066 return PerformADDCombineWithOperands(N, N1, N0, DCI);
6067}
6068
6069/// PerformFADDCombine - Target-specific dag combine xforms for ISD::FADD.
6070///
6071static SDValue PerformFADDCombine(SDNode *N,
6072 TargetLowering::DAGCombinerInfo &DCI,
6073 CodeGenOptLevel OptLevel) {
6074 SDValue N0 = N->getOperand(0);
6075 SDValue N1 = N->getOperand(1);
6076
6077 EVT VT = N0.getValueType();
6078 if (VT.isVector() || !(VT == MVT::f32 || VT == MVT::f64))
6079 return SDValue();
6080
6081 // First try with the default operand order.
6082 if (SDValue Result = PerformFADDCombineWithOperands(N, N0, N1, DCI, OptLevel))
6083 return Result;
6084
6085 // If that didn't work, try again with the operands commuted.
6086 return PerformFADDCombineWithOperands(N, N1, N0, DCI, OptLevel);
6087}
6088
6089/// Get 3-input version of a 2-input min/max opcode
6090static unsigned getMinMax3Opcode(unsigned MinMax2Opcode) {
6091 switch (MinMax2Opcode) {
6092 case ISD::FMAXNUM:
6093 case ISD::FMAXIMUMNUM:
6094 return NVPTXISD::FMAXNUM3;
6095 case ISD::FMINNUM:
6096 case ISD::FMINIMUMNUM:
6097 return NVPTXISD::FMINNUM3;
6098 case ISD::FMAXIMUM:
6099 return NVPTXISD::FMAXIMUM3;
6100 case ISD::FMINIMUM:
6101 return NVPTXISD::FMINIMUM3;
6102 default:
6103 llvm_unreachable("Invalid 2-input min/max opcode");
6104 }
6105}
6106
6107/// PerformFMinMaxCombine - Combine (fmaxnum (fmaxnum a, b), c) into
6108/// (fmaxnum3 a, b, c). Also covers other llvm min/max intrinsics.
6109static SDValue PerformFMinMaxCombine(SDNode *N,
6110 TargetLowering::DAGCombinerInfo &DCI,
6111 unsigned PTXVersion, unsigned SmVersion) {
6112
6113 // 3-input min/max requires PTX 8.8+ and SM_100+, and only supports f32s
6114 EVT VT = N->getValueType(0);
6115 if (VT != MVT::f32 || PTXVersion < 88 || SmVersion < 100)
6116 return SDValue();
6117
6118 SDValue Op0 = N->getOperand(0);
6119 SDValue Op1 = N->getOperand(1);
6120 unsigned MinMaxOp2 = N->getOpcode();
6121 unsigned MinMaxOp3 = getMinMax3Opcode(MinMaxOp2);
6122
6123 if (Op0.getOpcode() == MinMaxOp2 && Op0.hasOneUse()) {
6124 // (maxnum (maxnum a, b), c) -> (maxnum3 a, b, c)
6125 SDValue A = Op0.getOperand(0);
6126 SDValue B = Op0.getOperand(1);
6127 SDValue C = Op1;
6128 return DCI.DAG.getNode(MinMaxOp3, SDLoc(N), VT, A, B, C, N->getFlags());
6129 } else if (Op1.getOpcode() == MinMaxOp2 && Op1.hasOneUse()) {
6130 // (maxnum a, (maxnum b, c)) -> (maxnum3 a, b, c)
6131 SDValue A = Op0;
6132 SDValue B = Op1.getOperand(0);
6133 SDValue C = Op1.getOperand(1);
6134 return DCI.DAG.getNode(MinMaxOp3, SDLoc(N), VT, A, B, C, N->getFlags());
6135 }
6136 return SDValue();
6137}
6138
6139static SDValue PerformREMCombine(SDNode *N,
6140 TargetLowering::DAGCombinerInfo &DCI,
6141 CodeGenOptLevel OptLevel) {
6142 assert(N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM);
6143
6144 // Don't do anything at less than -O2.
6145 if (OptLevel < CodeGenOptLevel::Default)
6146 return SDValue();
6147
6148 SelectionDAG &DAG = DCI.DAG;
6149 SDLoc DL(N);
6150 EVT VT = N->getValueType(0);
6151 bool IsSigned = N->getOpcode() == ISD::SREM;
6152 unsigned DivOpc = IsSigned ? ISD::SDIV : ISD::UDIV;
6153
6154 const SDValue &Num = N->getOperand(0);
6155 const SDValue &Den = N->getOperand(1);
6156
6157 for (const SDNode *U : Num->users()) {
6158 if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&
6159 U->getOperand(1) == Den) {
6160 // Num % Den -> Num - (Num / Den) * Den
6161 return DAG.getNode(ISD::SUB, DL, VT, Num,
6162 DAG.getNode(ISD::MUL, DL, VT,
6163 DAG.getNode(DivOpc, DL, VT, Num, Den),
6164 Den));
6165 }
6166 }
6167 return SDValue();
6168}
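// For example, if the IR computes both "x / y" and "x % y" with matching
// operands, the remainder is rewritten as x - (x / y) * y so that the
// existing division is reused and only a multiply and a subtract are added.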
6169
6170// (sign_extend|zero_extend (mul|shl) x, y) -> (mul.wide x, y)
6172 CodeGenOptLevel OptLevel) {
6173 if (OptLevel == CodeGenOptLevel::None)
6174 return SDValue();
6175
6176 SDValue Op = N->getOperand(0);
6177 if (!Op.hasOneUse())
6178 return SDValue();
6179 EVT ToVT = N->getValueType(0);
6180 EVT FromVT = Op.getValueType();
6181 if (!((ToVT == MVT::i32 && FromVT == MVT::i16) ||
6182 (ToVT == MVT::i64 && FromVT == MVT::i32)))
6183 return SDValue();
6184 if (!(Op.getOpcode() == ISD::MUL ||
6185 (Op.getOpcode() == ISD::SHL && isa<ConstantSDNode>(Op.getOperand(1)))))
6186 return SDValue();
6187
6188 SDLoc DL(N);
6189 unsigned ExtOpcode = N->getOpcode();
6190 unsigned Opcode = 0;
6191 if (ExtOpcode == ISD::SIGN_EXTEND && Op->getFlags().hasNoSignedWrap())
6192 Opcode = NVPTXISD::MUL_WIDE_SIGNED;
6193 else if (ExtOpcode == ISD::ZERO_EXTEND && Op->getFlags().hasNoUnsignedWrap())
6194 Opcode = NVPTXISD::MUL_WIDE_UNSIGNED;
6195 else
6196 return SDValue();
6197 SDValue RHS = Op.getOperand(1);
6198 if (Op.getOpcode() == ISD::SHL) {
6199 const auto ShiftAmt = Op.getConstantOperandVal(1);
6200 const auto MulVal = APInt(FromVT.getSizeInBits(), 1) << ShiftAmt;
6201 RHS = DCI.DAG.getConstant(MulVal, DL, FromVT);
6202 }
6203 return DCI.DAG.getNode(Opcode, DL, ToVT, Op.getOperand(0), RHS);
6204}
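// Illustrative example: (i64 (zext (mul nuw i32 %a, %b))) becomes
// MUL_WIDE_UNSIGNED, which selects to PTX mul.wide.u32 and produces the full
// 64-bit product directly instead of a 32-bit multiply plus an extension.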
6205
6206enum OperandSignedness {
6207 Signed = 0,
6208 Unsigned,
6209 Unknown
6210};
6211
6212/// IsMulWideOperandDemotable - Checks if the provided DAG node is an operand
6213/// that can be demoted to \p OptSize bits without loss of information. The
6214/// signedness of the operand, if determinable, is placed in \p S.
6215static bool IsMulWideOperandDemotable(SDValue Op,
6216 unsigned OptSize,
6217 OperandSignedness &S) {
6218 S = Unknown;
6219
6220 if (Op.getOpcode() == ISD::SIGN_EXTEND ||
6221 Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {
6222 EVT OrigVT = Op.getOperand(0).getValueType();
6223 if (OrigVT.getFixedSizeInBits() <= OptSize) {
6224 S = Signed;
6225 return true;
6226 }
6227 } else if (Op.getOpcode() == ISD::ZERO_EXTEND) {
6228 EVT OrigVT = Op.getOperand(0).getValueType();
6229 if (OrigVT.getFixedSizeInBits() <= OptSize) {
6230 S = Unsigned;
6231 return true;
6232 }
6233 }
6234
6235 return false;
6236}
6237
6238/// AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can
6239/// be demoted to \p OptSize bits without loss of information. If the operands
6240/// contain a constant, it should appear as the RHS operand. The signedness of
6241/// the operands is placed in \p IsSigned.
6242static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS,
6243 unsigned OptSize,
6244 bool &IsSigned) {
6245 OperandSignedness LHSSign;
6246
6247 // The LHS operand must be a demotable op
6248 if (!IsMulWideOperandDemotable(LHS, OptSize, LHSSign))
6249 return false;
6250
6251 // We should have been able to determine the signedness from the LHS
6252 if (LHSSign == Unknown)
6253 return false;
6254
6255 IsSigned = (LHSSign == Signed);
6256
6257 // The RHS can be a demotable op or a constant
6258 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(RHS)) {
6259 const APInt &Val = CI->getAPIntValue();
6260 if (LHSSign == Unsigned) {
6261 return Val.isIntN(OptSize);
6262 } else {
6263 return Val.isSignedIntN(OptSize);
6264 }
6265 } else {
6266 OperandSignedness RHSSign;
6267 if (!IsMulWideOperandDemotable(RHS, OptSize, RHSSign))
6268 return false;
6269
6270 return LHSSign == RHSSign;
6271 }
6272}
6273
6274/// TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply
6275/// of M/2 bits that produces an M-bit result (i.e. mul.wide). This transform
6276/// works on both multiply DAG nodes and SHL DAG nodes with a constant shift
6277/// amount.
6278static SDValue TryMULWIDECombine(SDNode *N,
6279                                 TargetLowering::DAGCombinerInfo &DCI) {
6280  EVT MulType = N->getValueType(0);
6281 if (MulType != MVT::i32 && MulType != MVT::i64) {
6282 return SDValue();
6283 }
6284
6285 SDLoc DL(N);
6286 unsigned OptSize = MulType.getSizeInBits() >> 1;
6287 SDValue LHS = N->getOperand(0);
6288 SDValue RHS = N->getOperand(1);
6289
6290 // Canonicalize the multiply so the constant (if any) is on the right
6291 if (N->getOpcode() == ISD::MUL) {
6292 if (isa<ConstantSDNode>(LHS)) {
6293 std::swap(LHS, RHS);
6294 }
6295 }
6296
6297 // If we have a SHL, determine the actual multiply amount
6298 if (N->getOpcode() == ISD::SHL) {
6299    ConstantSDNode *ShlRHS = dyn_cast<ConstantSDNode>(RHS);
6300    if (!ShlRHS) {
6301 return SDValue();
6302 }
6303
6304 APInt ShiftAmt = ShlRHS->getAPIntValue();
6305 unsigned BitWidth = MulType.getSizeInBits();
6306 if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {
6307 APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;
6308 RHS = DCI.DAG.getConstant(MulVal, DL, MulType);
6309 } else {
6310 return SDValue();
6311 }
6312 }
6313
6314 bool Signed;
6315 // Verify that our operands are demotable
6316 if (!AreMulWideOperandsDemotable(LHS, RHS, OptSize, Signed)) {
6317 return SDValue();
6318 }
6319
6320 EVT DemotedVT;
6321 if (MulType == MVT::i32) {
6322 DemotedVT = MVT::i16;
6323 } else {
6324 DemotedVT = MVT::i32;
6325 }
6326
6327 // Truncate the operands to the correct size. Note that these are just for
6328 // type consistency and will (likely) be eliminated in later phases.
6329 SDValue TruncLHS =
6330 DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, LHS);
6331 SDValue TruncRHS =
6332 DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, RHS);
6333
6334 unsigned Opc;
6335 if (Signed) {
6336 Opc = NVPTXISD::MUL_WIDE_SIGNED;
6337 } else {
6338 Opc = NVPTXISD::MUL_WIDE_UNSIGNED;
6339 }
6340
6341 return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);
6342}
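// Illustrative sketch (not part of NVPTXISelLowering.cpp): the demotion
// TryMULWIDECombine performs. An i64 multiply whose LHS is zero-extended from
// i32 and whose RHS is a constant fitting in 32 bits has the same value as a
// widening 32x32->64 multiply (PTX mul.wide.u32) of the truncated operands.
#include <cassert>
#include <cstdint>

int main() {
  uint32_t X = 0xDEADBEEF;
  uint64_t K = 100;                                        // Val.isIntN(32) holds
  uint64_t FullMul = (uint64_t)X * K;                      // original i64 multiply
  uint64_t WideMul = (uint64_t)(uint32_t)X * (uint32_t)K;  // demoted mul.wide.u32
  assert(FullMul == WideMul);
  return 0;
}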
6343
6344static bool isConstOne(const SDValue &Operand) {
6345 const auto *Const = dyn_cast<ConstantSDNode>(Operand);
6346 return Const && Const->getZExtValue() == 1;
6347}
6348
6349static SDValue matchMADConstOnePattern(SDValue Add) {
6350  if (Add->getOpcode() != ISD::ADD)
6351 return SDValue();
6352
6353 if (isConstOne(Add->getOperand(0)))
6354 return Add->getOperand(1);
6355
6356 if (isConstOne(Add->getOperand(1)))
6357 return Add->getOperand(0);
6358
6359 return SDValue();
6360}
6361
6364
6366 SDValue Mul = DCI.DAG.getNode(ISD::MUL, DL, VT, X, Y);
6367 return DCI.DAG.getNode(ISD::ADD, DL, VT, Mul, X);
6368 }
6369
6370 return SDValue();
6371}
6372
6374 SDLoc DL,
6376 if (Select->getOpcode() != ISD::SELECT)
6377 return SDValue();
6378
6379 SDValue Cond = Select->getOperand(0);
6380
6381 unsigned ConstOpNo;
6382 if (isConstOne(Select->getOperand(1)))
6383 ConstOpNo = 1;
6384 else if (isConstOne(Select->getOperand(2)))
6385 ConstOpNo = 2;
6386 else
6387 return SDValue();
6388
6389 SDValue Y = Select->getOperand((ConstOpNo == 1) ? 2 : 1);
6390
6391 // Do not combine if the resulting sequence is not obviously profitable.
6392  if (!matchMADConstOnePattern(Y))
6393    return SDValue();
6394
6395 SDValue NewMul = DCI.DAG.getNode(ISD::MUL, DL, VT, X, Y);
6396
6397 return DCI.DAG.getNode(ISD::SELECT, DL, VT, Cond,
6398 (ConstOpNo == 1) ? X : NewMul,
6399 (ConstOpNo == 1) ? NewMul : X);
6400}
6401
6402static SDValue
6405
6406 EVT VT = N0.getValueType();
6407 if (VT.isVector())
6408 return SDValue();
6409
6410 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
6411 return SDValue();
6412
6413 SDLoc DL(N);
6414
6415 // (mul x, (add y, 1)) -> (add (mul x, y), x)
6416 if (SDValue Res = combineMADConstOne(N0, N1, VT, DL, DCI))
6417 return Res;
6418 if (SDValue Res = combineMADConstOne(N1, N0, VT, DL, DCI))
6419 return Res;
6420
6421  // (mul x, (select c, 1, y)) -> (select c, x, (mul x, y))
6422 if (SDValue Res = combineMulSelectConstOne(N0, N1, VT, DL, DCI))
6423 return Res;
6424 if (SDValue Res = combineMulSelectConstOne(N1, N0, VT, DL, DCI))
6425 return Res;
6426
6427 return SDValue();
6428}
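// Illustrative sketch (not part of NVPTXISelLowering.cpp): the two scalar
// identities used above, checked in plain C++. Both rewrites trade a multiply
// by (y + 1) or by a select of 1 for forms that map more directly onto mad /
// predicated code.
#include <cassert>
#include <cstdint>

int main() {
  uint32_t X = 123, Y = 45;
  // (mul x, (add y, 1)) -> (add (mul x, y), x)
  assert(X * (Y + 1) == X * Y + X);

  // (mul x, (select c, 1, y)) -> (select c, x, (mul x, y))
  for (bool C : {false, true})
    assert(X * (C ? 1u : Y) == (C ? X : X * Y));
  return 0;
}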
6429
6430/// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.
6431static SDValue PerformMULCombine(SDNode *N,
6432                                 TargetLowering::DAGCombinerInfo &DCI,
6433                                 CodeGenOptLevel OptLevel) {
6434 if (OptLevel == CodeGenOptLevel::None)
6435 return SDValue();
6436
6437 if (SDValue Ret = TryMULWIDECombine(N, DCI))
6438 return Ret;
6439
6440 SDValue N0 = N->getOperand(0);
6441 SDValue N1 = N->getOperand(1);
6442 return PerformMULCombineWithOperands(N, N0, N1, DCI);
6443}
6444
6445/// PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
6446static SDValue PerformSHLCombine(SDNode *N,
6447                                 TargetLowering::DAGCombinerInfo &DCI,
6448                                 CodeGenOptLevel OptLevel) {
6449 if (OptLevel > CodeGenOptLevel::None) {
6450 // Try mul.wide combining at OptLevel > 0
6451 if (SDValue Ret = TryMULWIDECombine(N, DCI))
6452 return Ret;
6453 }
6454
6455 return SDValue();
6456}
6457
6458static SDValue PerformSETCCCombine(SDNode *N,
6459                                   TargetLowering::DAGCombinerInfo &DCI,
6460                                   unsigned int SmVersion) {
6461 EVT CCType = N->getValueType(0);
6462 SDValue A = N->getOperand(0);
6463 SDValue B = N->getOperand(1);
6464
6465 EVT AType = A.getValueType();
6466 if (!(CCType == MVT::v2i1 && (AType == MVT::v2f16 || AType == MVT::v2bf16)))
6467 return SDValue();
6468
6469 if (A.getValueType() == MVT::v2bf16 && SmVersion < 90)
6470 return SDValue();
6471
6472 SDLoc DL(N);
6473 // setp.f16x2 returns two scalar predicates, which we need to
6474 // convert back to v2i1. The returned result will be scalarized by
6475 // the legalizer, but the comparison will remain a single vector
6476 // instruction.
6477 SDValue CCNode = DCI.DAG.getNode(
6478 A.getValueType() == MVT::v2f16 ? NVPTXISD::SETP_F16X2
6479                                   : NVPTXISD::SETP_BF16X2,
6480      DL, DCI.DAG.getVTList(MVT::i1, MVT::i1), {A, B, N->getOperand(2)});
6481 return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, CCType, CCNode.getValue(0),
6482 CCNode.getValue(1));
6483}
6484
6485static SDValue PerformEXTRACTCombine(SDNode *N,
6486                                     TargetLowering::DAGCombinerInfo &DCI) {
6487  SDValue Vector = N->getOperand(0);
6488 if (Vector->getOpcode() == ISD::FREEZE)
6489 Vector = Vector->getOperand(0);
6490 SDLoc DL(N);
6491 EVT VectorVT = Vector.getValueType();
6492 if (Vector->getOpcode() == ISD::LOAD && VectorVT.isSimple() &&
6493 IsPTXVectorType(VectorVT.getSimpleVT()))
6494 return SDValue(); // Native vector loads already combine nicely w/
6495 // extract_vector_elt.
6496 // Don't mess with singletons or packed types (v2*32, v2*16, v4i8 and v8i8),
6497 // we already handle them OK.
6498 if (VectorVT.getVectorNumElements() == 1 ||
6499 NVPTX::isPackedVectorTy(VectorVT) || VectorVT == MVT::v8i8)
6500 return SDValue();
6501
6502 // Don't mess with undef values as sra may be simplified to 0, not undef.
6503 if (Vector->isUndef() || ISD::allOperandsUndef(Vector.getNode()))
6504 return SDValue();
6505
6506 uint64_t VectorBits = VectorVT.getSizeInBits();
6507 // We only handle the types we can extract in-register.
6508 if (!(VectorBits == 16 || VectorBits == 32 || VectorBits == 64))
6509 return SDValue();
6510
6511 ConstantSDNode *Index = dyn_cast<ConstantSDNode>(N->getOperand(1));
6512 // Index == 0 is handled by generic DAG combiner.
6513 if (!Index || Index->getZExtValue() == 0)
6514 return SDValue();
6515
6516 MVT IVT = MVT::getIntegerVT(VectorBits);
6517 EVT EltVT = VectorVT.getVectorElementType();
6518 EVT EltIVT = EltVT.changeTypeToInteger();
6519 uint64_t EltBits = EltVT.getScalarSizeInBits();
6520
6521 SDValue Result = DCI.DAG.getNode(
6522 ISD::TRUNCATE, DL, EltIVT,
6523 DCI.DAG.getNode(
6524 ISD::SRA, DL, IVT, DCI.DAG.getNode(ISD::BITCAST, DL, IVT, Vector),
6525 DCI.DAG.getConstant(Index->getZExtValue() * EltBits, DL, IVT)));
6526
6527 // If element has non-integer type, bitcast it back to the expected type.
6528 if (EltVT != EltIVT)
6529 Result = DCI.DAG.getNode(ISD::BITCAST, DL, EltVT, Result);
6530  // Past legalizer, we may need to extend i8 -> i16 to match the register type.
6531 if (EltVT != N->getValueType(0))
6532 Result = DCI.DAG.getNode(ISD::ANY_EXTEND, DL, N->getValueType(0), Result);
6533
6534 return Result;
6535}
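// Illustrative sketch (not part of NVPTXISelLowering.cpp): the in-register
// extraction the combine above emits. For a vector that fits in one integer
// register, element I lives at bit offset I * EltBits, so a shift plus a
// truncate recovers it (the DAG uses SRA, but the truncation makes SRA and
// SRL equivalent for the extracted element).
#include <cassert>
#include <cstdint>

int main() {
  uint32_t Vec = 0x44332211;   // v4i8 {0x11, 0x22, 0x33, 0x44}, little-endian
  unsigned Index = 2, EltBits = 8;
  uint8_t Elt = (uint8_t)(Vec >> (Index * EltBits));   // shift + truncate
  assert(Elt == 0x33);
  return 0;
}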
6536
6537/// Transform patterns like:
6538/// (select (ugt shift_amt, BitWidth-1), 0, (srl/shl x, shift_amt))
6539/// (select (ult shift_amt, BitWidth), (srl/shl x, shift_amt), 0)
6540/// Into:
6541/// (NVPTXISD::SRL_CLAMP x, shift_amt) or (NVPTXISD::SHL_CLAMP x, shift_amt)
6542///
6543/// These patterns arise from C/C++ code like `shift >= 32 ? 0 : x >> shift`
6544/// which guards against undefined behavior. PTX shr/shl instructions clamp
6545/// shift amounts >= BitWidth to produce 0 for logical shifts, making the
6546/// guard redundant.
6547///
6548/// Note: We only handle SRL and SHL, not SRA, because arithmetic right
6549/// shifts could produce 0 or -1 when shift >= BitWidth.
6550/// Note: We don't handle uge or ule. These don't appear because of
6551/// canonicalization.
6552static SDValue PerformSELECTShiftCombine(SDNode *N,
6553                                         TargetLowering::DAGCombinerInfo &DCI) {
6554  if (!DCI.isAfterLegalizeDAG())
6555 return SDValue();
6556
6557 using namespace SDPatternMatch;
6558 unsigned BitWidth = N->getValueType(0).getSizeInBits();
6559 SDValue ShiftAmt, ShiftOp;
6560
6561 // Match logical shifts where the shift amount in the guard matches the shift
6562 // amount in the operation.
6563 auto LogicalShift =
6564 m_AllOf(m_Value(ShiftOp),
6565 m_AnyOf(m_Srl(m_Value(), m_TruncOrSelf(m_Deferred(ShiftAmt))),
6566 m_Shl(m_Value(), m_TruncOrSelf(m_Deferred(ShiftAmt)))));
6567
6568 // shift_amt > BitWidth-1 ? 0 : shift_op
6569 bool MatchedUGT =
6570 sd_match(N, m_Select(m_SetCC(m_Value(ShiftAmt),
6571                               m_SpecificInt(BitWidth - 1),
6572                               m_SpecificCondCode(ISD::SETUGT)),
6573 m_Zero(), LogicalShift));
6574 // shift_amt < BitWidth ? shift_op : 0
6575 bool MatchedULT =
6576 !MatchedUGT &&
6577 sd_match(N, m_Select(m_SetCC(m_Value(ShiftAmt),
6578                               m_SpecificInt(BitWidth),
6579                               m_SpecificCondCode(ISD::SETULT)),
6580 LogicalShift, m_Zero()));
6581
6582 if (!MatchedUGT && !MatchedULT)
6583 return SDValue();
6584
6585 // Return a clamp shift operation, which has the same semantics as PTX shift.
6586 unsigned ClampOpc = ShiftOp.getOpcode() == ISD::SRL ? NVPTXISD::SRL_CLAMP
6587 : NVPTXISD::SHL_CLAMP;
6588 return DCI.DAG.getNode(ClampOpc, SDLoc(N), ShiftOp.getValueType(),
6589 ShiftOp.getOperand(0), ShiftOp.getOperand(1));
6590}
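// Illustrative sketch (not part of NVPTXISelLowering.cpp): the C idiom this
// combine targets. srlClampModel models the PTX shr.u32 clamping behaviour
// that NVPTXISD::SRL_CLAMP selects to; with that semantics the source-level
// guard below is redundant, which is exactly what the combine exploits.
#include <cassert>
#include <cstdint>

static uint32_t srlClampModel(uint32_t X, uint32_t Amt) {
  return Amt >= 32 ? 0u : X >> Amt;   // PTX clamps; C++ would be UB for Amt >= 32
}

int main() {
  uint32_t X = 0xABCD1234;
  for (uint32_t S : {0u, 5u, 31u, 32u, 40u}) {
    uint32_t Guarded = (S > 31u) ? 0u : (X >> S);   // shift >= 32 ? 0 : x >> shift
    assert(Guarded == srlClampModel(X, S));
  }
  return 0;
}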
6591
6592static SDValue PerformVSELECTCombine(SDNode *N,
6593                                     TargetLowering::DAGCombinerInfo &DCI) {
6594  SDValue VA = N->getOperand(1);
6595 EVT VectorVT = VA.getValueType();
6596 if (VectorVT != MVT::v4i8)
6597 return SDValue();
6598
6599  // We need to split vselect into individual per-element operations. Because
6600  // we use the BFE/BFI instructions for byte extraction/insertion, we end up
6601  // with 32-bit values anyway, so we may as well do the comparison as i32 to
6602  // avoid the conversions to/from i16 normally used for i8 values.
6603  SmallVector<SDValue, 4> E;
6604  SDLoc DL(N);
6605 SDValue VCond = N->getOperand(0);
6606 SDValue VB = N->getOperand(2);
6607 for (int I = 0; I < 4; ++I) {
6608 SDValue C = DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i1, VCond,
6609 DCI.DAG.getConstant(I, DL, MVT::i32));
6610 SDValue EA = DCI.DAG.getAnyExtOrTrunc(
6611 DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8, VA,
6612 DCI.DAG.getConstant(I, DL, MVT::i32)),
6613 DL, MVT::i32);
6614 SDValue EB = DCI.DAG.getAnyExtOrTrunc(
6615 DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8, VB,
6616 DCI.DAG.getConstant(I, DL, MVT::i32)),
6617 DL, MVT::i32);
6618 E.push_back(DCI.DAG.getAnyExtOrTrunc(
6619 DCI.DAG.getNode(ISD::SELECT, DL, MVT::i32, C, EA, EB), DL, MVT::i8));
6620 }
6621 return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i8, E);
6622}
6623
6624static SDValue
6625PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
6626  auto VT = N->getValueType(0);
6627 if (!DCI.isAfterLegalizeDAG() ||
6628 // only process v2*16 types
6629 !(NVPTX::isPackedVectorTy(VT) && VT.is32BitVector() &&
6630 VT.getVectorNumElements() == 2))
6631 return SDValue();
6632
6633 auto Op0 = N->getOperand(0);
6634 auto Op1 = N->getOperand(1);
6635
6636 // Start out by assuming we want to take the lower 2 bytes of each i32
6637 // operand.
6638 uint64_t Op0Bytes = 0x10;
6639 uint64_t Op1Bytes = 0x54;
6640
6641 std::pair<SDValue *, uint64_t *> OpData[2] = {{&Op0, &Op0Bytes},
6642 {&Op1, &Op1Bytes}};
6643
6644 // Check that each operand is an i16, truncated from an i32 operand. We'll
6645 // select individual bytes from those original operands. Optionally, fold in a
6646 // shift right of that original operand.
6647 for (auto &[Op, OpBytes] : OpData) {
6648 // Eat up any bitcast
6649 if (Op->getOpcode() == ISD::BITCAST)
6650 *Op = Op->getOperand(0);
6651
6652 if (!(Op->getValueType() == MVT::i16 && Op->getOpcode() == ISD::TRUNCATE &&
6653 Op->getOperand(0).getValueType() == MVT::i32))
6654 return SDValue();
6655
6656 // If the truncate has multiple uses, this optimization can increase
6657 // register pressure
6658 if (!Op->hasOneUse())
6659 return SDValue();
6660
6661 *Op = Op->getOperand(0);
6662
6663 // Optionally, fold in a shift-right of the original operand and let permute
6664 // pick the two higher bytes of the original value directly.
6665 if (Op->getOpcode() == ISD::SRL && isa<ConstantSDNode>(Op->getOperand(1))) {
6666 if (cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue() == 16) {
6667 // Shift the PRMT byte selector to pick upper bytes from each respective
6668 // value, instead of the lower ones: 0x10 -> 0x32, 0x54 -> 0x76
6669 assert((*OpBytes == 0x10 || *OpBytes == 0x54) &&
6670 "PRMT selector values out of range");
6671 *OpBytes += 0x22;
6672 *Op = Op->getOperand(0);
6673 }
6674 }
6675 }
6676
6677 SDLoc DL(N);
6678 auto &DAG = DCI.DAG;
6679
6680 auto PRMT =
6681 getPRMT(DAG.getBitcast(MVT::i32, Op0), DAG.getBitcast(MVT::i32, Op1),
6682 (Op1Bytes << 8) | Op0Bytes, DL, DAG);
6683 return DAG.getBitcast(VT, PRMT);
6684}
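// Illustrative sketch (not part of NVPTXISelLowering.cpp): the selector
// arithmetic used above. Each PRMT nibble indexes a byte of the 8-byte pair
// {Op1, Op0}; the low halves of the two i32s are bytes {1,0} and {5,4}, and
// folding an srl-by-16 simply bumps each index by 2 to pick the high half.
#include <cassert>

int main() {
  unsigned Op0Bytes = 0x10, Op1Bytes = 0x54;   // low i16 of Op0 and of Op1
  Op1Bytes += 0x22;                            // fold (srl Op1, 16): 0x54 -> 0x76
  unsigned Selector = (Op1Bytes << 8) | Op0Bytes;
  assert(Selector == 0x7610);
  return 0;
}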
6685
6686static SDValue combineADDRSPACECAST(SDNode *N,
6687                                    TargetLowering::DAGCombinerInfo &DCI) {
6688  auto *ASCN1 = cast<AddrSpaceCastSDNode>(N);
6689
6690 if (auto *ASCN2 = dyn_cast<AddrSpaceCastSDNode>(ASCN1->getOperand(0))) {
6691 assert(ASCN2->getDestAddressSpace() == ASCN1->getSrcAddressSpace());
6692
6693 // Fold asc[B -> A](asc[A -> B](x)) -> x
6694 if (ASCN1->getDestAddressSpace() == ASCN2->getSrcAddressSpace())
6695 return ASCN2->getOperand(0);
6696 }
6697
6698 return SDValue();
6699}
6700
6701// Given a constant selector value and a prmt mode, return the selector value
6702// normalized to the generic prmt mode. See the PTX ISA documentation for more
6703// details:
6704// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-prmt
6705static APInt getPRMTSelector(const APInt &Selector, unsigned Mode) {
6706 assert(Selector.getBitWidth() == 32 && "PRMT must have i32 operands");
6707
6709 return Selector;
6710
6711 const unsigned V = Selector.trunc(2).getZExtValue();
6712
6713 const auto GetSelector = [](unsigned S0, unsigned S1, unsigned S2,
6714 unsigned S3) {
6715 return APInt(32, S0 | (S1 << 4) | (S2 << 8) | (S3 << 12));
6716 };
6717
6718 switch (Mode) {
6720 return GetSelector(V, V + 1, V + 2, V + 3);
6722 return GetSelector(V, (V - 1) & 7, (V - 2) & 7, (V - 3) & 7);
6724 return GetSelector(V, V, V, V);
6726 return GetSelector(V, std::max(V, 1U), std::max(V, 2U), 3U);
6728 return GetSelector(0, std::min(V, 1U), std::min(V, 2U), V);
6730 unsigned V1 = (V & 1) << 1;
6731 return GetSelector(V1, V1 + 1, V1, V1 + 1);
6732 }
6733 default:
6734 llvm_unreachable("Invalid PRMT mode");
6735 }
6736}
6737
6738static APInt computePRMT(APInt A, APInt B, APInt Selector, unsigned Mode) {
6739 assert(A.getBitWidth() == 32 && B.getBitWidth() == 32 &&
6740 Selector.getBitWidth() == 32 && "PRMT must have i32 operands");
6741 // {b, a} = {{b7, b6, b5, b4}, {b3, b2, b1, b0}}
6742 APInt BitField = B.concat(A);
6743 APInt SelectorVal = getPRMTSelector(Selector, Mode);
6744 APInt Result(32, 0);
6745 for (unsigned I : llvm::seq(4U)) {
6746 APInt Sel = SelectorVal.extractBits(4, I * 4);
6747 unsigned Idx = Sel.getLoBits(3).getZExtValue();
6748 unsigned Sign = Sel.getHiBits(1).getZExtValue();
6749 APInt Byte = BitField.extractBits(8, Idx * 8);
6750 if (Sign)
6751 Byte = Byte.ashr(8);
6752 Result.insertBits(Byte, I * 8);
6753 }
6754 return Result;
6755}
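// Illustrative sketch (not part of NVPTXISelLowering.cpp): a plain C++ model
// of the generic-mode byte select that computePRMT implements, handy for
// checking selector values by hand. prmtModel is a made-up name.
#include <cassert>
#include <cstdint>

static uint32_t prmtModel(uint32_t A, uint32_t B, uint32_t Selector) {
  uint64_t BitField = ((uint64_t)B << 32) | A;   // {b7..b4, b3..b0}
  uint32_t Result = 0;
  for (unsigned I = 0; I < 4; ++I) {
    uint32_t Sel = (Selector >> (I * 4)) & 0xF;
    uint32_t Idx = Sel & 0x7;                    // which source byte
    uint8_t Byte = (uint8_t)(BitField >> (Idx * 8));
    if (Sel & 0x8)                               // sign-replicate flag
      Byte = (Byte & 0x80) ? 0xFF : 0x00;
    Result |= (uint32_t)Byte << (I * 8);
  }
  return Result;
}

int main() {
  uint32_t A = 0x33221100, B = 0x77665544;
  assert(prmtModel(A, B, 0x3210) == A);            // identity selector
  assert(prmtModel(A, B, 0x7654) == B);            // selects the second source
  assert(prmtModel(A, B, 0x5410) == 0x55441100);   // low halves of A and B
  return 0;
}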
6756
6757static SDValue combinePRMT(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
6758                           CodeGenOptLevel OptLevel) {
6759 if (OptLevel == CodeGenOptLevel::None)
6760 return SDValue();
6761
6762 // Constant fold PRMT
6763 if (isa<ConstantSDNode>(N->getOperand(0)) &&
6764 isa<ConstantSDNode>(N->getOperand(1)) &&
6765 isa<ConstantSDNode>(N->getOperand(2)))
6766 return DCI.DAG.getConstant(computePRMT(N->getConstantOperandAPInt(0),
6767 N->getConstantOperandAPInt(1),
6768 N->getConstantOperandAPInt(2),
6769 N->getConstantOperandVal(3)),
6770 SDLoc(N), N->getValueType(0));
6771 return SDValue();
6772}
6773
6774// During call lowering we wrap the return values in a ProxyReg node which
6775// depends on the chain value produced by the completed call. This ensures that
6776// the full call is emitted in cases where libcalls are used to legalize
6777// operations. To improve the functioning of other DAG combines we pull all
6778// operations we can through one of these nodes, ensuring that the ProxyReg
6779// directly wraps a load. That is:
6780//
6781// (ProxyReg (zext (load retval0))) => (zext (ProxyReg (load retval0)))
6782//
6785 switch (R.getOpcode()) {
6786 case ISD::TRUNCATE:
6787 case ISD::ANY_EXTEND:
6788 case ISD::SIGN_EXTEND:
6789 case ISD::ZERO_EXTEND:
6790 case ISD::BITCAST: {
6791 if (SDValue V = sinkProxyReg(R.getOperand(0), Chain, DCI))
6792 return DCI.DAG.getNode(R.getOpcode(), SDLoc(R), R.getValueType(), V);
6793 return SDValue();
6794 }
6795 case ISD::SHL:
6796 case ISD::SRL:
6797 case ISD::SRA:
6798 case ISD::OR: {
6799 if (SDValue A = sinkProxyReg(R.getOperand(0), Chain, DCI))
6800 if (SDValue B = sinkProxyReg(R.getOperand(1), Chain, DCI))
6801 return DCI.DAG.getNode(R.getOpcode(), SDLoc(R), R.getValueType(), A, B);
6802 return SDValue();
6803 }
6804 case ISD::Constant:
6805 return R;
6806 case ISD::LOAD:
6807 case NVPTXISD::LoadV2:
6808 case NVPTXISD::LoadV4: {
6809 return DCI.DAG.getNode(NVPTXISD::ProxyReg, SDLoc(R), R.getValueType(),
6810 {Chain, R});
6811 }
6812 case ISD::BUILD_VECTOR: {
6813 if (DCI.isBeforeLegalize())
6814 return SDValue();
6815
6817 for (auto &Op : R->ops()) {
6818 SDValue V = sinkProxyReg(Op, Chain, DCI);
6819 if (!V)
6820 return SDValue();
6821 Ops.push_back(V);
6822 }
6823 return DCI.DAG.getNode(ISD::BUILD_VECTOR, SDLoc(R), R.getValueType(), Ops);
6824 }
6825  case ISD::EXTRACT_VECTOR_ELT: {
6826    if (DCI.isBeforeLegalize())
6827 return SDValue();
6828
6829 if (SDValue V = sinkProxyReg(R.getOperand(0), Chain, DCI))
6830      return DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(R),
6831                             R.getValueType(), V, R.getOperand(1));
6832 return SDValue();
6833 }
6834 default:
6835 return SDValue();
6836 }
6837}
6838
6839static SDValue combineProxyReg(SDNode *N,
6840                               TargetLowering::DAGCombinerInfo &DCI) {
6841
6842 SDValue Chain = N->getOperand(0);
6843 SDValue Reg = N->getOperand(1);
6844
6845 // If the ProxyReg is not wrapping a load, try to pull the operations through
6846 // the ProxyReg.
6847 if (Reg.getOpcode() != ISD::LOAD) {
6848 if (SDValue V = sinkProxyReg(Reg, Chain, DCI))
6849 return V;
6850 }
6851
6852 return SDValue();
6853}
6854
6855SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
6856 DAGCombinerInfo &DCI) const {
6857  CodeGenOptLevel OptLevel = getTargetMachine().getOptLevel();
6858  switch (N->getOpcode()) {
6859 default:
6860 break;
6861 case ISD::ADD:
6862 return PerformADDCombine(N, DCI, OptLevel);
6863 case ISD::ADDRSPACECAST:
6864 return combineADDRSPACECAST(N, DCI);
6865 case ISD::SIGN_EXTEND:
6866 case ISD::ZERO_EXTEND:
6867 return combineMulWide(N, DCI, OptLevel);
6868 case ISD::BUILD_VECTOR:
6869 return PerformBUILD_VECTORCombine(N, DCI);
6870  case ISD::EXTRACT_VECTOR_ELT:
6871    return PerformEXTRACTCombine(N, DCI);
6872 case ISD::FADD:
6873 return PerformFADDCombine(N, DCI, OptLevel);
6874 case ISD::FMAXNUM:
6875 case ISD::FMINNUM:
6876 case ISD::FMAXIMUM:
6877 case ISD::FMINIMUM:
6878 case ISD::FMAXIMUMNUM:
6879 case ISD::FMINIMUMNUM:
6880 return PerformFMinMaxCombine(N, DCI, STI.getPTXVersion(),
6881 STI.getSmVersion());
6882 case ISD::LOAD:
6883 case NVPTXISD::LoadV2:
6884 case NVPTXISD::LoadV4:
6885 return combineLOAD(N, DCI, STI);
6886 case ISD::MUL:
6887 return PerformMULCombine(N, DCI, OptLevel);
6888 case NVPTXISD::PRMT:
6889 return combinePRMT(N, DCI, OptLevel);
6890 case NVPTXISD::ProxyReg:
6891 return combineProxyReg(N, DCI);
6892 case ISD::SETCC:
6893 return PerformSETCCCombine(N, DCI, STI.getSmVersion());
6894 case ISD::SHL:
6895 return PerformSHLCombine(N, DCI, OptLevel);
6896 case ISD::SREM:
6897 case ISD::UREM:
6898 return PerformREMCombine(N, DCI, OptLevel);
6899 case ISD::STORE:
6900 case NVPTXISD::StoreV2:
6901 case NVPTXISD::StoreV4:
6902 return combineSTORE(N, DCI, STI);
6903 case ISD::SELECT:
6904 return PerformSELECTShiftCombine(N, DCI);
6905 case ISD::VSELECT:
6906 return PerformVSELECTCombine(N, DCI);
6907 }
6908 return SDValue();
6909}
6910
6911static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG,
6912                           SmallVectorImpl<SDValue> &Results) {
6913  // Handle bitcasting to v2i8 without hitting the default promotion
6914 // strategy which goes through stack memory.
6915 SDValue Op(Node, 0);
6916 EVT ToVT = Op->getValueType(0);
6917 if (ToVT != MVT::v2i8) {
6918 return;
6919 }
6920
6921 // Bitcast to i16 and unpack elements into a vector
6922 SDLoc DL(Node);
6923 SDValue AsInt = DAG.getBitcast(MVT::i16, Op->getOperand(0));
6924 SDValue Vec0 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, AsInt);
6925 SDValue Const8 = DAG.getConstant(8, DL, MVT::i16);
6926 SDValue Vec1 =
6927 DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
6928 DAG.getNode(ISD::SRL, DL, MVT::i16, {AsInt, Const8}));
6929 Results.push_back(
6930 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i8, {Vec0, Vec1}));
6931}
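// Illustrative sketch (not part of NVPTXISelLowering.cpp): the unpacking that
// ReplaceBITCAST performs, in plain C++. On a little-endian target, element 0
// of the v2i8 is the low byte of the i16 and element 1 is the byte exposed by
// an srl of 8.
#include <cassert>
#include <cstdint>

int main() {
  uint16_t AsInt = 0xBEEF;
  uint8_t Elt0 = (uint8_t)AsInt;          // ISD::TRUNCATE
  uint8_t Elt1 = (uint8_t)(AsInt >> 8);   // ISD::SRL by 8, then truncate
  assert(Elt0 == 0xEF && Elt1 == 0xBE);
  return 0;
}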
6932
6933static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
6934                                     SmallVectorImpl<SDValue> &Results) {
6935  SDValue Chain = N->getOperand(0);
6936 SDValue Intrin = N->getOperand(1);
6937 SDLoc DL(N);
6938
6939 // Get the intrinsic ID
6940 unsigned IntrinNo = Intrin.getNode()->getAsZExtVal();
6941 switch (IntrinNo) {
6942 default:
6943 return;
6944 case Intrinsic::nvvm_ldu_global_i:
6945 case Intrinsic::nvvm_ldu_global_f:
6946 case Intrinsic::nvvm_ldu_global_p: {
6947 EVT ResVT = N->getValueType(0);
6948
6949 if (ResVT.isVector()) {
6950 // Vector LDG/LDU
6951
6952 unsigned NumElts = ResVT.getVectorNumElements();
6953 EVT EltVT = ResVT.getVectorElementType();
6954
6955 // Since LDU/LDG are target nodes, we cannot rely on DAG type
6956 // legalization.
6957 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
6958 // loaded type to i16 and propagate the "real" type as the memory type.
6959 bool NeedTrunc = false;
6960 if (EltVT.getSizeInBits() < 16) {
6961 EltVT = MVT::i16;
6962 NeedTrunc = true;
6963 }
6964
6965 unsigned Opcode = 0;
6966 SDVTList LdResVTs;
6967
6968 switch (NumElts) {
6969 default:
6970 return;
6971 case 2:
6972 Opcode = NVPTXISD::LDUV2;
6973 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
6974 break;
6975 case 4: {
6976 Opcode = NVPTXISD::LDUV4;
6977 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
6978 LdResVTs = DAG.getVTList(ListVTs);
6979 break;
6980 }
6981 }
6982
6983 SmallVector<SDValue, 8> OtherOps;
6984
6985 // Copy regular operands
6986
6987 OtherOps.push_back(Chain); // Chain
6988 // Skip operand 1 (intrinsic ID)
6989 // Others
6990 OtherOps.append(N->op_begin() + 2, N->op_end());
6991
6993
6994 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
6995 MemSD->getMemoryVT(),
6996 MemSD->getMemOperand());
6997
6998 SmallVector<SDValue, 4> ScalarRes;
6999
7000 for (unsigned i = 0; i < NumElts; ++i) {
7001 SDValue Res = NewLD.getValue(i);
7002 if (NeedTrunc)
7003 Res =
7004 DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
7005 ScalarRes.push_back(Res);
7006 }
7007
7008 SDValue LoadChain = NewLD.getValue(NumElts);
7009
7010 SDValue BuildVec =
7011 DAG.getBuildVector(ResVT, DL, ScalarRes);
7012
7013 Results.push_back(BuildVec);
7014 Results.push_back(LoadChain);
7015 } else {
7016 // i8 LDG/LDU
7017 assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
7018 "Custom handling of non-i8 ldu/ldg?");
7019
7020 // Just copy all operands as-is
7022
7023 // Force output to i16
7024 SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
7025
7027
7028 // We make sure the memory type is i8, which will be used during isel
7029 // to select the proper instruction.
7030 SDValue NewLD =
7032 MVT::i8, MemSD->getMemOperand());
7033
7034 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
7035 NewLD.getValue(0)));
7036 Results.push_back(NewLD.getValue(1));
7037 }
7038 return;
7039 }
7040
7041 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:
7042 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:
7043 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:
7044 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:
7045 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:
7046 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:
7047 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:
7048 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:
7049 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:
7050 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:
7051 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:
7052 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:
7053 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:
7054 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:
7055 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:
7056 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:
7057 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:
7058 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:
7059 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:
7060 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:
7061 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:
7062 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:
7063 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:
7064 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:
7065 if (auto Res = lowerTcgen05Ld(N, DAG)) {
7066 Results.push_back(Res->first);
7067 Results.push_back(Res->second);
7068 }
7069 return;
7070
7071 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4:
7072 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8:
7073 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16:
7074 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32:
7075 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64:
7076 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128:
7077 if (auto Res = lowerTcgen05Ld(N, DAG, /*HasOffset=*/true)) {
7078 Results.push_back(Res->first);
7079 Results.push_back(Res->second);
7080 }
7081 return;
7082
7083 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x8_i32:
7084 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x8_f32:
7085 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x64_i32:
7086 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x64_f32:
7087 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x4_i32:
7088 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x4_f32:
7089 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x32_i32:
7090 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x32_f32:
7091 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x16_i32:
7092 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x16_f32:
7093 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x128_i32:
7094 case Intrinsic::nvvm_tcgen05_ld_red_32x32b_x128_f32:
7095 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x8_i32:
7096 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x8_f32:
7097 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x64_i32:
7098 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x64_f32:
7099 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x4_i32:
7100 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x4_f32:
7101 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x32_i32:
7102 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x32_f32:
7103 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x16_i32:
7104 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x16_f32:
7105 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x128_i32:
7106 case Intrinsic::nvvm_tcgen05_ld_red_16x32bx2_x128_f32:
7107 if (auto Res = lowerTcgen05LdRed(N, DAG)) {
7108 Results.push_back(std::get<0>(*Res));
7109 Results.push_back(std::get<1>(*Res));
7110 Results.push_back(std::get<2>(*Res));
7111 }
7112 return;
7113 }
7114}
7115
7116static void ReplaceCopyFromReg_128(SDNode *N, SelectionDAG &DAG,
7117                                   SmallVectorImpl<SDValue> &Results) {
7118  // Change the CopyFromReg to output 2 64-bit results instead of a 128-bit
7119 // result so that it can pass the legalization
7120 SDLoc DL(N);
7121 SDValue Chain = N->getOperand(0);
7122 SDValue Reg = N->getOperand(1);
7123 SDValue Glue = N->getOperand(2);
7124
7125 assert(Reg.getValueType() == MVT::i128 &&
7126 "Custom lowering for CopyFromReg with 128-bit reg only");
7127 SmallVector<EVT, 4> ResultsType = {MVT::i64, MVT::i64, N->getValueType(1),
7128 N->getValueType(2)};
7129 SmallVector<SDValue, 3> NewOps = {Chain, Reg, Glue};
7130
7131 SDValue NewValue = DAG.getNode(ISD::CopyFromReg, DL, ResultsType, NewOps);
7132 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128,
7133 {NewValue.getValue(0), NewValue.getValue(1)});
7134
7135 Results.push_back(Pair);
7136 Results.push_back(NewValue.getValue(2));
7137 Results.push_back(NewValue.getValue(3));
7138}
7139
7140static void replaceProxyReg(SDNode *N, SelectionDAG &DAG,
7141                            const TargetLowering &TLI,
7143 SDValue Chain = N->getOperand(0);
7144 SDValue Reg = N->getOperand(1);
7145
7146 MVT VT = TLI.getRegisterType(*DAG.getContext(), Reg.getValueType());
7147
7148 SDValue NewReg = DAG.getAnyExtOrTrunc(Reg, SDLoc(N), VT);
7149 SDValue NewProxy =
7150 DAG.getNode(NVPTXISD::ProxyReg, SDLoc(N), VT, {Chain, NewReg});
7151 SDValue Res = DAG.getAnyExtOrTrunc(NewProxy, SDLoc(N), N->getValueType(0));
7152
7153 Results.push_back(Res);
7154}
7155
7156static void replaceAtomicSwap128(SDNode *N, SelectionDAG &DAG,
7157                                 const NVPTXSubtarget &STI,
7158                                 SmallVectorImpl<SDValue> &Results) {
7159 assert(N->getValueType(0) == MVT::i128 &&
7160 "Custom lowering for atomic128 only supports i128");
7161
7163 SDLoc dl(N);
7164
7165 if (!STI.hasAtomSwap128()) {
7168 "Support for b128 atomics introduced in PTX ISA version 8.3 and "
7169 "requires target sm_90.",
7170 dl.getDebugLoc()));
7171
7172 Results.push_back(DAG.getUNDEF(MVT::i128));
7173 Results.push_back(AN->getOperand(0)); // Chain
7174 return;
7175 }
7176
7178 Ops.push_back(AN->getOperand(0)); // Chain
7179 Ops.push_back(AN->getOperand(1)); // Ptr
7180 for (const auto &Op : AN->ops().drop_front(2)) {
7181 // Low part
7182 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i64, Op,
7183 DAG.getIntPtrConstant(0, dl)));
7184 // High part
7185 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i64, Op,
7186 DAG.getIntPtrConstant(1, dl)));
7187 }
7188 unsigned Opcode = N->getOpcode() == ISD::ATOMIC_SWAP
7191 SDVTList Tys = DAG.getVTList(MVT::i64, MVT::i64, MVT::Other);
7192 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, MVT::i128,
7193 AN->getMemOperand());
7194 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i128,
7195 {Result.getValue(0), Result.getValue(1)}));
7196 Results.push_back(Result.getValue(2));
7197}
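// Illustrative sketch (not part of NVPTXISelLowering.cpp): the i128 <-> two
// i64 split and rejoin used above (EXTRACT_ELEMENT / BUILD_PAIR), written with
// the GCC/Clang __int128 extension purely for illustration.
#include <cassert>
#include <cstdint>

int main() {
  unsigned __int128 V =
      ((unsigned __int128)0x0123456789ABCDEFULL << 64) | 0x1122334455667788ULL;
  uint64_t Lo = (uint64_t)V;            // EXTRACT_ELEMENT index 0
  uint64_t Hi = (uint64_t)(V >> 64);    // EXTRACT_ELEMENT index 1
  unsigned __int128 Rebuilt = ((unsigned __int128)Hi << 64) | Lo;   // BUILD_PAIR
  assert(Rebuilt == V);
  return 0;
}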
7198
7199void NVPTXTargetLowering::ReplaceNodeResults(
7201 switch (N->getOpcode()) {
7202 default:
7203 report_fatal_error("Unhandled custom legalization");
7204 case ISD::BITCAST:
7205 ReplaceBITCAST(N, DAG, Results);
7206 return;
7207 case ISD::LOAD:
7208 case ISD::MLOAD:
7209 replaceLoadVector(N, DAG, Results, STI);
7210 return;
7211  case ISD::INTRINSIC_W_CHAIN:
7212    ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
7213    return;
7214 case ISD::CopyFromReg:
7215    ReplaceCopyFromReg_128(N, DAG, Results);
7216    return;
7217 case NVPTXISD::ProxyReg:
7218 replaceProxyReg(N, DAG, *this, Results);
7219 return;
7220  case ISD::ATOMIC_CMP_SWAP:
7221  case ISD::ATOMIC_SWAP:
7222 replaceAtomicSwap128(N, DAG, STI, Results);
7223 return;
7224 }
7225}
7226
7229 Type *Ty = AI->getValOperand()->getType();
7230
7231 if (AI->isFloatingPointOperation()) {
7233 if (Ty->isHalfTy() && STI.getSmVersion() >= 70 &&
7234 STI.getPTXVersion() >= 63)
7236 if (Ty->isBFloatTy() && STI.getSmVersion() >= 90 &&
7237 STI.getPTXVersion() >= 78)
7239 if (Ty->isFloatTy())
7241 if (Ty->isDoubleTy() && STI.hasAtomAddF64())
7243 }
7245 }
7246
7247 assert(Ty->isIntegerTy() && "Ty should be integer at this point");
7248 const unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
7249
7250 switch (AI->getOperation()) {
7251 default:
7254 if (BitWidth == 128)
7256 [[fallthrough]];
7260 switch (BitWidth) {
7261 case 8:
7262 case 16:
7264 case 32:
7266 case 64:
7267 if (STI.hasAtomBitwise64())
7270 case 128:
7272 default:
7273 llvm_unreachable("unsupported width encountered");
7274 }
7281 switch (BitWidth) {
7282 case 8:
7283 case 16:
7285 case 32:
7287 case 64:
7288 if (STI.hasAtomMinMax64())
7291 case 128:
7293 default:
7294 llvm_unreachable("unsupported width encountered");
7295 }
7298 switch (BitWidth) {
7299 case 32:
7301 case 8:
7302 case 16:
7303 case 64:
7304 case 128:
7306 default:
7307 llvm_unreachable("unsupported width encountered");
7308 }
7309 }
7310
7312}
7313
7314bool NVPTXTargetLowering::shouldInsertFencesForAtomic(
7315    const Instruction *I) const {
7316 auto *CI = dyn_cast<AtomicCmpXchgInst>(I);
7317 // When CAS bitwidth is not supported on the hardware, the CAS is emulated
7318 // using a retry loop that uses a higher-bitwidth monotonic CAS. We enforce
7319 // the memory order using explicit fences around the retry loop.
7320 // The memory order of natively supported CAS operations can be enforced
7321 // by lowering to an atom.cas with the right memory synchronizing effect.
7322 // However, atom.cas only supports relaxed, acquire, release and acq_rel.
7323 // So we also use explicit fences for enforcing memory order for
7324  // seq_cst CAS with natively-supported bitwidths.
7325 return CI &&
7326 (cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth() <
7327 STI.getMinCmpXchgSizeInBits() ||
7328 CI->getMergedOrdering() == AtomicOrdering::SequentiallyConsistent);
7329}
7330
7332 const Instruction *I) const {
7333 auto *CI = dyn_cast<AtomicCmpXchgInst>(I);
7334 bool BitwidthSupportedAndIsSeqCst =
7335 CI && CI->getMergedOrdering() == AtomicOrdering::SequentiallyConsistent &&
7336 cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth() >=
7337 STI.getMinCmpXchgSizeInBits();
7338 return BitwidthSupportedAndIsSeqCst ? AtomicOrdering::Acquire
7340}
7341
7342Instruction *NVPTXTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
7343                                                   Instruction *Inst,
7344 AtomicOrdering Ord) const {
7345 if (!isa<AtomicCmpXchgInst>(Inst))
7346 return TargetLoweringBase::emitLeadingFence(Builder, Inst, Ord);
7347
7348 // Specialize for cmpxchg
7349 // Emit a fence.sc leading fence for cmpxchg seq_cst which are not emulated
7350 SyncScope::ID SSID = cast<AtomicCmpXchgInst>(Inst)->getSyncScopeID();
7351 if (isReleaseOrStronger(Ord))
7352 return Builder.CreateFence(Ord == AtomicOrdering::SequentiallyConsistent
7353 ? Ord
7355 SSID);
7356
7357 return nullptr;
7358}
7359
7360Instruction *NVPTXTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
7361                                                    Instruction *Inst,
7362 AtomicOrdering Ord) const {
7363 // Specialize for cmpxchg
7364 if (!isa<AtomicCmpXchgInst>(Inst))
7365 return TargetLoweringBase::emitTrailingFence(Builder, Inst, Ord);
7366
7367 auto *CI = cast<AtomicCmpXchgInst>(Inst);
7368 auto CASWidth =
7369 cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth();
7370 SyncScope::ID SSID = CI->getSyncScopeID();
7371 // Do not emit a trailing fence for cmpxchg seq_cst which are not emulated
7372 if (isAcquireOrStronger(Ord) &&
7374 CASWidth < STI.getMinCmpXchgSizeInBits()))
7375 return Builder.CreateFence(AtomicOrdering::Acquire, SSID);
7376
7377 return nullptr;
7378}
7379
7380// Rather than default to SINT when both UINT and SINT are custom, we only
7381// change the opcode when UINT is not legal and SINT is. UINT is preferred when
7382// both are custom since unsigned CVT instructions can lead to slightly better
7383// SASS code with fewer instructions.
7385 EVT ToVT) const {
7386 if (isOperationLegal(Op, ToVT))
7387 return Op;
7388 switch (Op) {
7389 case ISD::FP_TO_UINT:
7390    if (isOperationLegal(ISD::FP_TO_SINT, ToVT))
7391      return ISD::FP_TO_SINT;
7392 break;
7393  case ISD::STRICT_FP_TO_UINT:
7394    if (isOperationLegal(ISD::STRICT_FP_TO_SINT, ToVT))
7395      return ISD::STRICT_FP_TO_SINT;
7396    break;
7397 case ISD::VP_FP_TO_UINT:
7398 if (isOperationLegal(ISD::VP_FP_TO_SINT, ToVT))
7399 return ISD::VP_FP_TO_SINT;
7400 break;
7401 default:
7402 break;
7403 }
7404 return Op;
7405}
7406
7407// Pin NVPTXTargetObjectFile's vtables to this file.
7409
7414
7415static void computeKnownBitsForPRMT(const SDValue Op, KnownBits &Known,
7416                                    const SelectionDAG &DAG, unsigned Depth) {
7417 SDValue A = Op.getOperand(0);
7418 SDValue B = Op.getOperand(1);
7419 ConstantSDNode *Selector = dyn_cast<ConstantSDNode>(Op.getOperand(2));
7420 unsigned Mode = Op.getConstantOperandVal(3);
7421
7422 if (!Selector)
7423 return;
7424
7425 KnownBits AKnown = DAG.computeKnownBits(A, Depth);
7426 KnownBits BKnown = DAG.computeKnownBits(B, Depth);
7427
7428 // {b, a} = {{b7, b6, b5, b4}, {b3, b2, b1, b0}}
7429 assert(AKnown.getBitWidth() == 32 && BKnown.getBitWidth() == 32 &&
7430 "PRMT must have i32 operands");
7431 assert(Known.getBitWidth() == 32 && "PRMT must have i32 result");
7432 KnownBits BitField = BKnown.concat(AKnown);
7433
7434 APInt SelectorVal = getPRMTSelector(Selector->getAPIntValue(), Mode);
7435 for (unsigned I : llvm::seq(4)) {
7436 APInt Sel = SelectorVal.extractBits(4, I * 4);
7437 unsigned Idx = Sel.getLoBits(3).getZExtValue();
7438 unsigned Sign = Sel.getHiBits(1).getZExtValue();
7439 KnownBits Byte = BitField.extractBits(8, Idx * 8);
7440 if (Sign)
7441 Byte = KnownBits::ashr(Byte, 8);
7442 Known.insertBits(Byte, I * 8);
7443 }
7444}
7445
7446static void computeKnownBitsForLoadV(const SDValue Op, KnownBits &Known) {
7448
7449 // We can't do anything without knowing the sign bit.
7450 auto ExtType = LD->getConstantOperandVal(LD->getNumOperands() - 1);
7451 if (ExtType == ISD::SEXTLOAD)
7452 return;
7453
7454 // ExtLoading to vector types is weird and may not work well with known bits.
7455 auto DestVT = LD->getValueType(0);
7456 if (DestVT.isVector())
7457 return;
7458
7459 assert(Known.getBitWidth() == DestVT.getSizeInBits());
7460 auto ElementBitWidth = NVPTXDAGToDAGISel::getFromTypeWidthForLoad(LD);
7461 Known.Zero.setHighBits(Known.getBitWidth() - ElementBitWidth);
7462}
7463
7464void NVPTXTargetLowering::computeKnownBitsForTargetNode(
7465    const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
7466 const SelectionDAG &DAG, unsigned Depth) const {
7467 Known.resetAll();
7468
7469 switch (Op.getOpcode()) {
7470 case NVPTXISD::PRMT:
7471 computeKnownBitsForPRMT(Op, Known, DAG, Depth);
7472 break;
7473 case NVPTXISD::LoadV2:
7474 case NVPTXISD::LoadV4:
7475 case NVPTXISD::LoadV8:
7476    computeKnownBitsForLoadV(Op, Known);
7477    break;
7478 default:
7479 break;
7480 }
7481}
7482
7483static std::pair<APInt, APInt> getPRMTDemandedBits(const APInt &SelectorVal,
7484 const APInt &DemandedBits) {
7485 APInt DemandedLHS = APInt(32, 0);
7486 APInt DemandedRHS = APInt(32, 0);
7487
7488 for (unsigned I : llvm::seq(4)) {
7489 if (DemandedBits.extractBits(8, I * 8).isZero())
7490 continue;
7491
7492 APInt Sel = SelectorVal.extractBits(4, I * 4);
7493 unsigned Idx = Sel.getLoBits(3).getZExtValue();
7494 unsigned Sign = Sel.getHiBits(1).getZExtValue();
7495
7496 APInt &Src = Idx < 4 ? DemandedLHS : DemandedRHS;
7497 unsigned ByteStart = (Idx % 4) * 8;
7498 if (Sign)
7499 Src.setBit(ByteStart + 7);
7500 else
7501 Src.setBits(ByteStart, ByteStart + 8);
7502 }
7503
7504 return {DemandedLHS, DemandedRHS};
7505}
7506
7507// Replace undef with 0 as this is easier for other optimizations such as
7508// known bits.
7509static SDValue canonicalizePRMTInput(SDValue Op, SelectionDAG &DAG) {
7510  if (!Op)
7511 return SDValue();
7512 if (Op.isUndef())
7513 return DAG.getConstant(0, SDLoc(), MVT::i32);
7514 return Op;
7515}
7516
7517static SDValue simplifyDemandedBitsForPRMT(SDValue PRMT,
7518                                           const APInt &DemandedBits,
7519 SelectionDAG &DAG,
7520 const TargetLowering &TLI,
7521 unsigned Depth) {
7522 assert(PRMT.getOpcode() == NVPTXISD::PRMT);
7523 SDValue Op0 = PRMT.getOperand(0);
7524 SDValue Op1 = PRMT.getOperand(1);
7525 auto *SelectorConst = dyn_cast<ConstantSDNode>(PRMT.getOperand(2));
7526 if (!SelectorConst)
7527 return SDValue();
7528
7529 unsigned Mode = PRMT.getConstantOperandVal(3);
7530 const APInt Selector = getPRMTSelector(SelectorConst->getAPIntValue(), Mode);
7531
7532 // Try to simplify the PRMT to one of the inputs if the used bytes are all
7533 // from the same input in the correct order.
7534 const unsigned LeadingBytes = DemandedBits.countLeadingZeros() / 8;
7535 const unsigned SelBits = (4 - LeadingBytes) * 4;
7536 if (Selector.getLoBits(SelBits) == APInt(32, 0x3210).getLoBits(SelBits))
7537 return Op0;
7538 if (Selector.getLoBits(SelBits) == APInt(32, 0x7654).getLoBits(SelBits))
7539 return Op1;
7540
7541 auto [DemandedLHS, DemandedRHS] = getPRMTDemandedBits(Selector, DemandedBits);
7542
7543 // Attempt to avoid multi-use ops if we don't need anything from them.
7544 SDValue DemandedOp0 =
7545 TLI.SimplifyMultipleUseDemandedBits(Op0, DemandedLHS, DAG, Depth + 1);
7546 SDValue DemandedOp1 =
7547 TLI.SimplifyMultipleUseDemandedBits(Op1, DemandedRHS, DAG, Depth + 1);
7548
7549 DemandedOp0 = canonicalizePRMTInput(DemandedOp0, DAG);
7550 DemandedOp1 = canonicalizePRMTInput(DemandedOp1, DAG);
7551 if ((DemandedOp0 && DemandedOp0 != Op0) ||
7552 (DemandedOp1 && DemandedOp1 != Op1)) {
7553 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
7554 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
7555 return getPRMT(Op0, Op1, Selector.getZExtValue(), SDLoc(PRMT), DAG);
7556 }
7557
7558 return SDValue();
7559}
7560
7561bool NVPTXTargetLowering::SimplifyDemandedBitsForTargetNode(
7562    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
7563 KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const {
7564 Known.resetAll();
7565
7566 switch (Op.getOpcode()) {
7567 case NVPTXISD::PRMT:
7568    if (SDValue Result = simplifyDemandedBitsForPRMT(Op, DemandedBits, TLO.DAG,
7569                                                     *this, Depth)) {
7570 TLO.CombineTo(Op, Result);
7571 return true;
7572 }
7573 break;
7574 default:
7575 break;
7576 }
7577
7578 computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth);
7579 return false;
7580}
return SDValue()
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT S1
constexpr LLT F32
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombineWithOperands - Try DAG combinations for an ADD with operands N0 and N1.
static SDValue PerformADDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
static SDValue PerformVSELECTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformBUILD_VECTORCombine - Target-specific dag combine xforms for ISD::BUILD_VECTOR.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file contains the declarations of entities that describe floating point environment and related ...
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first DebugLoc that has line number information, given a range of instructions.
Register Reg
Register const TargetRegisterInfo * TRI
#define T
NVPTX address space definition.
static SDValue reportInvalidTensormapReplaceUsage(SDValue Op, SelectionDAG &DAG, unsigned Val)
static bool shouldConvertToIndirectCall(const CallBase *CB, const GlobalAddressSDNode *Func)
static SDValue combineADDRSPACECAST(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static cl::opt< bool > sched4reg("nvptx-sched4reg", cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false))
static SDValue lowerTcgen05St(SDValue Op, SelectionDAG &DAG)
static SDValue PerformEXTRACTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static cl::opt< NVPTX::DivPrecisionLevel > UsePrecDivF32("nvptx-prec-divf32", cl::Hidden, cl::desc("NVPTX Specific: Override the precision of the lowering for f32 fdiv"), cl::values(clEnumValN(NVPTX::DivPrecisionLevel::Approx, "0", "Use div.approx"), clEnumValN(NVPTX::DivPrecisionLevel::Full, "1", "Use div.full"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754, "2", "Use IEEE Compliant F32 div.rnd if available (default)"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754_NoFTZ, "3", "Use IEEE Compliant F32 div.rnd if available, no FTZ")), cl::init(NVPTX::DivPrecisionLevel::IEEE754))
static bool isConstOne(const SDValue &Operand)
static cl::opt< unsigned > FMAContractLevelOpt("nvptx-fma-level", cl::Hidden, cl::desc("NVPTX Specific: FMA contraction (0: don't do it" " 1: do it 2: do it aggressively"), cl::init(2))
static bool IsPTXVectorType(MVT VT)
static SDValue PerformSELECTShiftCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Transform patterns like: (select (ugt shift_amt, BitWidth-1), 0, (srl/shl x, shift_amt)) (select (ult...
static SDValue lowerLOADi1(LoadSDNode *LD, SelectionDAG &DAG)
static SDValue lowerIntrinsicVoid(SDValue Op, SelectionDAG &DAG)
static MachinePointerInfo refinePtrAS(SDValue &Ptr, SelectionDAG &DAG, const DataLayout &DL, const TargetLowering &TL)
static SDValue lowerROT(SDValue Op, SelectionDAG &DAG)
static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL, LLVMContext &Ctx, CallingConv::ID CallConv, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > &Offsets, uint64_t StartingOffset=0)
ComputePTXValueVTs - For the given Type Ty, returns the set of primitive legal-ish MVTs that compose ...
static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static void replaceAtomicSwap128(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI, SmallVectorImpl< SDValue > &Results)
static unsigned getMinMax3Opcode(unsigned MinMax2Opcode)
Get 3-input version of a 2-input min/max opcode.
static SDValue lowerSTOREVector(SDValue Op, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static SDValue lowerLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static void replaceProxyReg(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &Results)
static void ReplaceCopyFromReg_128(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
#define TCGEN05_LD_RED_INST(SHAPE, NUM, TYPE)
static SDValue lowerCTLZCTPOP(SDValue Op, SelectionDAG &DAG)
static SDValue combineMADConstOne(SDValue X, SDValue Add, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
static unsigned getTcgen05LdRedID(Intrinsic::ID IID)
static SDValue combinePRMT(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue combinePackingMovIntoStore(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned Front, unsigned Back)
Fold packing movs into a store.
static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static SDValue getBuildVectorizedValue(unsigned N, const SDLoc &dl, SelectionDAG &DAG, T GetElement)
static SDValue getExtractVectorizedValue(SDValue V, unsigned I, EVT VT, const SDLoc &dl, SelectionDAG &DAG)
static unsigned canMergeParamLoadStoresStartingAt(unsigned Idx, uint32_t AccessSize, const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment)
static EVT getVectorizedVT(EVT VT, unsigned N, LLVMContext &C)
static SDValue lowerIntrinsicWOChain(SDValue Op, SelectionDAG &DAG)
static SDValue PerformFMinMaxCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned PTXVersion, unsigned SmVersion)
PerformFMinMaxCombine - Combine (fmaxnum (fmaxnum a, b), c) into (fmaxnum3 a, b, c).
static SDValue combineMulWide(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static std::optional< unsigned > getScalar3OpcodeForReduction(unsigned ReductionOpcode)
Get 3-input scalar reduction opcode.
static SDValue lowerIntrinsicWChain(SDValue Op, SelectionDAG &DAG)
static bool isConstZero(const SDValue &Operand)
static SDValue LowerVectorArith(SDValue Op, SelectionDAG &DAG)
static SDValue LowerTcgen05MMADisableOutputLane(SDValue Op, SelectionDAG &DAG)
static bool IsMulWideOperandDemotable(SDValue Op, unsigned OptSize, OperandSignedness &S)
IsMulWideOperandDemotable - Checks if the provided DAG node is an operand that can be demoted to OptS...
static unsigned getTcgen05MMADisableOutputLane(unsigned IID)
static std::pair< APInt, APInt > getPRMTDemandedBits(const APInt &SelectorVal, const APInt &DemandedBits)
static APInt computePRMT(APInt A, APInt B, APInt Selector, unsigned Mode)
static ISD::NodeType getScalarOpcodeForReduction(unsigned ReductionOpcode)
static SDValue PerformREMCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue lowerBSWAP(SDValue Op, SelectionDAG &DAG)
static SDValue lowerMSTORE(SDValue Op, SelectionDAG &DAG)
static SDValue PerformMULCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI)
static void computeKnownBitsForPRMT(const SDValue Op, KnownBits &Known, const SelectionDAG &DAG, unsigned Depth)
static SDValue combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Fold unpacking movs into a load by increasing the number of return values.
#define TCGEN05_LD_RED_INTR(SHAPE, NUM, TYPE)
static SDValue lowerTensormapReplaceElemtype(SDValue Op, SelectionDAG &DAG)
static SDValue LowerClusterLaunchControlQueryCancel(SDValue Op, SelectionDAG &DAG)
static std::optional< std::pair< SDValue, SDValue > > lowerTcgen05Ld(SDNode *N, SelectionDAG &DAG, bool HasOffset=false)
static SDValue lowerCvtRSIntrinsics(SDValue Op, SelectionDAG &DAG)
static std::optional< std::pair< SDValue, SDValue > > replaceLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
replaceLoadVector - Convert vector loads into multi-output scalar loads.
static SDValue expandFSH64(SDValue A, SDValue B, SDValue ShiftAmount, SDLoc DL, unsigned Opcode, SelectionDAG &DAG)
static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS, unsigned OptSize, bool &IsSigned)
AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can be demoted to OptSize bits...
static std::pair< MemSDNode *, uint32_t > convertMLOADToLoadWithUsedBytesMask(MemSDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static SDValue TryMULWIDECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply of M/2 bits that produces...
static SDValue lowerPrmtIntrinsic(SDValue Op, SelectionDAG &DAG)
static SDValue combineMulSelectConstOne(SDValue X, SDValue Select, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
static SDValue buildTreeReduction(const SmallVector< SDValue > &Elements, EVT EltTy, ArrayRef< std::pair< unsigned, unsigned > > Ops, const SDLoc &DL, const SDNodeFlags Flags, SelectionDAG &DAG)
Reduces the elements using the scalar operations provided.
static SDValue combineProxyReg(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SmallVector< unsigned, 16 > VectorizePTXValueVTs(const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment, bool IsVAArg=false)
static SDValue getPRMT(SDValue A, SDValue B, SDValue Selector, SDLoc DL, SelectionDAG &DAG, unsigned Mode=NVPTX::PTXPrmtMode::NONE)
static SDValue matchMADConstOnePattern(SDValue Add)
static SDValue correctParamType(SDValue V, EVT ExpectedVT, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, SDLoc dl)
static ISD::NodeType getExtOpcode(const ISD::ArgFlagsTy &Flags)
static cl::opt< bool > UsePrecSqrtF32("nvptx-prec-sqrtf32", cl::Hidden, cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."), cl::init(true))
static void computeKnownBitsForLoadV(const SDValue Op, KnownBits &Known)
static APInt getPRMTSelector(const APInt &Selector, unsigned Mode)
static EVT promoteScalarIntegerPTX(const EVT VT)
PromoteScalarIntegerPTX Used to make sure the arguments/returns are suitable for passing and promote ...
static std::optional< std::tuple< SDValue, SDValue, SDValue > > lowerTcgen05LdRed(SDNode *N, SelectionDAG &DAG)
static SDValue simplifyDemandedBitsForPRMT(SDValue PRMT, const APInt &DemandedBits, SelectionDAG &DAG, const TargetLowering &TLI, unsigned Depth)
static SDValue lowerFREM(SDValue Op, SelectionDAG &DAG)
static SDValue canonicalizePRMTInput(SDValue Op, SelectionDAG &DAG)
static SDValue sinkProxyReg(SDValue R, SDValue Chain, TargetLowering::DAGCombinerInfo &DCI)
static SDValue lowerFSH(SDValue Op, SelectionDAG &DAG)
static SDValue lowerTensormapReplaceSwizzleMode(SDValue Op, SelectionDAG &DAG)
static SDValue PromoteBinOpToF32(SDNode *N, SelectionDAG &DAG)
static SDValue PerformSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned int SmVersion)
static std::optional< std::pair< unsigned int, MVT > > getVectorLoweringShape(EVT VectorEVT, const NVPTXSubtarget &STI, unsigned AddressSpace)
static cl::opt< bool > ForceMinByValParamAlign("nvptx-force-min-byval-param-align", cl::Hidden, cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval" " params of device functions."), cl::init(false))
static cl::opt< bool > UseApproxLog2F32("nvptx-approx-log2f32", cl::desc("NVPTX Specific: whether to use lg2.approx for log2"), cl::init(false))
Whereas CUDA's implementation (see libdevice) uses ex2.approx for exp2(), it does NOT use lg2....
static SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG)
static SDValue combineLOAD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
static SDValue combineSTORE(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
static SDValue PerformSHLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
MachineInstr unsigned OpIdx
uint64_t High
#define P(N)
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
Contains matchers for matching SelectionDAG nodes and values.
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
This file describes how to lower LLVM code to machine code.
Value * RHS
Value * LHS
BinaryOperator * Mul
static const fltSemantics & IEEEsingle()
Definition APFloat.h:296
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Definition APFloat.h:1161
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt getLoBits(unsigned numBits) const
Compute an APInt containing numBits lowbits from this APInt.
Definition APInt.cpp:644
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1549
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
Definition APInt.h:1400
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
Definition APInt.cpp:639
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:936
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1339
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1497
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition APInt.h:436
bool slt(const APInt &RHS) const
Signed less than comparison.
Definition APInt.h:1131
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
Definition APInt.cpp:482
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition APInt.h:433
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
Definition APInt.h:1238
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:186
an instruction that atomically reads a memory location, combines it with another value,...
@ Add
*p = old + v
@ FAdd
*p = old + v
@ Min
*p = old <signed v ? old : v
@ Sub
*p = old - v
@ And
*p = old & v
@ Xor
*p = old ^ v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
@ UDecWrap
Decrement one until a minimum value or zero.
bool isFloatingPointOperation() const
BinOp getOperation() const
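A hedged sketch of how a shouldExpandAtomicRMWInIR-style hook might inspect an AtomicRMWInst with the accessors above. The expansion policy shown is purely illustrative and is not NVPTX's actual behaviour.

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static TargetLowering::AtomicExpansionKind
sketchExpandAtomicRMW(const AtomicRMWInst *AI) {
  // Floating-point RMW (e.g. fadd) is often turned into a cmpxchg loop on
  // targets without a native instruction for it.
  if (AI->isFloatingPointOperation())
    return TargetLowering::AtomicExpansionKind::CmpXChg;
  switch (AI->getOperation()) {
  case AtomicRMWInst::UIncWrap:
  case AtomicRMWInst::UDecWrap:
    return TargetLowering::AtomicExpansionKind::CmpXChg;
  default:
    return TargetLowering::AtomicExpansionKind::None; // leave it alone
  }
}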
This is an SDNode representing atomic operations.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
FunctionType * getFunctionType() const
const APInt & getAPIntValue() const
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
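The two DataLayout queries above are commonly paired when sizing and aligning parameter storage; a minimal, hedged example (the vector type is arbitrary):

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

void dataLayoutDemo(const llvm::DataLayout &DL, llvm::LLVMContext &Ctx) {
  // Allocation size (including tail padding) and preferred alignment of <4 x i32>.
  llvm::Type *VecTy =
      llvm::FixedVectorType::get(llvm::Type::getInt32Ty(Ctx), 4);
  llvm::TypeSize Size = DL.getTypeAllocSize(VecTy); // 16 bytes on most layouts
  llvm::Align PrefAlign = DL.getPrefTypeAlign(VecTy);
  (void)Size; (void)PrefAlign;
}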
Diagnostic information for unsupported feature in backend.
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
Definition Function.cpp:640
Module * getParent()
Get the module that this global value is contained inside of...
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
MCSection * getDataSection() const
static constexpr unsigned NoRegister
Definition MCRegister.h:60
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Definition MCSection.h:517
StringRef getName() const
getName - Get the symbol name.
Definition MCSymbol.h:188
Machine Value Type.
static auto integer_fixedlen_vector_valuetypes()
SimpleValueType SimpleTy
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
static auto fp_fixedlen_vector_valuetypes()
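A small, hedged illustration of the MVT factory and query methods listed above; the header path assumes a recent LLVM tree.

#include "llvm/CodeGenTypes/MachineValueType.h"

void mvtDemo() {
  llvm::MVT Vec = llvm::MVT::getVectorVT(llvm::MVT::f16, 2); // v2f16
  bool IsVec = Vec.isVector();                               // true
  unsigned NumElts = Vec.getVectorNumElements();             // 2
  llvm::TypeSize Bits = Vec.getSizeInBits();                 // 32 bits
  llvm::MVT SameSizeInt =
      llvm::MVT::getIntegerVT(Bits.getFixedValue());         // i32
  (void)IsVec; (void)NumElts; (void)SameSizeInt;
}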
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
This is an abstract virtual class for memory operations.
Align getAlign() const
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
EVT getMemoryVT() const
Return the type of the in-memory value.
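A hedged sketch of how lowering code usually interrogates a MemSDNode before rewriting it; the "naturally aligned, non-volatile" predicate is illustrative and assumes a fixed-size memory VT.

#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"

static bool isNaturallyAlignedNonVolatile(const llvm::MemSDNode *Mem) {
  llvm::EVT MemVT = Mem->getMemoryVT();                // in-memory value type
  llvm::Align A = Mem->getAlign();                     // alignment of the access
  llvm::MachineMemOperand *MMO = Mem->getMemOperand(); // full memory-reference info
  return MMO && !MMO->isVolatile() &&
         A.value() >= MemVT.getStoreSize().getFixedValue();
}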
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
static unsigned getFromTypeWidthForLoad(const MemSDNode *Mem)
bool hasTensormapReplaceSwizzleModeSupport(unsigned value) const
bool hasUsedBytesMaskPragma() const
bool hasTensormapReplaceElemtypeSupport(unsigned value) const
bool hasAtomSwap128() const
bool hasF32x2Instructions() const
bool has256BitVectorLoadStore(unsigned AS) const
AtomicOrdering atomicOperationOrderAfterFenceSplit(const Instruction *I) const override
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
const NVPTXTargetMachine * nvTM
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallBase &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const override
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
AtomicExpansionKind shouldExpandAtomicRMWInIR(const AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
NVPTXTargetLowering(const NVPTXTargetMachine &TM, const NVPTXSubtarget &STI)
std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &, const SmallVectorImpl< ISD::OutputArg > &, std::optional< unsigned > FirstVAArg, const CallBase &CB, unsigned UniqueCallSite) const
unsigned getPreferredFPToIntOpcode(unsigned Op, EVT FromVT, EVT ToVT) const override
bool useF32FTZ(const MachineFunction &MF) const
SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const
Align getFunctionArgumentAlignment(const Function *F, Type *Ty, unsigned Idx, const DataLayout &DL) const
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &ExtraSteps, bool &UseOneConst, bool Reciprocal) const override
Hooks for building estimates in place of slower divisions and square roots.
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
std::string getParamName(const Function *F, int Idx) const
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
NVPTX::DivPrecisionLevel getDivF32Level(const MachineFunction &MF, const SDNode &N) const
bool shouldInsertFencesForAtomic(const Instruction *) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
Align getFunctionParamOptimizedAlign(const Function *F, Type *ArgTy, const DataLayout &DL) const
getFunctionParamOptimizedAlign - since function arguments are passed via .param space,...
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, EVT VT) const override
Return the ValueType of the result of SETCC operations.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
Align getFunctionByValParamAlign(const Function *F, Type *ArgTy, Align InitialAlign, const DataLayout &DL) const
Helper for computing alignment of a device function byval parameter.
bool allowFMA(MachineFunction &MF, CodeGenOptLevel OptLevel) const
bool usePrecSqrtF32(const SDNode *N=nullptr) const
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
MCSection * SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const override
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
unsigned getIROrder() const
Return the node ordering.
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
iterator_range< user_iterator > users()
void setFlags(SDNodeFlags NewFlags)
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
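For orientation, the SDValue/SDNode accessors above are typically combined like this inside a combine or lowering helper; the specific pattern checked is arbitrary.

#include "llvm/CodeGen/SelectionDAGNodes.h"

// Does V compute zext(i1), and is this result used exactly once?
static bool isSingleUseZExtOfI1(llvm::SDValue V) {
  if (V.getOpcode() != llvm::ISD::ZERO_EXTEND)
    return false;
  llvm::SDValue Src = V.getOperand(0);
  // hasOneUse() on an SDValue counts uses of this particular result number.
  return Src.getValueType() == llvm::MVT::i1 && V.hasOneUse();
}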
SectionKind - This is a simple POD value that classifies the properties of a section.
Definition SectionKind.h:22
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalS...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
LLVM_ABI SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, which starts a new call frame in which InSize bytes are set up inside ...
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
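A hedged sketch of building a small DAG fragment with the SelectionDAG factory methods listed above; the computation itself (a select between a sum and one operand, keyed on the sign of a third value) is arbitrary.

#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"

using namespace llvm;

static SDValue buildSelectOfSum(SelectionDAG &DAG, const SDLoc &DL,
                                SDValue A, SDValue B, SDValue C) {
  EVT VT = A.getValueType();
  SDValue Sum = DAG.getNode(ISD::ADD, DL, VT, A, B);
  SDValue Zero = DAG.getConstant(0, DL, VT);
  // Ask the target for the setcc result type instead of hard-coding i1.
  EVT CCVT = DAG.getTargetLoweringInfo().getSetCCResultType(
      DAG.getDataLayout(), *DAG.getContext(), VT);
  SDValue IsNeg = DAG.getSetCC(DL, CCVT, C, Zero, ISD::SETLT);
  return DAG.getSelect(DL, VT, IsNeg, Sum, A); // IsNeg ? A+B : A
}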
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:146
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:140
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
void setMaxDivRemBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum div/rem the backend supports.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
Convenience method to set an operation to Promote and specify the type in a single call.
LegalizeTypeAction
This enum indicates whether types are legal for a target, and if not, what action should be used to...
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
std::vector< ArgListEntry > ArgListTy
virtual Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
virtual Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
Inserts in the IR a target-specific intrinsic specifying a fence.
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
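The TargetLoweringBase setters above are normally called from a target's TargetLowering constructor. The fragment below is only a generic illustration of that shape; the register class, the STI subtarget accessor, and the specific actions are hypothetical and are not NVPTX's configuration.

// Fragment: belongs inside a hypothetical TargetLowering subclass constructor.
addRegisterClass(MVT::i32, &Hypo::GPR32RegClass);           // hypothetical register class
setOperationAction(ISD::SDIVREM, MVT::i32, Expand);         // split into SDIV + SREM
setTruncStoreAction(MVT::f32, MVT::f16, Expand);            // no direct f32->f16 store
setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand); // no direct f16->f32 extload
setTargetDAGCombine({ISD::SHL, ISD::STORE});                // request PerformDAGCombine calls
setSchedulingPreference(Sched::Source);
computeRegisterProperties(STI.getRegisterInfo());           // STI: hypothetical subtarget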
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contrib...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
TargetLowering(const TargetLowering &)=delete
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
TargetOptions Options
MCSymbol * getSymbol(const GlobalValue *GV) const
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt pow(const APInt &X, int64_t N)
Compute X^N for N>=0.
Definition APInt.cpp:3166
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition ISDOpcodes.h:41
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:818
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ CTLZ_ZERO_UNDEF
Definition ISDOpcodes.h:787
@ POISON
POISON - A poison node.
Definition ISDOpcodes.h:236
@ MLOAD
Masked load and store - consecutive vector load and store operations with additional mask operand tha...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition ISDOpcodes.h:275
@ BSWAP
Byte Swap and Counting operators.
Definition ISDOpcodes.h:778
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:294
@ ADD
Simple integer binary arithmetic operators.
Definition ISDOpcodes.h:264
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition ISDOpcodes.h:852
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition ISDOpcodes.h:518
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition ISDOpcodes.h:220
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition ISDOpcodes.h:879
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
Definition ISDOpcodes.h:584
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
Definition ISDOpcodes.h:417
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition ISDOpcodes.h:746
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition ISDOpcodes.h:280
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition ISDOpcodes.h:992
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition ISDOpcodes.h:254
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:843
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readsteadycounter intrinsic.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
Definition ISDOpcodes.h:352
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Definition ISDOpcodes.h:374
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition ISDOpcodes.h:795
@ UNDEF
UNDEF - An undefined node.
Definition ISDOpcodes.h:233
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
Definition ISDOpcodes.h:247
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition ISDOpcodes.h:230
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition ISDOpcodes.h:348
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition ISDOpcodes.h:703
@ SHL
Shift and rotation operations.
Definition ISDOpcodes.h:764
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition ISDOpcodes.h:649
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition ISDOpcodes.h:614
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition ISDOpcodes.h:576
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
Definition ISDOpcodes.h:224
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:849
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition ISDOpcodes.h:810
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
Definition ISDOpcodes.h:386
@ SMULO
Same for multiplication.
Definition ISDOpcodes.h:356
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition ISDOpcodes.h:887
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition ISDOpcodes.h:726
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition ISDOpcodes.h:977
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition ISDOpcodes.h:804
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:328
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition ISDOpcodes.h:110
@ STRICT_FP_TO_UINT
Definition ISDOpcodes.h:478
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:477
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:925
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition ISDOpcodes.h:738
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition ISDOpcodes.h:205
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:304
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
Definition ISDOpcodes.h:241
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition ISDOpcodes.h:565
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition ISDOpcodes.h:958
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
Definition ISDOpcodes.h:996
@ VECREDUCE_FMINIMUM
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition ISDOpcodes.h:855
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition ISDOpcodes.h:832
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition ISDOpcodes.h:534
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition ISDOpcodes.h:365
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that are the same as FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:338
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition ISDOpcodes.h:213
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
Definition ISDOpcodes.h:556
LLVM_ABI bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
@ Bitcast
Perform the operation on a different, but equivalently sized type.
@ ATOMIC_CMP_SWAP_B128
These nodes are used to lower atomic instructions with i128 type.
bool isPackedVectorTy(EVT VT)
DivPrecisionLevel
Definition NVPTX.h:257
match_combine_or< CastInst_match< OpTy, TruncInst >, OpTy > m_TruncOrSelf(const OpTy &Op)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
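A hedged example of the IR-level matchers listed above; the pattern recognized (a select of zero versus a left shift by a specific amount) is made up for illustration.

#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"

using namespace llvm;
using namespace llvm::PatternMatch;

// Matches "select %c, 0, (shl %y, ShAmt)" and captures %y on success.
static bool matchSelectZeroOrShl(Value *V, Value *&Y, uint64_t ShAmt) {
  Value *Cond = nullptr;
  return match(V, m_Select(m_Value(Cond), m_Zero(),
                           m_Shl(m_Value(Y), m_SpecificInt(ShAmt))));
}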
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iterable types.
Definition STLExtras.h:829
bool shouldEmitPTXNoReturn(const Value *V, const TargetMachine &TM)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
MaybeAlign getAlign(const CallInst &I, unsigned Index)
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition Analysis.cpp:119
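A hedged sketch of the typical ComputeValueVTs call used during argument and return lowering: flattening an IR type into per-piece EVTs and byte offsets. The signature follows the declaration above; the loop body is a placeholder.

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"

using namespace llvm;

static void flattenType(const TargetLowering &TLI, const DataLayout &DL,
                        Type *Ty) {
  SmallVector<EVT, 8> ValueVTs;
  SmallVector<TypeSize, 8> Offsets;
  ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, &Offsets);
  for (auto [VT, Off] : zip(ValueVTs, Offsets)) {
    // Each VT is one scalar or vector piece of Ty; Off is its byte offset.
    (void)VT;
    (void)Off;
  }
}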
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2544
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
Definition MathExtras.h:385
bool isReleaseOrStronger(AtomicOrdering AO)
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition STLExtras.h:2016
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
unsigned promoteScalarArgumentSize(unsigned size)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:163
bool shouldPassAsArray(Type *Ty)
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
@ Default
-O2, -Os, -Oz
Definition CodeGen.h:85
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAcquireOrStronger(AtomicOrdering AO)
constexpr unsigned BitWidth
bool isKernelFunction(const Function &F)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
Function * getMaybeBitcastedCallee(const CallBase *CB)
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
Extended Value Type.
Definition ValueTypes.h:35
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition ValueTypes.h:395
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:137
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition ValueTypes.h:74
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
Definition ValueTypes.h:121
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition ValueTypes.h:284
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition ValueTypes.h:300
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
Definition ValueTypes.h:147
ElementCount getVectorElementCount() const
Definition ValueTypes.h:350
bool is32BitVector() const
Return true if this is a 32-bit vector type.
Definition ValueTypes.h:197
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:373
uint64_t getScalarSizeInBits() const
Definition ValueTypes.h:385
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:316
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition ValueTypes.h:381
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:168
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition ValueTypes.h:323
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Definition ValueTypes.h:256
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition ValueTypes.h:328
EVT changeElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a type whose attributes match ourselves with the exception of the element type that i...
Definition ValueTypes.h:113
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition ValueTypes.h:157
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition ValueTypes.h:336
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition ValueTypes.h:152
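A brief, hedged illustration of the EVT queries listed above; the vector type is arbitrary.

#include <cstdint>
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/LLVMContext.h"

void evtDemo(llvm::LLVMContext &Ctx) {
  llvm::EVT VT = llvm::EVT::getVectorVT(Ctx, llvm::MVT::f16, 4); // v4f16
  bool FP = VT.isFloatingPoint();             // true: vector of FP elements
  llvm::EVT Elt = VT.getVectorElementType();  // f16
  uint64_t Bits = VT.getFixedSizeInBits();    // 64
  llvm::EVT AsInt = VT.changeTypeToInteger(); // v4i16
  (void)FP; (void)Elt; (void)Bits; (void)AsInt;
}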
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
Definition KnownBits.h:246
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
void resetAll()
Resets the known state of all bits.
Definition KnownBits.h:74
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
Definition KnownBits.h:232
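A hedged sketch of the KnownBits bookkeeping that computeKnownBitsForTargetNode-style code performs when a 64-bit value is assembled from two 32-bit halves; both inputs are assumed to be 32 bits wide.

#include "llvm/Support/KnownBits.h"

llvm::KnownBits concatHalves(const llvm::KnownBits &LoHalf,
                             const llvm::KnownBits &HiHalf) {
  // concat() places HiHalf in bits [32,64) above LoHalf.
  llvm::KnownBits Known = HiHalf.concat(LoHalf);
  // Equivalent construction: start fully unknown and insert each half.
  llvm::KnownBits Alt(64);
  Alt.insertBits(LoHalf, /*BitPosition=*/0);
  Alt.insertBits(HiHalf, /*BitPosition=*/32);
  return Known;
}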
This class contains a discriminated union of information about pointers in memory operands,...
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
These are IR-level optimization flags that may be propagated to SDNodes.
bool hasAllowContract() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...