1//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation ---------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that NVPTX uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "NVPTXISelLowering.h"
16#include "NVPTX.h"
17#include "NVPTXISelDAGToDAG.h"
19#include "NVPTXSubtarget.h"
20#include "NVPTXTargetMachine.h"
22#include "NVPTXUtilities.h"
23#include "llvm/ADT/APFloat.h"
24#include "llvm/ADT/APInt.h"
25#include "llvm/ADT/STLExtras.h"
27#include "llvm/ADT/StringRef.h"
40#include "llvm/IR/Argument.h"
41#include "llvm/IR/Attributes.h"
42#include "llvm/IR/Constants.h"
43#include "llvm/IR/DataLayout.h"
46#include "llvm/IR/FPEnv.h"
47#include "llvm/IR/Function.h"
48#include "llvm/IR/GlobalValue.h"
49#include "llvm/IR/IRBuilder.h"
50#include "llvm/IR/Instruction.h"
52#include "llvm/IR/IntrinsicsNVPTX.h"
53#include "llvm/IR/Module.h"
54#include "llvm/IR/Type.h"
55#include "llvm/IR/Value.h"
67#include <algorithm>
68#include <cassert>
69#include <cmath>
70#include <cstdint>
71#include <iterator>
72#include <optional>
73#include <string>
74#include <tuple>
75#include <utility>
76#include <vector>
77
78#define DEBUG_TYPE "nvptx-lower"
79
80using namespace llvm;
81
83 "nvptx-sched4reg",
84 cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));
85
87 "nvptx-fma-level", cl::Hidden,
88 cl::desc("NVPTX Specific: FMA contraction (0: don't do it"
89 " 1: do it 2: do it aggressively"),
90 cl::init(2));
91
93 "nvptx-prec-divf32", cl::Hidden,
95 "NVPTX Specific: Override the precision of the lowering for f32 fdiv"),
97 clEnumValN(NVPTX::DivPrecisionLevel::Approx, "0", "Use div.approx"),
98 clEnumValN(NVPTX::DivPrecisionLevel::Full, "1", "Use div.full"),
100 "Use IEEE Compliant F32 div.rnd if available (default)"),
102 "Use IEEE Compliant F32 div.rnd if available, no FTZ")),
104
106 "nvptx-prec-sqrtf32", cl::Hidden,
107 cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."),
108 cl::init(true));
109
110/// Whereas CUDA's implementation (see libdevice) uses ex2.approx for exp2(), it
111/// does NOT use lg2.approx for log2, so this is disabled by default.
112static cl::opt<bool> UseApproxLog2F32(
113 "nvptx-approx-log2f32",
114 cl::desc("NVPTX Specific: whether to use lg2.approx for log2"),
115 cl::init(false));
116
118 "nvptx-force-min-byval-param-align", cl::Hidden,
119 cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval"
120 " params of device functions."),
121 cl::init(false));
122
125 const SDNode &N) const {
126 // If nvptx-prec-divf32=N is used on the command line, always honor it
127 if (UsePrecDivF32.getNumOccurrences() > 0)
128 return UsePrecDivF32;
129
130 const SDNodeFlags Flags = N.getFlags();
131 if (Flags.hasApproximateFuncs())
133
135}
136
138 // If nvptx-prec-sqrtf32 is used on the command-line, always honor it
139 if (UsePrecSqrtF32.getNumOccurrences() > 0)
140 return UsePrecSqrtF32;
141
142 if (N) {
143 const SDNodeFlags Flags = N->getFlags();
144 if (Flags.hasApproximateFuncs())
145 return false;
146 }
147
148 return true;
149}
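// Editorial note (not part of the original source, derived from the logic
// above): with the defaults, usePrecSqrtF32() returns true, so f32 sqrt is
// lowered to the precise sqrt.rn form; passing -nvptx-prec-sqrtf32=0, or
// marking the node with the afn (approximate-functions) fast-math flag,
// switches the lowering to sqrt.approx instead.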
150
155
156static bool IsPTXVectorType(MVT VT) {
157 switch (VT.SimpleTy) {
158 default:
159 return false;
160 case MVT::v2i1:
161 case MVT::v4i1:
162 case MVT::v2i8:
163 case MVT::v4i8:
164 case MVT::v8i8: // <2 x i8x4>
165 case MVT::v16i8: // <4 x i8x4>
166 case MVT::v2i16:
167 case MVT::v4i16:
168 case MVT::v8i16: // <4 x i16x2>
169 case MVT::v2i32:
170 case MVT::v4i32:
171 case MVT::v2i64:
172 case MVT::v2f16:
173 case MVT::v4f16:
174 case MVT::v8f16: // <4 x f16x2>
175 case MVT::v2bf16:
176 case MVT::v4bf16:
177 case MVT::v8bf16: // <4 x bf16x2>
178 case MVT::v2f32:
179 case MVT::v4f32:
180 case MVT::v2f64:
181 case MVT::v4i64:
182 case MVT::v4f64:
183 case MVT::v8i32:
184 case MVT::v8f32:
185 case MVT::v16f16: // <8 x f16x2>
186 case MVT::v16bf16: // <8 x bf16x2>
187 case MVT::v16i16: // <8 x i16x2>
188 case MVT::v32i8: // <8 x i8x4>
189 return true;
190 }
191}
192
193// When legalizing vector loads/stores, this function is called, and it does two
194// things:
195// 1. Determines whether the vector is something we want to custom lower;
196// std::nullopt is returned if we do not want to custom lower it.
197// 2. If we do want to handle it, returns two parameters:
198// - unsigned int NumElts - The number of elements in the final vector
199// - EVT EltVT - The type of the elements in the final vector
200static std::optional<std::pair<unsigned int, MVT>>
201getVectorLoweringShape(EVT VectorEVT, const NVPTXSubtarget &STI,
202                       unsigned AddressSpace) {
203 const bool CanLowerTo256Bit = STI.has256BitVectorLoadStore(AddressSpace);
204
205 if (CanLowerTo256Bit && VectorEVT.isScalarInteger() &&
206 VectorEVT.getSizeInBits() == 256)
207 return {{4, MVT::i64}};
208
209 if (!VectorEVT.isSimple())
210 return std::nullopt;
211 const MVT VectorVT = VectorEVT.getSimpleVT();
212
213 if (!VectorVT.isVector()) {
214 if (VectorVT == MVT::i128 || VectorVT == MVT::f128)
215 return {{2, MVT::i64}};
216 return std::nullopt;
217 }
218
219 const MVT EltVT = VectorVT.getVectorElementType();
220 const unsigned NumElts = VectorVT.getVectorNumElements();
221
222 // The size of the PTX virtual register that holds a packed type.
223 unsigned PackRegSize;
224
225 // We only handle "native" vector sizes for now, e.g. <4 x double> is not
226 // legal. We can (and should) split that into 2 stores of <2 x double> here
227 // but I'm leaving that as a TODO for now.
228 switch (VectorVT.SimpleTy) {
229 default:
230 return std::nullopt;
231
232 case MVT::v4i64:
233 case MVT::v4f64:
234 // This is a "native" vector type iff the address space is global and the
235 // target supports 256-bit loads/stores
236 if (!CanLowerTo256Bit)
237 return std::nullopt;
238 [[fallthrough]];
239 case MVT::v2i8:
240 case MVT::v2i64:
241 case MVT::v2f64:
242 // This is a "native" vector type
243 return std::pair(NumElts, EltVT);
244
245 case MVT::v16f16: // <8 x f16x2>
246 case MVT::v16bf16: // <8 x bf16x2>
247 case MVT::v16i16: // <8 x i16x2>
248 case MVT::v32i8: // <8 x i8x4>
249 // This can be upsized into a "native" vector type iff the address space is
250 // global and the target supports 256-bit loads/stores.
251 if (!CanLowerTo256Bit)
252 return std::nullopt;
253 [[fallthrough]];
254 case MVT::v2i16: // <1 x i16x2>
255 case MVT::v2f16: // <1 x f16x2>
256 case MVT::v2bf16: // <1 x bf16x2>
257 case MVT::v4i8: // <1 x i8x4>
258 case MVT::v4i16: // <2 x i16x2>
259 case MVT::v4f16: // <2 x f16x2>
260 case MVT::v4bf16: // <2 x bf16x2>
261 case MVT::v8i8: // <2 x i8x4>
262 case MVT::v8f16: // <4 x f16x2>
263 case MVT::v8bf16: // <4 x bf16x2>
264 case MVT::v8i16: // <4 x i16x2>
265 case MVT::v16i8: // <4 x i8x4>
266 PackRegSize = 32;
267 break;
268
269 case MVT::v8f32: // <4 x f32x2>
270 case MVT::v8i32: // <4 x i32x2>
271 // This is a "native" vector type iff the address space is global and the
272 // target supports 256-bit loads/stores
273 if (!CanLowerTo256Bit)
274 return std::nullopt;
275 [[fallthrough]];
276 case MVT::v2f32: // <1 x f32x2>
277 case MVT::v4f32: // <2 x f32x2>
278 case MVT::v2i32: // <1 x i32x2>
279 case MVT::v4i32: // <2 x i32x2>
280 if (!STI.hasF32x2Instructions())
281 return std::pair(NumElts, EltVT);
282 PackRegSize = 64;
283 break;
284 }
285
286 // If we reach here, then we can pack 2 or more elements into a single 32-bit
287 // or 64-bit PTX register and treat the vector as a new vector containing
288 // packed elements.
289
290 // Number of elements to pack in one word.
291 const unsigned NPerReg = PackRegSize / EltVT.getSizeInBits();
292
293 return std::pair(NumElts / NPerReg, MVT::getVectorVT(EltVT, NPerReg));
294}
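// Editorial examples (derived from the cases above, not in the original file):
//   <8 x half> in any address space                      -> {4, v2f16}
//   <4 x float> when f32x2 instructions are available    -> {2, v2f32}
//   <16 x half> in global AS with 256-bit loads/stores   -> {8, v2f16}
//   <4 x double> without 256-bit load/store support      -> std::nullopt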
295
296/// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
297/// legal-ish MVTs that compose it. Unlike ComputeValueVTs, this will legalize
298/// the types as required by the calling convention (with special handling for
299/// i8s).
300/// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
301/// same number of types as the Ins/Outs arrays in LowerFormalArguments,
302/// LowerCall, and LowerReturn.
303static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
304 LLVMContext &Ctx, CallingConv::ID CallConv,
305 Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
307 uint64_t StartingOffset = 0) {
308 SmallVector<EVT, 16> TempVTs;
309 SmallVector<uint64_t, 16> TempOffsets;
310 ComputeValueVTs(TLI, DL, Ty, TempVTs, /*MemVTs=*/nullptr, &TempOffsets,
311 StartingOffset);
312
313 for (const auto [VT, Off] : zip(TempVTs, TempOffsets)) {
314 MVT RegisterVT = TLI.getRegisterTypeForCallingConv(Ctx, CallConv, VT);
315 unsigned NumRegs = TLI.getNumRegistersForCallingConv(Ctx, CallConv, VT);
316
317 // Since we actually can load/store b8, we need to ensure that we'll use
318 // the original sized type for any i8s or i8 vectors.
319 if (VT.getScalarType() == MVT::i8) {
320 if (RegisterVT == MVT::i16)
321 RegisterVT = MVT::i8;
322 else if (RegisterVT == MVT::v2i16)
323 RegisterVT = MVT::v2i8;
324 else
325 assert(RegisterVT == MVT::v4i8 &&
326 "Expected v4i8, v2i16, or i16 for i8 RegisterVT");
327 }
328
329 // TODO: This is horribly incorrect for cases where the vector elements are
330 // not a multiple of bytes (ex i1) and legal or i8. However, this problem
331 // has existed for as long as NVPTX has and no one has complained, so we'll
332 // leave it for now.
333 for (unsigned I : seq(NumRegs)) {
334 ValueVTs.push_back(RegisterVT);
335 Offsets.push_back(Off + I * RegisterVT.getStoreSize());
336 }
337 }
338}
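// Editorial example (a sketch of the behavior above): for a struct {i32, i8},
// ComputeValueVTs yields {i32, i8} at offsets {0, 4}; the i8 piece would
// normally use the i16 register type, but the special case above restores it
// to i8, so ValueVTs = {i32, i8} and Offsets = {0, 4}.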
339
340// We return an EVT that can hold N VTs
341// If the VT is a vector, the resulting EVT is a flat vector with the same
342// element type as VT's element type.
343static EVT getVectorizedVT(EVT VT, unsigned N, LLVMContext &C) {
344 if (N == 1)
345 return VT;
346
347 return VT.isVector() ? EVT::getVectorVT(C, VT.getScalarType(),
348 VT.getVectorNumElements() * N)
349 : EVT::getVectorVT(C, VT, N);
350}
351
353 const SDLoc &dl, SelectionDAG &DAG) {
354 if (V.getValueType() == VT) {
355 assert(I == 0 && "Index must be 0 for scalar value");
356 return V;
357 }
358
359 if (!VT.isVector())
360 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, V,
361 DAG.getVectorIdxConstant(I, dl));
362
363 return DAG.getNode(
364 ISD::EXTRACT_SUBVECTOR, dl, VT, V,
366}
367
368template <typename T>
369static inline SDValue getBuildVectorizedValue(unsigned N, const SDLoc &dl,
370 SelectionDAG &DAG, T GetElement) {
371 if (N == 1)
372 return GetElement(0);
373
375 for (const unsigned I : llvm::seq(N)) {
376 SDValue Val = GetElement(I);
377 if (Val.getValueType().isVector())
378 DAG.ExtractVectorElements(Val, Values);
379 else
380 Values.push_back(Val);
381 }
382
383 EVT VT = EVT::getVectorVT(*DAG.getContext(), Values[0].getValueType(),
384 Values.size());
385 return DAG.getBuildVector(VT, dl, Values);
386}
387
388/// PromoteScalarIntegerPTX
389/// Used to make sure the arguments/returns are suitable for passing
390/// and promote them to a larger size if they're not.
391///
392/// Returns the promoted EVT, or \p VT unchanged if no promotion is needed.
393static EVT promoteScalarIntegerPTX(const EVT VT) {
394 if (VT.isScalarInteger()) {
395 switch (PowerOf2Ceil(VT.getFixedSizeInBits())) {
396 default:
397 llvm_unreachable(
398 "Promotion is not suitable for scalars of size larger than 64-bits");
399 case 1:
400 return MVT::i1;
401 case 2:
402 case 4:
403 case 8:
404 return MVT::i8;
405 case 16:
406 return MVT::i16;
407 case 32:
408 return MVT::i32;
409 case 64:
410 return MVT::i64;
411 }
412 }
413 return VT;
414}
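// Editorial example: promoteScalarIntegerPTX(i24) == i32 (PowerOf2Ceil(24) is
// 32), promoteScalarIntegerPTX(i1) == i1, and any non-integer type such as
// f32 is returned unchanged.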
415
416// Check whether we can merge loads/stores of some of the pieces of a
417// flattened function parameter or return value into a single vector
418// load/store.
419//
420// The flattened parameter is represented as a list of EVTs and
421// offsets, and the whole structure is aligned to ParamAlignment. This
422// function determines whether we can load/store pieces of the
423// parameter starting at index Idx using a single vectorized op of
424// size AccessSize. If so, it returns the number of param pieces
425// covered by the vector op. Otherwise, it returns 1.
426template <typename T>
427static unsigned canMergeParamLoadStoresStartingAt(
428 unsigned Idx, uint32_t AccessSize, const SmallVectorImpl<EVT> &ValueVTs,
429 const SmallVectorImpl<T> &Offsets, Align ParamAlignment) {
430
431 // Can't vectorize if param alignment is not sufficient.
432 if (ParamAlignment < AccessSize)
433 return 1;
434 // Can't vectorize if offset is not aligned.
435 if (Offsets[Idx] & (AccessSize - 1))
436 return 1;
437
438 EVT EltVT = ValueVTs[Idx];
439 unsigned EltSize = EltVT.getStoreSize();
440
441 // Element is too large to vectorize.
442 if (EltSize >= AccessSize)
443 return 1;
444
445 unsigned NumElts = AccessSize / EltSize;
446 // Can't vectorize if AccessSize is not a multiple of EltSize.
447 if (AccessSize != EltSize * NumElts)
448 return 1;
449
450 // We don't have enough elements to vectorize.
451 if (Idx + NumElts > ValueVTs.size())
452 return 1;
453
454 // PTX ISA can only deal with 2- and 4-element vector ops.
455 if (NumElts != 4 && NumElts != 2)
456 return 1;
457
458 for (unsigned j = Idx + 1; j < Idx + NumElts; ++j) {
459 // Types do not match.
460 if (ValueVTs[j] != EltVT)
461 return 1;
462
463 // Elements are not contiguous.
464 if (Offsets[j] - Offsets[j - 1] != EltSize)
465 return 1;
466 }
467 // OK. We can vectorize ValueVTs[i..i+NumElts)
468 return NumElts;
469}
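// Editorial example: with ValueVTs = {f32, f32, f32, f32} at offsets
// {0, 4, 8, 12} and ParamAlignment = 16, a query with Idx = 0 and
// AccessSize = 16 returns 4 (one 128-bit access); with ParamAlignment = 8 the
// 16-byte access is rejected, but AccessSize = 8 still returns 2.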
470
471// Computes whether and how we can vectorize the loads/stores of a
472// flattened function parameter or return value.
473//
474// The flattened parameter is represented as the list of ValueVTs and
475// Offsets, and is aligned to ParamAlignment bytes. We return a vector of
476// access widths: one entry per load/store, giving the number of pieces it
477// covers (an entry of 1 means that piece is accessed as a scalar). The
478// entries sum to ValueVTs.size().
479template <typename T>
480static SmallVector<unsigned, 16>
481VectorizePTXValueVTs(const SmallVectorImpl<EVT> &ValueVTs,
482 const SmallVectorImpl<T> &Offsets, Align ParamAlignment,
483 bool IsVAArg = false) {
484 // Set vector size to match ValueVTs and mark all elements as
485 // scalars by default.
486
487 if (IsVAArg)
488 return SmallVector<unsigned>(ValueVTs.size(), 1);
489
490 SmallVector<unsigned, 16> VectorInfo;
491
492 const auto GetNumElts = [&](unsigned I) -> unsigned {
493 for (const unsigned AccessSize : {16, 8, 4, 2}) {
494 const unsigned NumElts = canMergeParamLoadStoresStartingAt(
495 I, AccessSize, ValueVTs, Offsets, ParamAlignment);
496 assert((NumElts == 1 || NumElts == 2 || NumElts == 4) &&
497 "Unexpected vectorization size");
498 if (NumElts != 1)
499 return NumElts;
500 }
501 return 1;
502 };
503
504 // Check what we can vectorize using 128/64/32-bit accesses.
505 for (unsigned I = 0, E = ValueVTs.size(); I != E;) {
506 const unsigned NumElts = GetNumElts(I);
507 VectorInfo.push_back(NumElts);
508 I += NumElts;
509 }
510 assert(std::accumulate(VectorInfo.begin(), VectorInfo.end(), 0u) ==
511 ValueVTs.size());
512 return VectorInfo;
513}
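// Editorial example: for ValueVTs = {f32, f32, f32, f32, i8} at offsets
// {0, 4, 8, 12, 16} with ParamAlignment = 16, this returns {4, 1}: the four
// f32 pieces are merged into one 128-bit access and the trailing i8 is
// accessed as a scalar.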
514
515// NVPTXTargetLowering Constructor.
516NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
517                                         const NVPTXSubtarget &STI)
518 : TargetLowering(TM, STI), nvTM(&TM), STI(STI), GlobalUniqueCallSite(0) {
519 // Always lower memset, memcpy, and memmove intrinsics to load/store
520 // instructions rather than generating calls to memset, memcpy, or
521 // memmove.
525
528
529 // Jump is Expensive. Don't create extra control flow for 'and', 'or'
530 // condition branches.
531 setJumpIsExpensive(true);
532
533 // Wide divides are _very_ slow. Try to reduce the width of the divide if
534 // possible.
535 addBypassSlowDiv(64, 32);
536
537 // By default, use the Source scheduling
538 if (sched4reg)
539 setSchedulingPreference(Sched::RegPressure);
540 else
541 setSchedulingPreference(Sched::Source);
542
543 auto setFP16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
544 LegalizeAction NoF16Action) {
545 bool IsOpSupported = STI.allowFP16Math();
546 switch (Op) {
547 // Several FP16 instructions are available on sm_80 only.
548 case ISD::FMINNUM:
549 case ISD::FMAXNUM:
552 case ISD::FMAXIMUM:
553 case ISD::FMINIMUM:
554 case ISD::FMAXIMUMNUM:
555 case ISD::FMINIMUMNUM:
556 IsOpSupported &= STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;
557 break;
558 case ISD::FEXP2:
559 IsOpSupported &= STI.getSmVersion() >= 75 && STI.getPTXVersion() >= 70;
560 break;
561 }
562 setOperationAction(Op, VT, IsOpSupported ? Action : NoF16Action);
563 };
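// Editorial note on the helper above: a call such as
// setFP16OperationAction(ISD::FMINNUM, MVT::f16, Legal, Promote) would mark
// f16 FMINNUM Legal only on sm_80+ with PTX 7.0+ (and when fp16 math is
// allowed), falling back to Promote everywhere else, while an op without an
// sm-version restriction stays Legal whenever allowFP16Math() is true.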
564
565 auto setBF16OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
566 LegalizeAction NoBF16Action) {
567 bool IsOpSupported = STI.hasNativeBF16Support(Op);
569 Op, VT, IsOpSupported ? Action : NoBF16Action);
570 };
571
572 auto setI16x2OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
573 LegalizeAction NoI16x2Action) {
574 bool IsOpSupported = false;
575 // instructions are available on sm_90 only
576 switch (Op) {
577 case ISD::ADD:
578 case ISD::SMAX:
579 case ISD::SMIN:
580 case ISD::UMIN:
581 case ISD::UMAX:
582 IsOpSupported = STI.getSmVersion() >= 90 && STI.getPTXVersion() >= 80;
583 break;
584 }
585 setOperationAction(Op, VT, IsOpSupported ? Action : NoI16x2Action);
586 };
587
588 addRegisterClass(MVT::i1, &NVPTX::B1RegClass);
589 addRegisterClass(MVT::i16, &NVPTX::B16RegClass);
590 addRegisterClass(MVT::v2i16, &NVPTX::B32RegClass);
591 addRegisterClass(MVT::v4i8, &NVPTX::B32RegClass);
592 addRegisterClass(MVT::i32, &NVPTX::B32RegClass);
593 addRegisterClass(MVT::i64, &NVPTX::B64RegClass);
594 addRegisterClass(MVT::f32, &NVPTX::B32RegClass);
595 addRegisterClass(MVT::f64, &NVPTX::B64RegClass);
596 addRegisterClass(MVT::f16, &NVPTX::B16RegClass);
597 addRegisterClass(MVT::v2f16, &NVPTX::B32RegClass);
598 addRegisterClass(MVT::bf16, &NVPTX::B16RegClass);
599 addRegisterClass(MVT::v2bf16, &NVPTX::B32RegClass);
600
601 if (STI.hasF32x2Instructions()) {
602 addRegisterClass(MVT::v2f32, &NVPTX::B64RegClass);
603 addRegisterClass(MVT::v2i32, &NVPTX::B64RegClass);
604 }
605
606 // Conversion to/from FP16/FP16x2 is always legal.
611
613 if (STI.getSmVersion() >= 30 && STI.getPTXVersion() > 31)
615
616 setFP16OperationAction(ISD::SETCC, MVT::f16, Legal, Promote);
617 setFP16OperationAction(ISD::SETCC, MVT::v2f16, Legal, Expand);
618
619 // Conversion to/from BF16/BF16x2 is always legal.
624
625 setBF16OperationAction(ISD::SETCC, MVT::v2bf16, Legal, Expand);
626 setBF16OperationAction(ISD::SETCC, MVT::bf16, Legal, Promote);
627 if (getOperationAction(ISD::SETCC, MVT::bf16) == Promote)
628 AddPromotedToType(ISD::SETCC, MVT::bf16, MVT::f32);
629
630 // Conversion to/from i16/i16x2 is always legal.
635
640
641 // No support for these operations with v2f32/v2i32
642 setOperationAction(ISD::INSERT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32}, Expand);
643 setOperationAction(ISD::VECTOR_SHUFFLE, {MVT::v2f32, MVT::v2i32}, Expand);
644
647 MVT::v2i32, Expand);
648
649 // Need custom lowering in case the index is dynamic.
650 if (STI.hasF32x2Instructions())
651 setOperationAction(ISD::EXTRACT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32},
652 Custom);
653
654 // Custom conversions to/from v2i8.
656
657 // Only logical ops can be done on v4i8/v2i32 directly, others must be done
658 // elementwise.
675 {MVT::v4i8, MVT::v2i32}, Expand);
676
677 // Operations not directly supported by NVPTX.
678 for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,
679 MVT::v2f32, MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16,
680 MVT::v4i8, MVT::i32, MVT::v2i32, MVT::i64}) {
683 }
684
685 // We don't want ops like FMINIMUM or UMAX to be lowered to SETCC+VSELECT.
686 setOperationAction(ISD::VSELECT, {MVT::v2f32, MVT::v2i32}, Expand);
687
688 // Some SIGN_EXTEND_INREG can be done using cvt instruction.
689 // For others we will expand to a SHL/SRA pair.
695 setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::v2i16, MVT::v2i32}, Expand);
696
703
706
708 {MVT::i8, MVT::i16, MVT::v2i16, MVT::i32, MVT::i64},
709 Expand);
710
711 if (STI.hasHWROT32()) {
714 Custom);
715 }
716
717 setOperationAction(ISD::BR_JT, MVT::Other, STI.hasBrx() ? Legal : Expand);
719
720 // We want to legalize constant-related memmove and memcpy
721 // intrinsics.
723
724 // FP extload/truncstore is not legal in PTX. We need to expand all these.
725 for (auto FloatVTs :
727 for (MVT ValVT : FloatVTs) {
728 for (MVT MemVT : FloatVTs) {
729 setLoadExtAction(ISD::EXTLOAD, ValVT, MemVT, Expand);
730 setTruncStoreAction(ValVT, MemVT, Expand);
731 }
732 }
733 }
734
735 // To improve CodeGen we'll legalize any-extend loads to zext loads. This is
736 // how they'll be lowered in ISel anyway, and by doing this a little earlier
737 // we allow for more DAG combine opportunities.
738 for (auto IntVTs :
740 for (MVT ValVT : IntVTs)
741 for (MVT MemVT : IntVTs)
742 if (isTypeLegal(ValVT))
743 setLoadExtAction(ISD::EXTLOAD, ValVT, MemVT, Custom);
744
745 // PTX does not support load / store predicate registers
747 for (MVT VT : MVT::integer_valuetypes()) {
749 Promote);
750 setTruncStoreAction(VT, MVT::i1, Expand);
751 }
752
753 // Disable generation of extload/truncstore for v2i32/v2i16/v2i8. The generic
754 // expansion for these nodes when they are unaligned is incorrect if the
755 // type is a vector.
756 //
757 // TODO: Fix the generic expansion for these nodes found in
758 // TargetLowering::expandUnalignedLoad/Store.
760 MVT::v2i8, Expand);
762 {MVT::v2i8, MVT::v2i16}, Expand);
763 setTruncStoreAction(MVT::v2i16, MVT::v2i8, Expand);
764 setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
765 setTruncStoreAction(MVT::v2i32, MVT::v2i8, Expand);
766
767 // Register custom handling for illegal type loads/stores. We'll try to custom
768 // lower almost all illegal types and logic in the lowering will discard cases
769 // we can't handle.
770 setOperationAction({ISD::LOAD, ISD::STORE}, {MVT::i128, MVT::i256, MVT::f128},
771 Custom);
773 if (!isTypeLegal(VT) && VT.getStoreSizeInBits() <= 256)
775 Custom);
776
777 // Custom legalization for LDU intrinsics.
778 // TODO: The logic to lower these is not very robust and we should rewrite it.
779 // Perhaps LDU should not be represented as an intrinsic at all.
782 if (IsPTXVectorType(VT))
784
788 MVT::i1, Expand);
789
790 // This is legal in NVPTX
795
796 setOperationAction(ISD::DYNAMIC_STACKALLOC, {MVT::i32, MVT::i64}, Custom);
798
799 // TRAP can be lowered to PTX trap
800 setOperationAction(ISD::TRAP, MVT::Other, Legal);
801 // DEBUGTRAP can be lowered to PTX brkpt
803
804 // Support varargs.
809
811 {MVT::i16, MVT::i32, MVT::i64}, Legal);
812
814 Promote);
817
818 setI16x2OperationAction(ISD::ABS, MVT::v2i16, Legal, Custom);
819 setI16x2OperationAction(ISD::SMIN, MVT::v2i16, Legal, Custom);
820 setI16x2OperationAction(ISD::SMAX, MVT::v2i16, Legal, Custom);
821 setI16x2OperationAction(ISD::UMIN, MVT::v2i16, Legal, Custom);
822 setI16x2OperationAction(ISD::UMAX, MVT::v2i16, Legal, Custom);
823 setI16x2OperationAction(ISD::CTPOP, MVT::v2i16, Legal, Expand);
824 setI16x2OperationAction(ISD::CTLZ, MVT::v2i16, Legal, Expand);
825
826 setI16x2OperationAction(ISD::ADD, MVT::v2i16, Legal, Custom);
827 setI16x2OperationAction(ISD::SUB, MVT::v2i16, Legal, Custom);
828 setI16x2OperationAction(ISD::MUL, MVT::v2i16, Legal, Custom);
829 setI16x2OperationAction(ISD::SHL, MVT::v2i16, Legal, Custom);
830 setI16x2OperationAction(ISD::SREM, MVT::v2i16, Legal, Custom);
831 setI16x2OperationAction(ISD::UREM, MVT::v2i16, Legal, Custom);
832
833 // Other arithmetic and logic ops are unsupported.
837 {MVT::v2i16, MVT::v2i32}, Expand);
838
839 // v2i32 is not supported for any arithmetic operations
844 MVT::v2i32, Expand);
845
850 if (STI.getPTXVersion() >= 43) {
855 }
856
858 setOperationAction(ISD::CTTZ, {MVT::v2i16, MVT::v2i32}, Expand);
861
862 // PTX does not directly support SELP of i1, so promote to i32 first
864
865 // PTX cannot multiply two i64s in a single instruction.
868
869 // We have some custom DAG combine patterns for these nodes
879
880 // setcc for f16x2 and bf16x2 needs special handling to prevent
881 // legalizer's attempt to scalarize it due to v2i1 not being legal.
882 if (STI.allowFP16Math() || STI.hasBF16Math())
884
885 // Vector reduction operations. These may be turned into shuffle or tree
886 // reductions depending on what instructions are available for each type.
888 MVT EltVT = VT.getVectorElementType();
889 if (EltVT == MVT::f32 || EltVT == MVT::f64) {
892 VT, Custom);
893 }
894 }
895
896 // Promote fp16 arithmetic if fp16 hardware isn't available or the
897 // user passed --nvptx-no-fp16-math. The flag is useful because,
898 // although sm_53+ GPUs have some sort of FP16 support in
899 // hardware, only sm_53 and sm_60 have a full implementation. Others
900 // have only a token amount of hardware and are likely to run faster
901 // by using fp32 units instead.
902 for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB, ISD::FMA}) {
903 setFP16OperationAction(Op, MVT::f16, Legal, Promote);
904 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
905 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
906 // bf16 must be promoted to f32.
907 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
908 if (getOperationAction(Op, MVT::bf16) == Promote)
909 AddPromotedToType(Op, MVT::bf16, MVT::f32);
910 setOperationAction(Op, MVT::v2f32,
911 STI.hasF32x2Instructions() ? Legal : Expand);
912 }
913
914 // On SM80, we select add/mul/sub as fma to avoid promotion to float
915 for (const auto &Op : {ISD::FADD, ISD::FMUL, ISD::FSUB}) {
916 for (const auto &VT : {MVT::bf16, MVT::v2bf16}) {
917 if (!STI.hasNativeBF16Support(Op) && STI.hasNativeBF16Support(ISD::FMA)) {
919 }
920 }
921 }
922
923 // f16/f16x2 neg was introduced in PTX 60, SM_53.
924 const bool IsFP16FP16x2NegAvailable = STI.getSmVersion() >= 53 &&
925 STI.getPTXVersion() >= 60 &&
926 STI.allowFP16Math();
927 for (const auto &VT : {MVT::f16, MVT::v2f16})
929 IsFP16FP16x2NegAvailable ? Legal : Expand);
930
931 setBF16OperationAction(ISD::FNEG, MVT::bf16, Legal, Expand);
932 setBF16OperationAction(ISD::FNEG, MVT::v2bf16, Legal, Expand);
933 setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
934 // (would be) Library functions.
935
936 // These map to conversion instructions for scalar FP types.
937 for (const auto &Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT, ISD::FRINT,
939 setOperationAction(Op, MVT::f16, Legal);
940 setOperationAction(Op, MVT::f32, Legal);
941 setOperationAction(Op, MVT::f64, Legal);
942 setOperationAction(Op, MVT::v2f16, Expand);
943 setOperationAction(Op, MVT::v2bf16, Expand);
944 setOperationAction(Op, MVT::v2f32, Expand);
945 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
946 if (getOperationAction(Op, MVT::bf16) == Promote)
947 AddPromotedToType(Op, MVT::bf16, MVT::f32);
948 }
949
950 if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71) {
952 }
953 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
954 for (MVT VT : {MVT::bf16, MVT::f32, MVT::f64}) {
957 }
958 }
959
960 // Expand v2f32 = fp_extend
962 // Expand v2[b]f16 = fp_round v2f32
963 setOperationAction(ISD::FP_ROUND, {MVT::v2bf16, MVT::v2f16}, Expand);
964
965 // sm_80 only has conversions between f32 and bf16. Custom lower all other
966 // bf16 conversions.
967 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
968 for (MVT VT : {MVT::i1, MVT::i16, MVT::i32, MVT::i64}) {
971 VT, Custom);
972 }
975 MVT::bf16, Custom);
976 }
977
984 AddPromotedToType(ISD::FROUND, MVT::bf16, MVT::f32);
985
986 // 'Expand' implements FCOPYSIGN without calling an external library.
993
994 // These map to corresponding instructions for f32/f64. f16 must be
995 // promoted to f32. v2f16 is expanded to f16, which is then promoted
996 // to f32.
997 for (const auto &Op :
999 setOperationAction(Op, MVT::f16, Promote);
1000 setOperationAction(Op, MVT::f32, Legal);
1001 // only div/rem/sqrt are legal for f64
1002 if (Op == ISD::FDIV || Op == ISD::FREM || Op == ISD::FSQRT) {
1003 setOperationAction(Op, MVT::f64, Legal);
1004 }
1005 setOperationAction(Op, {MVT::v2f16, MVT::v2bf16, MVT::v2f32}, Expand);
1006 setOperationAction(Op, MVT::bf16, Promote);
1007 AddPromotedToType(Op, MVT::bf16, MVT::f32);
1008 }
1009 setOperationAction(ISD::FREM, {MVT::f32, MVT::f64}, Custom);
1010
1011 setOperationAction(ISD::FABS, {MVT::f32, MVT::f64}, Legal);
1012 setOperationAction(ISD::FABS, MVT::v2f32, Expand);
1013 if (STI.getPTXVersion() >= 65) {
1014 setFP16OperationAction(ISD::FABS, MVT::f16, Legal, Promote);
1015 setFP16OperationAction(ISD::FABS, MVT::v2f16, Legal, Expand);
1016 } else {
1018 setOperationAction(ISD::FABS, MVT::v2f16, Expand);
1019 }
1020 setBF16OperationAction(ISD::FABS, MVT::v2bf16, Legal, Expand);
1021 setBF16OperationAction(ISD::FABS, MVT::bf16, Legal, Promote);
1022 if (getOperationAction(ISD::FABS, MVT::bf16) == Promote)
1023 AddPromotedToType(ISD::FABS, MVT::bf16, MVT::f32);
1024
1025 for (const auto &Op :
1027 setOperationAction(Op, MVT::f32, Legal);
1028 setOperationAction(Op, MVT::f64, Legal);
1029 setFP16OperationAction(Op, MVT::f16, Legal, Promote);
1030 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
1031 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
1032 setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
1033 if (getOperationAction(Op, MVT::bf16) == Promote)
1034 AddPromotedToType(Op, MVT::bf16, MVT::f32);
1035 setOperationAction(Op, MVT::v2f32, Expand);
1036 }
1037 bool SupportsF32MinMaxNaN =
1038 STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;
1039 for (const auto &Op : {ISD::FMINIMUM, ISD::FMAXIMUM}) {
1040 setOperationAction(Op, MVT::f32, SupportsF32MinMaxNaN ? Legal : Expand);
1041 setFP16OperationAction(Op, MVT::f16, Legal, Expand);
1042 setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
1043 setBF16OperationAction(Op, MVT::bf16, Legal, Expand);
1044 setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
1045 setOperationAction(Op, MVT::v2f32, Expand);
1046 }
1047
1048 // Custom lowering for inline asm with 128-bit operands
1051
1052 // FEXP2 support:
1053 // - f32
1054 // - f16/f16x2 (sm_70+, PTX 7.0+)
1055 // - bf16/bf16x2 (sm_90+, PTX 7.8+)
1056 // When f16/bf16 types aren't supported, they are promoted/expanded to f32.
1058 setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
1059 setFP16OperationAction(ISD::FEXP2, MVT::f16, Legal, Promote);
1060 setFP16OperationAction(ISD::FEXP2, MVT::v2f16, Legal, Expand);
1061 setBF16OperationAction(ISD::FEXP2, MVT::bf16, Legal, Promote);
1062 setBF16OperationAction(ISD::FEXP2, MVT::v2bf16, Legal, Expand);
1063
1064 // FLOG2 supports f32 only
1065 // f16/bf16 types aren't supported, but they are promoted/expanded to f32.
1066 if (UseApproxLog2F32) {
1068 setOperationPromotedToType(ISD::FLOG2, MVT::f16, MVT::f32);
1069 setOperationPromotedToType(ISD::FLOG2, MVT::bf16, MVT::f32);
1070 setOperationAction(ISD::FLOG2, {MVT::v2f16, MVT::v2bf16, MVT::v2f32},
1071 Expand);
1072 }
1073
1074 setOperationAction(ISD::ADDRSPACECAST, {MVT::i32, MVT::i64}, Custom);
1075
1076 setOperationAction(ISD::ATOMIC_LOAD_SUB, {MVT::i32, MVT::i64}, Expand);
1077
1078 // atom.b128 is legal in PTX but since we don't represent i128 as a legal
1079 // type, we need to custom lower it.
1081 Custom);
1082
1083 // Now deduce the information based on the above mentioned
1084 // actions
1085 computeRegisterProperties(STI.getRegisterInfo());
1086
1087 // PTX support for 16-bit CAS is emulated. Only use 32+
1088 setMinCmpXchgSizeInBits(STI.getMinCmpXchgSizeInBits());
1089 setMaxAtomicSizeInBitsSupported(STI.hasAtomSwap128() ? 128 : 64);
1091
1092 // Custom lowering for tcgen05.ld vector operands
1094 {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,
1095 MVT::v32i32, MVT::v64i32, MVT::v128i32},
1096 Custom);
1097
1098 // Custom lowering for tcgen05.st vector operands
1100 {MVT::v2i32, MVT::v4i32, MVT::v8i32, MVT::v16i32,
1101 MVT::v32i32, MVT::v64i32, MVT::v128i32, MVT::Other},
1102 Custom);
1103
1104 // Enable custom lowering for the following:
1105 // * MVT::i128 - clusterlaunchcontrol
1106 // * MVT::i32 - prmt
1107 // * MVT::v4f32 - cvt_rs fp{4/6/8}x4 intrinsics
1108 // * MVT::Other - internal.addrspace.wrap
1110 {MVT::i32, MVT::i128, MVT::v4f32, MVT::Other}, Custom);
1111
1112 // Custom lowering for bswap
1113 setOperationAction(ISD::BSWAP, {MVT::i16, MVT::i32, MVT::i64, MVT::v2i16},
1114 Custom);
1115}
1116
1119 if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
1120 VT.getScalarType() == MVT::i1)
1121 return TypeSplitVector;
1123}
1124
1126 int Enabled, int &ExtraSteps,
1127 bool &UseOneConst,
1128 bool Reciprocal) const {
1131 return SDValue();
1132
1133 if (ExtraSteps == ReciprocalEstimate::Unspecified)
1134 ExtraSteps = 0;
1135
1136 SDLoc DL(Operand);
1137 EVT VT = Operand.getValueType();
1138 bool Ftz = useF32FTZ(DAG.getMachineFunction());
1139
1140 auto MakeIntrinsicCall = [&](Intrinsic::ID IID) {
1141 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
1142 DAG.getConstant(IID, DL, MVT::i32), Operand);
1143 };
1144
1145 // The sqrt and rsqrt refinement processes assume we always start out with an
1146 // approximation of the rsqrt. Therefore, if we're going to do any refinement
1147 // (i.e. ExtraSteps > 0), we must return an rsqrt. But if we're *not* doing
1148 // any refinement, we must return a regular sqrt.
1149 if (Reciprocal || ExtraSteps > 0) {
1150 if (VT == MVT::f32)
1151 return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_rsqrt_approx_ftz_f
1152 : Intrinsic::nvvm_rsqrt_approx_f);
1153 else if (VT == MVT::f64)
1154 return MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d);
1155 else
1156 return SDValue();
1157 } else {
1158 if (VT == MVT::f32)
1159 return MakeIntrinsicCall(Ftz ? Intrinsic::nvvm_sqrt_approx_ftz_f
1160 : Intrinsic::nvvm_sqrt_approx_f);
1161 else {
1162 // There's no sqrt.approx.f64 instruction, so we emit
1163 // reciprocal(rsqrt(x)). This is faster than
1164 // select(x == 0, 0, x * rsqrt(x)). (In fact, it's faster than plain
1165 // x * rsqrt(x).)
1166 return DAG.getNode(
1168 DAG.getConstant(Intrinsic::nvvm_rcp_approx_ftz_d, DL, MVT::i32),
1169 MakeIntrinsicCall(Intrinsic::nvvm_rsqrt_approx_d));
1170 }
1171 }
1172}
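// Editorial example (follows from the branches above): for f32 with
// fast-math and ExtraSteps == 0, a plain sqrtf(x) becomes sqrt.approx.f32
// (or sqrt.approx.ftz.f32 under FTZ), while 1.0f / sqrtf(x) takes the
// Reciprocal path and becomes rsqrt.approx.f32; f64 has no sqrt.approx, so a
// non-reciprocal f64 sqrt is emitted as rcp.approx.ftz.f64(rsqrt.approx.f64(x)).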
1173
1175 const DataLayout &DL, Type *RetTy, const ArgListTy &Args,
1177 std::optional<unsigned> FirstVAArg, const CallBase &CB,
1178 unsigned UniqueCallSite) const {
1179 auto PtrVT = getPointerTy(DL);
1180
1181 std::string Prototype;
1182 raw_string_ostream O(Prototype);
1183 O << "prototype_" << UniqueCallSite << " : .callprototype ";
1184
1185 if (RetTy->isVoidTy()) {
1186 O << "()";
1187 } else {
1188 O << "(";
1189 if (shouldPassAsArray(RetTy)) {
1190 const Align RetAlign = getArgumentAlignment(&CB, RetTy, 0, DL);
1191 O << ".param .align " << RetAlign.value() << " .b8 _["
1192 << DL.getTypeAllocSize(RetTy) << "]";
1193 } else if (RetTy->isFloatingPointTy() || RetTy->isIntegerTy()) {
1194 unsigned size = 0;
1195 if (auto *ITy = dyn_cast<IntegerType>(RetTy)) {
1196 size = ITy->getBitWidth();
1197 } else {
1198 assert(RetTy->isFloatingPointTy() &&
1199 "Floating point type expected here");
1200 size = RetTy->getPrimitiveSizeInBits();
1201 }
1202 // PTX ABI requires all scalar return values to be at least 32
1203 // bits in size. fp16 normally uses .b16 as its storage type in
1204 // PTX, so its size must be adjusted here, too.
1206
1207 O << ".param .b" << size << " _";
1208 } else if (isa<PointerType>(RetTy)) {
1209 O << ".param .b" << PtrVT.getSizeInBits() << " _";
1210 } else {
1211 llvm_unreachable("Unknown return type");
1212 }
1213 O << ") ";
1214 }
1215 O << "_ (";
1216
1217 bool first = true;
1218
1219 const unsigned NumArgs = FirstVAArg.value_or(Args.size());
1220 auto AllOuts = ArrayRef(Outs);
1221 for (const unsigned I : llvm::seq(NumArgs)) {
1222 const auto ArgOuts =
1223 AllOuts.take_while([I](auto O) { return O.OrigArgIndex == I; });
1224 AllOuts = AllOuts.drop_front(ArgOuts.size());
1225
1226 Type *Ty = Args[I].Ty;
1227 if (!first) {
1228 O << ", ";
1229 }
1230 first = false;
1231
1232 if (ArgOuts[0].Flags.isByVal()) {
1233 // Indirect calls need strict ABI alignment so we disable optimizations by
1234 // not providing a function to optimize.
1235 Type *ETy = Args[I].IndirectType;
1236 Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1237 Align ParamByValAlign =
1238 getFunctionByValParamAlign(/*F=*/nullptr, ETy, InitialAlign, DL);
1239
1240 O << ".param .align " << ParamByValAlign.value() << " .b8 _["
1241 << ArgOuts[0].Flags.getByValSize() << "]";
1242 } else {
1243 if (shouldPassAsArray(Ty)) {
1244 Align ParamAlign =
1245 getArgumentAlignment(&CB, Ty, I + AttributeList::FirstArgIndex, DL);
1246 O << ".param .align " << ParamAlign.value() << " .b8 _["
1247 << DL.getTypeAllocSize(Ty) << "]";
1248 continue;
1249 }
1250 // i8 types in IR will be i16 types in SDAG
1251 assert((getValueType(DL, Ty) == ArgOuts[0].VT ||
1252 (getValueType(DL, Ty) == MVT::i8 && ArgOuts[0].VT == MVT::i16)) &&
1253 "type mismatch between callee prototype and arguments");
1254 // scalar type
1255 unsigned sz = 0;
1256 if (auto *ITy = dyn_cast<IntegerType>(Ty)) {
1257 sz = promoteScalarArgumentSize(ITy->getBitWidth());
1258 } else if (isa<PointerType>(Ty)) {
1259 sz = PtrVT.getSizeInBits();
1260 } else {
1261 sz = Ty->getPrimitiveSizeInBits();
1262 }
1263 O << ".param .b" << sz << " _";
1264 }
1265 }
1266
1267 if (FirstVAArg)
1268 O << (first ? "" : ",") << " .param .align "
1269 << STI.getMaxRequiredAlignment() << " .b8 _[]";
1270 O << ")";
1271 if (shouldEmitPTXNoReturn(&CB, *nvTM))
1272 O << " .noreturn";
1273 O << ";";
1274
1275 return Prototype;
1276}
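// Editorial example of the string built above (assuming call site 3 and a
// callee of type float(int, double)):
//   prototype_3 : .callprototype (.param .b32 _) _ (.param .b32 _, .param .b64 _);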
1277
1279 const Function *F, Type *Ty, unsigned Idx, const DataLayout &DL) const {
1280 return getAlign(*F, Idx).value_or(getFunctionParamOptimizedAlign(F, Ty, DL));
1281}
1282
1283Align NVPTXTargetLowering::getArgumentAlignment(const CallBase *CB, Type *Ty,
1284 unsigned Idx,
1285 const DataLayout &DL) const {
1286 if (!CB) {
1287 // CallSite is zero, fallback to ABI type alignment
1288 return DL.getABITypeAlign(Ty);
1289 }
1290
1291 const Function *DirectCallee = CB->getCalledFunction();
1292
1293 if (!DirectCallee) {
1294 // We don't have a direct function symbol, but that may be because of
1295 // constant cast instructions in the call.
1296
1297 // With bitcast'd call targets, the instruction will be the call
1298 if (const auto *CI = dyn_cast<CallInst>(CB)) {
1299 // Check if we have call alignment metadata
1300 if (MaybeAlign StackAlign = getAlign(*CI, Idx))
1301 return StackAlign.value();
1302 }
1303 DirectCallee = getMaybeBitcastedCallee(CB);
1304 }
1305
1306 // Check for function alignment information if we found that the
1307 // ultimate target is a Function
1308 if (DirectCallee)
1309 return getFunctionArgumentAlignment(DirectCallee, Ty, Idx, DL);
1310
1311 // Call is indirect, fall back to the ABI type alignment
1312 return DL.getABITypeAlign(Ty);
1313}
1314
1316 const GlobalAddressSDNode *Func) {
1317 if (!Func)
1318 return false;
1319 if (auto *CalleeFunc = dyn_cast<Function>(Func->getGlobal()))
1320 return CB->getFunctionType() != CalleeFunc->getFunctionType();
1321 return false;
1322}
1323
1325 const DataLayout &DL,
1326 const TargetLowering &TL) {
1327 if (Ptr->getOpcode() == ISD::FrameIndex) {
1328 auto Ty = TL.getPointerTy(DL, ADDRESS_SPACE_LOCAL);
1329 Ptr = DAG.getAddrSpaceCast(SDLoc(), Ty, Ptr, ADDRESS_SPACE_GENERIC,
1331
1333 }
1334
1335 // Peel off an addrspacecast to generic and load directly from the specific
1336 // address space.
1337 if (Ptr->getOpcode() == ISD::ADDRSPACECAST) {
1338 const auto *ASC = cast<AddrSpaceCastSDNode>(Ptr);
1339 if (ASC->getDestAddressSpace() == ADDRESS_SPACE_GENERIC) {
1340 Ptr = ASC->getOperand(0);
1341 return MachinePointerInfo(ASC->getSrcAddressSpace());
1342 }
1343 }
1344
1345 return MachinePointerInfo();
1346}
1347
1349 if (Flags.isSExt())
1350 return ISD::SIGN_EXTEND;
1351 if (Flags.isZExt())
1352 return ISD::ZERO_EXTEND;
1353 return ISD::ANY_EXTEND;
1354}
1355
1357 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
1358 SDLoc dl) {
1359 const EVT ActualVT = V.getValueType();
1360 assert((ActualVT == ExpectedVT ||
1361 (ExpectedVT.isInteger() && ActualVT.isInteger())) &&
1362 "Non-integer argument type size mismatch");
1363 if (ExpectedVT.bitsGT(ActualVT))
1364 return DAG.getNode(getExtOpcode(Flags), dl, ExpectedVT, V);
1365 if (ExpectedVT.bitsLT(ActualVT))
1366 return DAG.getNode(ISD::TRUNCATE, dl, ExpectedVT, V);
1367
1368 return V;
1369}
1370
1372 SmallVectorImpl<SDValue> &InVals) const {
1373
1374 if (CLI.IsVarArg && (STI.getPTXVersion() < 60 || STI.getSmVersion() < 30))
1376 "Support for variadic functions (unsized array parameter) introduced "
1377 "in PTX ISA version 6.0 and requires target sm_30.");
1378
1379 SelectionDAG &DAG = CLI.DAG;
1380 SDLoc dl = CLI.DL;
1381 const SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1382 SDValue Callee = CLI.Callee;
1383 ArgListTy &Args = CLI.getArgs();
1384 Type *RetTy = CLI.RetTy;
1385 const CallBase *CB = CLI.CB;
1386 const DataLayout &DL = DAG.getDataLayout();
1387 LLVMContext &Ctx = *DAG.getContext();
1388
1389 const auto GetI32 = [&](const unsigned I) {
1390 return DAG.getConstant(I, dl, MVT::i32);
1391 };
1392
1393 const unsigned UniqueCallSite = GlobalUniqueCallSite++;
1394 const SDValue CallChain = CLI.Chain;
1395 const SDValue StartChain =
1396 DAG.getCALLSEQ_START(CallChain, UniqueCallSite, 0, dl);
1397 SDValue DeclareGlue = StartChain.getValue(1);
1398
1399 SmallVector<SDValue, 16> CallPrereqs{StartChain};
1400
1401 const auto MakeDeclareScalarParam = [&](SDValue Symbol, unsigned Size) {
1402 // PTX ABI requires integral types to be at least 32 bits in size. FP16 is
1403 // loaded/stored using i16, so it's handled here as well.
1404 const unsigned SizeBits = promoteScalarArgumentSize(Size * 8);
1405 SDValue Declare =
1406 DAG.getNode(NVPTXISD::DeclareScalarParam, dl, {MVT::Other, MVT::Glue},
1407 {StartChain, Symbol, GetI32(SizeBits), DeclareGlue});
1408 CallPrereqs.push_back(Declare);
1409 DeclareGlue = Declare.getValue(1);
1410 return Declare;
1411 };
1412
1413 const auto MakeDeclareArrayParam = [&](SDValue Symbol, Align Align,
1414 unsigned Size) {
1415 SDValue Declare = DAG.getNode(
1416 NVPTXISD::DeclareArrayParam, dl, {MVT::Other, MVT::Glue},
1417 {StartChain, Symbol, GetI32(Align.value()), GetI32(Size), DeclareGlue});
1418 CallPrereqs.push_back(Declare);
1419 DeclareGlue = Declare.getValue(1);
1420 return Declare;
1421 };
1422
1423 // Variadic arguments.
1424 //
1425 // Normally, for each argument, we declare a param scalar or a param
1426 // byte array in the .param space, and store the argument value to that
1427 // param scalar or array starting at offset 0.
1428 //
1429 // In the case of the first variadic argument, we declare a vararg byte array
1430 // with size 0. The exact size of this array isn't known at this point, so
1431 // it'll be patched later. All the variadic arguments will be stored to this
1432 // array at a certain offset (which gets tracked by 'VAOffset'). The offset is
1433 // initially set to 0, so it can be used for non-variadic arguments (which use
1434 // 0 offset) to simplify the code.
1435 //
1436 // After all varargs are processed, 'VAOffset' holds the size of the
1437 // vararg byte array.
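 // Editorial sketch (hypothetical call, not from the source): for a call to
 // void f(i32, ...) passing (i32 %x, double %d, i32 %y), %x gets its own
 // .param declaration, while %d and %y are stored into the single vararg
 // byte array declared below at offsets 0 and 8 respectively; VAOffset ends
 // up as 12 and is patched into the array's size afterwards.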
1438 assert((CLI.IsVarArg || CLI.Args.size() == CLI.NumFixedArgs) &&
1439 "Non-VarArg function with extra arguments");
1440
1441 const unsigned FirstVAArg = CLI.NumFixedArgs; // position of first variadic
1442 unsigned VAOffset = 0; // current offset in the param array
1443
1444 const SDValue VADeclareParam =
1445 CLI.Args.size() > FirstVAArg
1446 ? MakeDeclareArrayParam(getCallParamSymbol(DAG, FirstVAArg, MVT::i32),
1447 Align(STI.getMaxRequiredAlignment()), 0)
1448 : SDValue();
1449
1450 // Args.size() and Outs.size() need not match.
1451 // Outs.size() will be larger
1452 // * if there is an aggregate argument with multiple fields (each field
1453 // showing up separately in Outs)
1454 // * if there is a vector argument with more than typical vector-length
1455 // elements (generally if more than 4) where each vector element is
1456 // individually present in Outs.
1457 // So a different index should be used for indexing into Outs/OutVals.
1458 // See similar issue in LowerFormalArguments.
1459 auto AllOuts = ArrayRef(CLI.Outs);
1460 auto AllOutVals = ArrayRef(CLI.OutVals);
1461 assert(AllOuts.size() == AllOutVals.size() &&
1462 "Outs and OutVals must be the same size");
1463 // Declare the .param or .reg spaces needed to pass values
1464 // to the function.
1465 for (const auto E : llvm::enumerate(Args)) {
1466 const auto ArgI = E.index();
1467 const auto Arg = E.value();
1468 const auto ArgOuts =
1469 AllOuts.take_while([&](auto O) { return O.OrigArgIndex == ArgI; });
1470 const auto ArgOutVals = AllOutVals.take_front(ArgOuts.size());
1471 AllOuts = AllOuts.drop_front(ArgOuts.size());
1472 AllOutVals = AllOutVals.drop_front(ArgOuts.size());
1473
1474 const bool IsVAArg = (ArgI >= FirstVAArg);
1475 const bool IsByVal = Arg.IsByVal;
1476
1477 const SDValue ParamSymbol =
1478 getCallParamSymbol(DAG, IsVAArg ? FirstVAArg : ArgI, MVT::i32);
1479
1480 assert((!IsByVal || Arg.IndirectType) &&
1481 "byval arg must have indirect type");
1482 Type *ETy = (IsByVal ? Arg.IndirectType : Arg.Ty);
1483
1484 const Align ArgAlign = [&]() {
1485 if (IsByVal) {
1486 // The ByValAlign in the Outs[OIdx].Flags is always set at this point,
1487 // so we don't need to worry whether it's naturally aligned or not.
1488 // See TargetLowering::LowerCallTo().
1489 const Align InitialAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1491 InitialAlign, DL);
1492 }
1493 return getArgumentAlignment(CB, Arg.Ty, ArgI + 1, DL);
1494 }();
1495
1496 const unsigned TySize = DL.getTypeAllocSize(ETy);
1497 assert((!IsByVal || TySize == ArgOuts[0].Flags.getByValSize()) &&
1498 "type size mismatch");
1499
1500 const SDValue ArgDeclare = [&]() {
1501 if (IsVAArg)
1502 return VADeclareParam;
1503
1504 if (IsByVal || shouldPassAsArray(Arg.Ty))
1505 return MakeDeclareArrayParam(ParamSymbol, ArgAlign, TySize);
1506
1507 assert(ArgOuts.size() == 1 && "We must pass only one value as non-array");
1508 assert((ArgOuts[0].VT.isInteger() || ArgOuts[0].VT.isFloatingPoint()) &&
1509 "Only int and float types are supported as non-array arguments");
1510
1511 return MakeDeclareScalarParam(ParamSymbol, TySize);
1512 }();
1513
1514 if (IsByVal) {
1515 assert(ArgOutVals.size() == 1 && "We must pass only one value as byval");
1516 SDValue SrcPtr = ArgOutVals[0];
1517 const auto PointerInfo = refinePtrAS(SrcPtr, DAG, DL, *this);
1518 const Align BaseSrcAlign = ArgOuts[0].Flags.getNonZeroByValAlign();
1519
1520 if (IsVAArg)
1521 VAOffset = alignTo(VAOffset, ArgAlign);
1522
1523 SmallVector<EVT, 4> ValueVTs, MemVTs;
1525 ComputeValueVTs(*this, DL, ETy, ValueVTs, &MemVTs, &Offsets);
1526
1527 unsigned J = 0;
1528 const auto VI = VectorizePTXValueVTs(MemVTs, Offsets, ArgAlign, IsVAArg);
1529 for (const unsigned NumElts : VI) {
1530 EVT LoadVT = getVectorizedVT(MemVTs[J], NumElts, Ctx);
1531 Align SrcAlign = commonAlignment(BaseSrcAlign, Offsets[J]);
1532 SDValue SrcAddr = DAG.getObjectPtrOffset(dl, SrcPtr, Offsets[J]);
1533 SDValue SrcLoad =
1534 DAG.getLoad(LoadVT, dl, CallChain, SrcAddr, PointerInfo, SrcAlign);
1535
1536 TypeSize ParamOffset = Offsets[J].getWithIncrement(VAOffset);
1537 Align ParamAlign = commonAlignment(ArgAlign, ParamOffset);
1538 SDValue ParamAddr =
1539 DAG.getObjectPtrOffset(dl, ParamSymbol, ParamOffset);
1540 SDValue StoreParam =
1541 DAG.getStore(ArgDeclare, dl, SrcLoad, ParamAddr,
1543 CallPrereqs.push_back(StoreParam);
1544
1545 J += NumElts;
1546 }
1547 if (IsVAArg)
1548 VAOffset += TySize;
1549 } else {
1552 ComputePTXValueVTs(*this, DL, Ctx, CLI.CallConv, Arg.Ty, VTs, Offsets,
1553 VAOffset);
1554 assert(VTs.size() == Offsets.size() && "Size mismatch");
1555 assert(VTs.size() == ArgOuts.size() && "Size mismatch");
1556
1557 // PTX Interoperability Guide 3.3(A): [Integer] Values shorter
1558 // than 32-bits are sign extended or zero extended, depending on
1559 // whether they are signed or unsigned types. This case applies
1560 // only to scalar parameters and not to aggregate values.
1561 const bool ExtendIntegerParam =
1562 Arg.Ty->isIntegerTy() && DL.getTypeAllocSizeInBits(Arg.Ty) < 32;
1563
1564 const auto GetStoredValue = [&](const unsigned I) {
1565 SDValue StVal = ArgOutVals[I];
1567 StVal.getValueType() &&
1568 "OutVal type should always be legal");
1569
1570 const EVT VTI = promoteScalarIntegerPTX(VTs[I]);
1571 const EVT StoreVT =
1572 ExtendIntegerParam ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
1573
1574 return correctParamType(StVal, StoreVT, ArgOuts[I].Flags, DAG, dl);
1575 };
1576
1577 unsigned J = 0;
1578 const auto VI = VectorizePTXValueVTs(VTs, Offsets, ArgAlign, IsVAArg);
1579 for (const unsigned NumElts : VI) {
1580 const EVT EltVT = promoteScalarIntegerPTX(VTs[J]);
1581
1582 unsigned Offset;
1583 if (IsVAArg) {
1584 // TODO: We may need to support vector types that can be passed
1585 // as scalars in variadic arguments.
1586 assert(NumElts == 1 &&
1587 "Vectorization should be disabled for vaargs.");
1588
1589 // Align each part of the variadic argument to their type.
1590 VAOffset = alignTo(VAOffset, DAG.getEVTAlign(EltVT));
1591 Offset = VAOffset;
1592
1593 const EVT TheStoreType = ExtendIntegerParam ? MVT::i32 : EltVT;
1594 VAOffset += DL.getTypeAllocSize(TheStoreType.getTypeForEVT(Ctx));
1595 } else {
1596 assert(VAOffset == 0 && "VAOffset must be 0 for non-VA args");
1597 Offset = Offsets[J];
1598 }
1599
1600 SDValue Ptr =
1601 DAG.getObjectPtrOffset(dl, ParamSymbol, TypeSize::getFixed(Offset));
1602
1603 const MaybeAlign CurrentAlign = ExtendIntegerParam
1604 ? MaybeAlign(std::nullopt)
1605 : commonAlignment(ArgAlign, Offset);
1606
1607 SDValue Val =
1608 getBuildVectorizedValue(NumElts, dl, DAG, [&](unsigned K) {
1609 return GetStoredValue(J + K);
1610 });
1611
1612 SDValue StoreParam =
1613 DAG.getStore(ArgDeclare, dl, Val, Ptr,
1615 CallPrereqs.push_back(StoreParam);
1616
1617 J += NumElts;
1618 }
1619 }
1620 }
1621
1622 // Handle Result
1623 if (!Ins.empty()) {
1624 const SDValue RetSymbol = DAG.getExternalSymbol("retval0", MVT::i32);
1625 const unsigned ResultSize = DL.getTypeAllocSize(RetTy);
1626 if (shouldPassAsArray(RetTy)) {
1627 const Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);
1628 MakeDeclareArrayParam(RetSymbol, RetAlign, ResultSize);
1629 } else {
1630 MakeDeclareScalarParam(RetSymbol, ResultSize);
1631 }
1632 }
1633
1634 // Set the size of the vararg param byte array if the callee is a variadic
1635 // function and the variadic part is not empty.
1636 if (VADeclareParam) {
1637 SDValue DeclareParamOps[] = {VADeclareParam.getOperand(0),
1638 VADeclareParam.getOperand(1),
1639 VADeclareParam.getOperand(2), GetI32(VAOffset),
1640 VADeclareParam.getOperand(4)};
1641 DAG.MorphNodeTo(VADeclareParam.getNode(), VADeclareParam.getOpcode(),
1642 VADeclareParam->getVTList(), DeclareParamOps);
1643 }
1644
1645 const auto *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
1646 // If the type of the callsite does not match that of the function, convert
1647 // the callsite to an indirect call.
1648 const bool ConvertToIndirectCall = shouldConvertToIndirectCall(CB, Func);
1649
1650 // Both indirect calls and libcalls have nullptr Func. In order to distinguish
1651 // between them we must rely on the call site value which is valid for
1652 // indirect calls but is always null for libcalls.
1653 const bool IsIndirectCall = (!Func && CB) || ConvertToIndirectCall;
1654
1655 if (isa<ExternalSymbolSDNode>(Callee)) {
1656 Function* CalleeFunc = nullptr;
1657
1658 // Try to find the callee in the current module.
1659 Callee = DAG.getSymbolFunctionGlobalAddress(Callee, &CalleeFunc);
1660 assert(CalleeFunc != nullptr && "Libcall callee must be set.");
1661
1662 // Set the "libcall callee" attribute to indicate that the function
1663 // must always have a declaration.
1664 CalleeFunc->addFnAttr("nvptx-libcall-callee", "true");
1665 }
1666
1667 if (IsIndirectCall) {
1668 // This is the indirect function call case: PTX requires a prototype of the
1669 // form
1670 // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
1671 // to be emitted, and the label has to be used as the last arg of the call
1672 // instruction.
1673 // The prototype is embedded in a string and put as the operand for a
1674 // CallPrototype SDNode which will print out to the value of the string.
1675 const bool HasVAArgs = CLI.IsVarArg && (CLI.Args.size() > CLI.NumFixedArgs);
1676 std::string Proto =
1677 getPrototype(DL, RetTy, Args, CLI.Outs,
1678 HasVAArgs ? std::optional(FirstVAArg) : std::nullopt, *CB,
1679 UniqueCallSite);
1680 const char *ProtoStr = nvTM->getStrPool().save(Proto).data();
1681 const SDValue PrototypeDeclare = DAG.getNode(
1682 NVPTXISD::CallPrototype, dl, MVT::Other,
1683 {StartChain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32)});
1684 CallPrereqs.push_back(PrototypeDeclare);
1685 }
1686
1687 const unsigned Proto = IsIndirectCall ? UniqueCallSite : 0;
1688 const unsigned NumArgs =
1689 std::min<unsigned>(CLI.NumFixedArgs + 1, Args.size());
1690 /// CALL(Chain, IsConvergent, IsIndirectCall/IsUniform, NumReturns,
1691 /// NumParams, Callee, Proto)
1692 const SDValue CallToken = DAG.getTokenFactor(dl, CallPrereqs);
1693 const SDValue Call = DAG.getNode(
1694 NVPTXISD::CALL, dl, MVT::Other,
1695 {CallToken, GetI32(CLI.IsConvergent), GetI32(IsIndirectCall),
1696 GetI32(Ins.empty() ? 0 : 1), GetI32(NumArgs), Callee, GetI32(Proto)});
1697
1698 SmallVector<SDValue, 16> LoadChains{Call};
1699 SmallVector<SDValue, 16> ProxyRegOps;
1700 if (!Ins.empty()) {
1703 ComputePTXValueVTs(*this, DL, Ctx, CLI.CallConv, RetTy, VTs, Offsets);
1704 assert(VTs.size() == Ins.size() && "Bad value decomposition");
1705
1706 const Align RetAlign = getArgumentAlignment(CB, RetTy, 0, DL);
1707 const SDValue RetSymbol = DAG.getExternalSymbol("retval0", MVT::i32);
1708
1709 // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
1710 // 32-bits are sign extended or zero extended, depending on whether
1711 // they are signed or unsigned types.
1712 const bool ExtendIntegerRetVal =
1713 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
1714
1715 unsigned I = 0;
1716 const auto VI = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
1717 for (const unsigned NumElts : VI) {
1718 const MaybeAlign CurrentAlign =
1719 ExtendIntegerRetVal ? MaybeAlign(std::nullopt)
1720 : commonAlignment(RetAlign, Offsets[I]);
1721
1722 const EVT VTI = promoteScalarIntegerPTX(VTs[I]);
1723 const EVT LoadVT =
1724 ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
1725 const EVT VecVT = getVectorizedVT(LoadVT, NumElts, Ctx);
1726 SDValue Ptr =
1727 DAG.getObjectPtrOffset(dl, RetSymbol, TypeSize::getFixed(Offsets[I]));
1728
1729 SDValue R =
1730 DAG.getLoad(VecVT, dl, Call, Ptr,
1732
1733 LoadChains.push_back(R.getValue(1));
1734 for (const unsigned J : llvm::seq(NumElts))
1735 ProxyRegOps.push_back(getExtractVectorizedValue(R, J, LoadVT, dl, DAG));
1736 I += NumElts;
1737 }
1738 }
1739
1740 const SDValue EndToken = DAG.getTokenFactor(dl, LoadChains);
1741 const SDValue CallEnd = DAG.getCALLSEQ_END(EndToken, UniqueCallSite,
1742 UniqueCallSite + 1, SDValue(), dl);
1743
1744 // Append ProxyReg instructions to the chain to make sure that `callseq_end`
1745 // will not get lost. Otherwise, during libcalls expansion, the nodes can become
1746 // dangling.
1747 for (const auto [I, Reg] : llvm::enumerate(ProxyRegOps)) {
1748 SDValue Proxy =
1749 DAG.getNode(NVPTXISD::ProxyReg, dl, Reg.getValueType(), {CallEnd, Reg});
1750 SDValue Ret = correctParamType(Proxy, Ins[I].VT, Ins[I].Flags, DAG, dl);
1751 InVals.push_back(Ret);
1752 }
1753
1754 // set IsTailCall to false for now, until we figure out how to express
1755 // tail call optimization in PTX
1756 CLI.IsTailCall = false;
1757 return CallEnd;
1758}
1759
1761 SelectionDAG &DAG) const {
1762
1763 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
1764 const Function &Fn = DAG.getMachineFunction().getFunction();
1765
1767 Fn,
1768 "Support for dynamic alloca introduced in PTX ISA version 7.3 and "
1769 "requires target sm_52.",
1770 SDLoc(Op).getDebugLoc()));
1771 auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()),
1772 Op.getOperand(0)};
1773 return DAG.getMergeValues(Ops, SDLoc());
1774 }
1775
1776 SDLoc DL(Op.getNode());
1777 SDValue Chain = Op.getOperand(0);
1778 SDValue Size = Op.getOperand(1);
1779 uint64_t Align = Op.getConstantOperandVal(2);
1780
1781 // The alignment on a ISD::DYNAMIC_STACKALLOC node may be 0 to indicate that
1782 // the default stack alignment should be used.
1783 if (Align == 0)
1785
1786 // The size operand of the PTX alloca instruction is 64-bit for m64 and 32-bit for m32.
1787 const MVT LocalVT = getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_LOCAL);
1788
1789 SDValue Alloc =
1790 DAG.getNode(NVPTXISD::DYNAMIC_STACKALLOC, DL, {LocalVT, MVT::Other},
1791 {Chain, DAG.getZExtOrTrunc(Size, DL, LocalVT),
1792 DAG.getTargetConstant(Align, DL, MVT::i32)});
1793
1794 SDValue ASC = DAG.getAddrSpaceCast(
1796
1797 return DAG.getMergeValues({ASC, SDValue(Alloc.getNode(), 1)}, DL);
1798}
1799
1801 SelectionDAG &DAG) const {
1802 SDLoc DL(Op.getNode());
1803 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
1804 const Function &Fn = DAG.getMachineFunction().getFunction();
1805
1807 Fn,
1808 "Support for stackrestore requires PTX ISA version >= 7.3 and target "
1809 ">= sm_52.",
1810 DL.getDebugLoc()));
1811 return Op.getOperand(0);
1812 }
1813
1814 const MVT LocalVT = getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_LOCAL);
1815 SDValue Chain = Op.getOperand(0);
1816 SDValue Ptr = Op.getOperand(1);
1817 SDValue ASC = DAG.getAddrSpaceCast(DL, LocalVT, Ptr, ADDRESS_SPACE_GENERIC,
1819 return DAG.getNode(NVPTXISD::STACKRESTORE, DL, MVT::Other, {Chain, ASC});
1820}
1821
1823 SelectionDAG &DAG) const {
1824 SDLoc DL(Op.getNode());
1825 if (STI.getPTXVersion() < 73 || STI.getSmVersion() < 52) {
1826 const Function &Fn = DAG.getMachineFunction().getFunction();
1827
1829 Fn,
1830 "Support for stacksave requires PTX ISA version >= 7.3 and target >= "
1831 "sm_52.",
1832 DL.getDebugLoc()));
1833 auto Ops = {DAG.getConstant(0, DL, Op.getValueType()), Op.getOperand(0)};
1834 return DAG.getMergeValues(Ops, DL);
1835 }
1836
1837 const MVT LocalVT = getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_LOCAL);
1838 SDValue Chain = Op.getOperand(0);
1839 SDValue SS =
1840 DAG.getNode(NVPTXISD::STACKSAVE, DL, {LocalVT, MVT::Other}, Chain);
1841 SDValue ASC = DAG.getAddrSpaceCast(
1842 DL, Op.getValueType(), SS, ADDRESS_SPACE_LOCAL, ADDRESS_SPACE_GENERIC);
1843 return DAG.getMergeValues({ASC, SDValue(SS.getNode(), 1)}, DL);
1844}
1845
1846// By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
1847// (see LegalizeDAG.cpp). This is slow and uses local memory.
1848// We use extract/insert/build vector instead, just as LegalizeOp() did in LLVM 2.5.
1849SDValue
1850NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
1851 SDNode *Node = Op.getNode();
1852 SDLoc dl(Node);
1854 unsigned NumOperands = Node->getNumOperands();
1855 for (unsigned i = 0; i < NumOperands; ++i) {
1856 SDValue SubOp = Node->getOperand(i);
1857 EVT VVT = SubOp.getNode()->getValueType(0);
1858 EVT EltVT = VVT.getVectorElementType();
1859 unsigned NumSubElem = VVT.getVectorNumElements();
1860 for (unsigned j = 0; j < NumSubElem; ++j) {
1861 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
1862 DAG.getIntPtrConstant(j, dl)));
1863 }
1864 }
1865 return DAG.getBuildVector(Node->getValueType(0), dl, Ops);
1866}
1867
1869 SelectionDAG &DAG,
1870 unsigned Mode = NVPTX::PTXPrmtMode::NONE) {
1871 assert(A.getValueType() == MVT::i32 && B.getValueType() == MVT::i32 &&
1872 Selector.getValueType() == MVT::i32 && "PRMT must have i32 operands");
1873 return DAG.getNode(NVPTXISD::PRMT, DL, MVT::i32,
1874 {A, B, Selector, DAG.getConstant(Mode, DL, MVT::i32)});
1875}
1876
1878 SelectionDAG &DAG,
1879 unsigned Mode = NVPTX::PTXPrmtMode::NONE) {
1880 return getPRMT(A, B, DAG.getConstant(Selector, DL, MVT::i32), DL, DAG, Mode);
1881}
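// Illustration of default-mode PRMT byte selection (a rough sketch; see the
// PTX ISA for the authoritative definition). The 8 bytes of {B, A} form a pool
// indexed 0..7, and each selector nibble picks one byte of the 32-bit result:
//   prmt.b32 d, 0x33221100, 0x77665544, 0x0123;  // d = 0x00112233 (reverse of A)
//   prmt.b32 d, 0x33221100, 0x77665544, 0x5410;  // d = 0x55441100 (lo16(B) : lo16(A))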
1882
1883/// Reduces the elements using the scalar operations provided. The operations
1884/// are sorted in descending order of the number of inputs they take. The flags
1885/// on the original reduction operation are propagated to each scalar operation.
1886/// Nearby elements are grouped in a tree reduction, unlike the shuffle reduction
1887/// used in ExpandReductions and SelectionDAG.
1889 const SmallVector<SDValue> &Elements, EVT EltTy,
1890 ArrayRef<std::pair<unsigned /*NodeType*/, unsigned /*NumInputs*/>> Ops,
1891 const SDLoc &DL, const SDNodeFlags Flags, SelectionDAG &DAG) {
1892 // Build the reduction tree at each level, starting with all the elements.
1893 SmallVector<SDValue> Level = Elements;
1894
1895 unsigned OpIdx = 0;
1896 while (Level.size() > 1) {
1897 // Try to reduce this level using the current operator.
1898 const auto [Op, NumInputs] = Ops[OpIdx];
1899
1900 // Build the next level by partially reducing all elements.
1901 SmallVector<SDValue> ReducedLevel;
1902 unsigned I = 0, E = Level.size();
1903 for (; I + NumInputs <= E; I += NumInputs) {
1904 // Reduce elements in groups of [NumInputs], as much as possible.
1905 ReducedLevel.push_back(DAG.getNode(
1906 Op, DL, EltTy, ArrayRef<SDValue>(Level).slice(I, NumInputs), Flags));
1907 }
1908
1909 if (I < E) {
1910 // Handle leftover elements.
1911
1912 if (ReducedLevel.empty()) {
1913 // We didn't reduce anything at this level. We need to pick a smaller
1914 // operator.
1915 ++OpIdx;
1916 assert(OpIdx < Ops.size() && "no smaller operators for reduction");
1917 continue;
1918 }
1919
1920 // We reduced some things but there's still more left, meaning the
1921 // operator's number of inputs doesn't evenly divide this level size. Move
1922 // these elements to the next level.
1923 for (; I < E; ++I)
1924 ReducedLevel.push_back(Level[I]);
1925 }
1926
1927 // Process the next level.
1928 Level = ReducedLevel;
1929 }
1930
1931 return *Level.begin();
1932}
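// For example, reducing 8 elements with Ops = {(fmax3, 3), (fmax, 2)} proceeds
// roughly as follows (leftover elements are carried over to the next level):
//   level 0: e0 e1 e2 e3 e4 e5 e6 e7
//   level 1: fmax3(e0,e1,e2) fmax3(e3,e4,e5) e6 e7
//   level 2: fmax3(l0,l1,e6) e7
//   level 3: fmax(m0,e7)          <- falls back to the 2-input operator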
1933
1934// Get scalar reduction opcode
1935static ISD::NodeType getScalarOpcodeForReduction(unsigned ReductionOpcode) {
1936 switch (ReductionOpcode) {
1938 return ISD::FMAXNUM;
1940 return ISD::FMINNUM;
1942 return ISD::FMAXIMUM;
1944 return ISD::FMINIMUM;
1945 default:
1946 llvm_unreachable("unhandled reduction opcode");
1947 }
1948}
1949
1950/// Get 3-input scalar reduction opcode
1951static std::optional<unsigned>
1952getScalar3OpcodeForReduction(unsigned ReductionOpcode) {
1953 switch (ReductionOpcode) {
1955 return NVPTXISD::FMAXNUM3;
1957 return NVPTXISD::FMINNUM3;
1959 return NVPTXISD::FMAXIMUM3;
1961 return NVPTXISD::FMINIMUM3;
1962 default:
1963 return std::nullopt;
1964 }
1965}
1966
1967/// Lower reductions to either a sequence of operations or a tree if
1968/// reassociations are allowed. This method will use larger operations like
1969/// max3/min3 when the target supports them.
1970SDValue NVPTXTargetLowering::LowerVECREDUCE(SDValue Op,
1971 SelectionDAG &DAG) const {
1972 SDLoc DL(Op);
1973 const SDNodeFlags Flags = Op->getFlags();
1974 SDValue Vector = Op.getOperand(0);
1975
1976 const unsigned Opcode = Op->getOpcode();
1977 const EVT EltTy = Vector.getValueType().getVectorElementType();
1978
1979 // Whether we can use 3-input min/max when expanding the reduction.
1980 const bool CanUseMinMax3 =
1981 EltTy == MVT::f32 && STI.getSmVersion() >= 100 &&
1982 STI.getPTXVersion() >= 88 &&
1983 (Opcode == ISD::VECREDUCE_FMAX || Opcode == ISD::VECREDUCE_FMIN ||
1984 Opcode == ISD::VECREDUCE_FMAXIMUM || Opcode == ISD::VECREDUCE_FMINIMUM);
1985
1986 // A list of SDNode opcodes with equivalent semantics, sorted descending by
1987 // number of inputs they take.
1988 SmallVector<std::pair<unsigned /*Op*/, unsigned /*NumIn*/>, 2> ScalarOps;
1989
1990 if (auto Opcode3Elem = getScalar3OpcodeForReduction(Opcode);
1991 CanUseMinMax3 && Opcode3Elem)
1992 ScalarOps.push_back({*Opcode3Elem, 3});
1993 ScalarOps.push_back({getScalarOpcodeForReduction(Opcode), 2});
1994
1996 DAG.ExtractVectorElements(Vector, Elements);
1997
1998 return buildTreeReduction(Elements, EltTy, ScalarOps, DL, Flags, DAG);
1999}
2000
2001SDValue NVPTXTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
2002 // Handle bitcasting from v2i8 without hitting the default promotion
2003 // strategy which goes through stack memory.
2004 EVT FromVT = Op->getOperand(0)->getValueType(0);
2005 if (FromVT != MVT::v2i8) {
2006 return Op;
2007 }
2008
2009 // Pack vector elements into i16 and bitcast to final type
2010 SDLoc DL(Op);
2011 SDValue Vec0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8,
2012 Op->getOperand(0), DAG.getIntPtrConstant(0, DL));
2013 SDValue Vec1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8,
2014 Op->getOperand(0), DAG.getIntPtrConstant(1, DL));
2015 SDValue Extend0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i16, Vec0);
2016 SDValue Extend1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i16, Vec1);
2017 SDValue Const8 = DAG.getConstant(8, DL, MVT::i16);
2018 SDValue AsInt = DAG.getNode(
2019 ISD::OR, DL, MVT::i16,
2020 {Extend0, DAG.getNode(ISD::SHL, DL, MVT::i16, {Extend1, Const8})});
2021 EVT ToVT = Op->getValueType(0);
2022 return DAG.getBitcast(ToVT, AsInt);
2023}
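// For example, bitcasting <0xAA, 0xBB> : v2i8 to i16 produces
//   (zext(0xAA) | (zext(0xBB) << 8)) = 0xBBAA,
// i.e. element 0 lands in the low byte, matching the little-endian layout the
// generic stack-based expansion would have produced.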
2024
2025// We can initialize a constant f16x2/v2i16/v4i8 with a single .b32 move.
2026// Normally it would get lowered as two constant loads and a vector-packing
2027// move. Instead we want just a constant move:
2028// mov.b32 %r2, 0x40003C00
2029SDValue NVPTXTargetLowering::LowerBUILD_VECTOR(SDValue Op,
2030 SelectionDAG &DAG) const {
2031 EVT VT = Op->getValueType(0);
2032 if (!(NVPTX::isPackedVectorTy(VT) && VT.is32BitVector()))
2033 return Op;
2034 SDLoc DL(Op);
2035
2036 if (!llvm::all_of(Op->ops(), [](SDValue Operand) {
2037 return Operand->isUndef() || isa<ConstantSDNode>(Operand) ||
2038 isa<ConstantFPSDNode>(Operand);
2039 })) {
2040 if (VT != MVT::v4i8)
2041 return Op;
2042 // Lower a non-constant v4i8 vector as a byte-wise constructed i32, which
2043 // allows us to optimize the calculation of the constant parts.
2044 auto GetPRMT = [&](const SDValue Left, const SDValue Right, bool Cast,
2045 uint64_t SelectionValue) -> SDValue {
2046 SDValue L = Left;
2047 SDValue R = Right;
2048 if (Cast) {
2049 L = DAG.getAnyExtOrTrunc(L, DL, MVT::i32);
2050 R = DAG.getAnyExtOrTrunc(R, DL, MVT::i32);
2051 }
2052 return getPRMT(L, R, SelectionValue, DL, DAG);
2053 };
2054 auto PRMT__10 = GetPRMT(Op->getOperand(0), Op->getOperand(1), true, 0x3340);
2055 auto PRMT__32 = GetPRMT(Op->getOperand(2), Op->getOperand(3), true, 0x3340);
2056 auto PRMT3210 = GetPRMT(PRMT__10, PRMT__32, false, 0x5410);
2057 return DAG.getBitcast(VT, PRMT3210);
2058 }
2059
2060 // Get the Nth operand's value as an APInt(32). Undef values are treated as 0.
2061 auto GetOperand = [](SDValue Op, int N) -> APInt {
2062 const SDValue &Operand = Op->getOperand(N);
2063 EVT VT = Op->getValueType(0);
2064 if (Operand->isUndef())
2065 return APInt(32, 0);
2066 APInt Value;
2067 if (VT == MVT::v2f16 || VT == MVT::v2bf16)
2068 Value = cast<ConstantFPSDNode>(Operand)->getValueAPF().bitcastToAPInt();
2069 else if (VT == MVT::v2i16 || VT == MVT::v4i8)
2070 Value = Operand->getAsAPIntVal();
2071 else
2072 llvm_unreachable("Unsupported type");
2073 // i8 values are carried around as i16, so we need to zero out the upper
2074 // bits so they do not get in the way when combining individual byte values.
2075 if (VT == MVT::v4i8)
2076 Value = Value.trunc(8);
2077 return Value.zext(32);
2078 };
2079
2080 // Construct a 32-bit constant by shifting into place smaller values
2081 // (elements of the vector type VT).
2082 // For example, if VT has 2 elements, then N == 2:
2083 // ShiftAmount = 32 / N = 16
2084 // Value |= Op0 (b16) << 0
2085 // Value |= Op1 (b16) << 16
2086 // If N == 4:
2087 // ShiftAmount = 32 / N = 8
2088 // Value |= Op0 (b8) << 0
2089 // Value |= Op1 (b8) << 8
2090 // Value |= Op2 (b8) << 16
2091 // Value |= Op3 (b8) << 24
2092 // ...etc
2093 APInt Value(32, 0);
2094 const unsigned NumElements = VT.getVectorNumElements();
2095 assert(32 % NumElements == 0 && "must evenly divide bit length");
2096 const unsigned ShiftAmount = 32 / NumElements;
2097 for (unsigned ElementNo : seq(NumElements))
2098 Value |= GetOperand(Op, ElementNo).shl(ElementNo * ShiftAmount);
2099 SDValue Const = DAG.getConstant(Value, DL, MVT::i32);
2100 return DAG.getNode(ISD::BITCAST, DL, Op->getValueType(0), Const);
2101}
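// For example, the constant vector <1, 2, 3, 4> : v4i8 folds into the single
// i32 0x04030201 (element i shifted left by i * 8), and <1.0, 2.0> : v2f16
// folds into 0x40003C00 as in the mov.b32 example above.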
2102
2103SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
2104 SelectionDAG &DAG) const {
2105 SDValue Index = Op->getOperand(1);
2106 SDValue Vector = Op->getOperand(0);
2107 SDLoc DL(Op);
2108 EVT VectorVT = Vector.getValueType();
2109
2110 if (VectorVT == MVT::v4i8) {
2111 SDValue Selector = DAG.getNode(ISD::OR, DL, MVT::i32,
2112 DAG.getZExtOrTrunc(Index, DL, MVT::i32),
2113 DAG.getConstant(0x7770, DL, MVT::i32));
2114 SDValue PRMT = getPRMT(DAG.getBitcast(MVT::i32, Vector),
2115 DAG.getConstant(0, DL, MVT::i32), Selector, DL, DAG);
2116 SDValue Ext = DAG.getAnyExtOrTrunc(PRMT, DL, Op->getValueType(0));
2117 SDNodeFlags Flags;
2118 Flags.setNoSignedWrap(Ext.getScalarValueSizeInBits() > 8);
2119 Flags.setNoUnsignedWrap(Ext.getScalarValueSizeInBits() >= 8);
2120 Ext->setFlags(Flags);
2121 return Ext;
2122 }
2123
2124 // Constant index will be matched by tablegen.
2125 if (isa<ConstantSDNode>(Index.getNode()))
2126 return Op;
2127
2128 // Extract individual elements and select one of them.
2129 assert(NVPTX::isPackedVectorTy(VectorVT) &&
2130 VectorVT.getVectorNumElements() == 2 && "Unexpected vector type.");
2131 EVT EltVT = VectorVT.getVectorElementType();
2132
2133 SDLoc dl(Op.getNode());
2134 SDValue E0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
2135 DAG.getIntPtrConstant(0, dl));
2136 SDValue E1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vector,
2137 DAG.getIntPtrConstant(1, dl));
2138 return DAG.getSelectCC(dl, Index, DAG.getIntPtrConstant(0, dl), E0, E1,
2140}
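// For the v4i8 case above: selector (0x7770 | Index) places the selected byte
// of the vector in result byte 0 and fills the remaining bytes from the
// all-zero second operand (nibble 7 selects a byte of that zero operand).
// E.g. for a vector whose i32 bitcast is 0xDDCCBBAA and Index == 2, the
// selector is 0x7772 and the PRMT result is 0x000000CC.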
2141
2142SDValue NVPTXTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
2143 SelectionDAG &DAG) const {
2144 SDValue Vector = Op->getOperand(0);
2145 EVT VectorVT = Vector.getValueType();
2146
2147 if (VectorVT != MVT::v4i8)
2148 return Op;
2149 SDLoc DL(Op);
2150 SDValue Value = Op->getOperand(1);
2151 if (Value->isUndef())
2152 return Vector;
2153
2154 SDValue Index = Op->getOperand(2);
2155
2156 SDValue BFI =
2157 DAG.getNode(NVPTXISD::BFI, DL, MVT::i32,
2158 {DAG.getZExtOrTrunc(Value, DL, MVT::i32), Vector,
2159 DAG.getNode(ISD::MUL, DL, MVT::i32,
2160 DAG.getZExtOrTrunc(Index, DL, MVT::i32),
2161 DAG.getConstant(8, DL, MVT::i32)),
2162 DAG.getConstant(8, DL, MVT::i32)});
2163 return DAG.getNode(ISD::BITCAST, DL, Op->getValueType(0), BFI);
2164}
2165
2166SDValue NVPTXTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
2167 SelectionDAG &DAG) const {
2168 SDValue V1 = Op.getOperand(0);
2169 EVT VectorVT = V1.getValueType();
2170 if (VectorVT != MVT::v4i8 || Op.getValueType() != MVT::v4i8)
2171 return Op;
2172
2173 // Lower shuffle to PRMT instruction.
2174 const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
2175 SDValue V2 = Op.getOperand(1);
2176 uint32_t Selector = 0;
2177 for (auto I : llvm::enumerate(SVN->getMask())) {
2178 if (I.value() != -1) // -1 is a placeholder for undef.
2179 Selector |= (I.value() << (I.index() * 4));
2180 }
2181
2182 SDLoc DL(Op);
2183 SDValue PRMT = getPRMT(DAG.getBitcast(MVT::i32, V1),
2184 DAG.getBitcast(MVT::i32, V2), Selector, DL, DAG);
2185 return DAG.getBitcast(Op.getValueType(), PRMT);
2186}
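// For example, the v4i8 shuffle mask <3, 2, 1, 0> yields selector 0x0123,
// i.e. a byte reverse of V1. Mask values 4..7 select bytes of V2, since PRMT
// indexes the concatenated {V2 : V1} byte pool.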
2187/// LowerShiftRightParts - Lower SRL_PARTS and SRA_PARTS, which
2188/// 1) return two i32 values and take a 2 x i32 value to shift plus a shift
2189/// amount, or
2190/// 2) return two i64 values and take a 2 x i64 value to shift plus a shift
2191/// amount.
2192SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
2193 SelectionDAG &DAG) const {
2194 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
2195 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
2196
2197 EVT VT = Op.getValueType();
2198 unsigned VTBits = VT.getSizeInBits();
2199 SDLoc dl(Op);
2200 SDValue ShOpLo = Op.getOperand(0);
2201 SDValue ShOpHi = Op.getOperand(1);
2202 SDValue ShAmt = Op.getOperand(2);
2203 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
2204
2205 if (VTBits == 32 && STI.getSmVersion() >= 35) {
2206 // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
2207 // {dHi, dLo} = {aHi, aLo} >> Amt
2208 // dHi = aHi >> Amt
2209 // dLo = shf.r.clamp aLo, aHi, Amt
2210
2211 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
2212 SDValue Lo =
2213 DAG.getNode(NVPTXISD::FSHR_CLAMP, dl, VT, ShOpHi, ShOpLo, ShAmt);
2214
2215 SDValue Ops[2] = { Lo, Hi };
2216 return DAG.getMergeValues(Ops, dl);
2217 }
2218 else {
2219 // {dHi, dLo} = {aHi, aLo} >> Amt
2220 // - if (Amt>=size) then
2221 // dLo = aHi >> (Amt-size)
2222 // dHi = aHi >> Amt (this is either all 0 or all 1)
2223 // else
2224 // dLo = (aLo >>logic Amt) | (aHi << (size-Amt))
2225 // dHi = aHi >> Amt
2226
2227 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
2228 DAG.getConstant(VTBits, dl, MVT::i32),
2229 ShAmt);
2230 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
2231 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
2232 DAG.getConstant(VTBits, dl, MVT::i32));
2233 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
2234 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2235 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
2236
2237 SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
2238 DAG.getConstant(VTBits, dl, MVT::i32),
2239 ISD::SETGE);
2240 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
2241 SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
2242
2243 SDValue Ops[2] = { Lo, Hi };
2244 return DAG.getMergeValues(Ops, dl);
2245 }
2246}
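// Worked example for the generic path (VTBits == 32): shifting
// {aHi = 0x00000001, aLo = 0x00000000} right by Amt = 4 takes the Amt < size
// branch:
//   dLo = (aLo >>logic 4) | (aHi << 28) = 0x10000000,  dHi = aHi >> 4 = 0,
// i.e. the 64-bit value 0x0000000100000000 >> 4 == 0x0000000010000000.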
2247
2248/// LowerShiftLeftParts - Lower SHL_PARTS, which
2249/// 1) returns two i32 values and takes a 2 x i32 value to shift plus a shift
2250/// amount, or
2251/// 2) returns two i64 values and takes a 2 x i64 value to shift plus a shift
2252/// amount.
2253SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
2254 SelectionDAG &DAG) const {
2255 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
2256 assert(Op.getOpcode() == ISD::SHL_PARTS);
2257
2258 EVT VT = Op.getValueType();
2259 unsigned VTBits = VT.getSizeInBits();
2260 SDLoc dl(Op);
2261 SDValue ShOpLo = Op.getOperand(0);
2262 SDValue ShOpHi = Op.getOperand(1);
2263 SDValue ShAmt = Op.getOperand(2);
2264
2265 if (VTBits == 32 && STI.getSmVersion() >= 35) {
2266 // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
2267 // {dHi, dLo} = {aHi, aLo} << Amt
2268 // dHi = shf.l.clamp aLo, aHi, Amt
2269 // dLo = aLo << Amt
2270
2271 SDValue Hi =
2272 DAG.getNode(NVPTXISD::FSHL_CLAMP, dl, VT, ShOpHi, ShOpLo, ShAmt);
2273 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
2274
2275 SDValue Ops[2] = { Lo, Hi };
2276 return DAG.getMergeValues(Ops, dl);
2277 }
2278 else {
2279 // {dHi, dLo} = {aHi, aLo} << Amt
2280 // - if (Amt>=size) then
2281 // dLo = aLo << Amt (all 0)
2282 // dHi = aLo << (Amt-size)
2283 // else
2284 // dLo = aLo << Amt
2285 // dHi = (aHi << Amt) | (aLo >> (size-Amt))
2286
2287 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
2288 DAG.getConstant(VTBits, dl, MVT::i32),
2289 ShAmt);
2290 SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
2291 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
2292 DAG.getConstant(VTBits, dl, MVT::i32));
2293 SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
2294 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
2295 SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
2296
2297 SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
2298 DAG.getConstant(VTBits, dl, MVT::i32),
2299 ISD::SETGE);
2300 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
2301 SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
2302
2303 SDValue Ops[2] = { Lo, Hi };
2304 return DAG.getMergeValues(Ops, dl);
2305 }
2306}
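// Worked example for the generic path (VTBits == 32): shifting
// {aHi = 0, aLo = 0x00000001} left by Amt = 36 takes the Amt >= size branch:
//   dLo = aLo << 36 = 0 (all 0, as noted above),
//   dHi = aLo << (36 - 32) = 0x00000010,
// i.e. the 64-bit value 1 << 36 == 0x0000001000000000.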
2307
2308/// If the types match, convert the generic copysign to the NVPTXISD version,
2309/// otherwise bail, ensuring that mismatched cases are properly expanded.
2310SDValue NVPTXTargetLowering::LowerFCOPYSIGN(SDValue Op,
2311 SelectionDAG &DAG) const {
2312 EVT VT = Op.getValueType();
2313 SDLoc DL(Op);
2314
2315 SDValue In1 = Op.getOperand(0);
2316 SDValue In2 = Op.getOperand(1);
2317 EVT SrcVT = In2.getValueType();
2318
2319 if (!SrcVT.bitsEq(VT))
2320 return SDValue();
2321
2322 return DAG.getNode(NVPTXISD::FCOPYSIGN, DL, VT, In1, In2);
2323}
2324
2325SDValue NVPTXTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
2326 EVT VT = Op.getValueType();
2327
2328 if (VT == MVT::f32)
2329 return LowerFROUND32(Op, DAG);
2330
2331 if (VT == MVT::f64)
2332 return LowerFROUND64(Op, DAG);
2333
2334 llvm_unreachable("unhandled type");
2335}
2336
2337// This is the rounding method used in CUDA libdevice, in C-like code:
2338// float roundf(float A)
2339// {
2340// float RoundedA = (float) (int) ( A > 0 ? (A + 0.5f) : (A - 0.5f));
2341// RoundedA = abs(A) > 0x1.0p23 ? A : RoundedA;
2342// return abs(A) < 0.5 ? (float)(int)A : RoundedA;
2343// }
2344SDValue NVPTXTargetLowering::LowerFROUND32(SDValue Op,
2345 SelectionDAG &DAG) const {
2346 SDLoc SL(Op);
2347 SDValue A = Op.getOperand(0);
2348 EVT VT = Op.getValueType();
2349
2350 SDValue AbsA = DAG.getNode(ISD::FABS, SL, VT, A);
2351
2352 // RoundedA = (float) (int) ( A > 0 ? (A + 0.5f) : (A - 0.5f))
2353 SDValue Bitcast = DAG.getNode(ISD::BITCAST, SL, MVT::i32, A);
2354 const unsigned SignBitMask = 0x80000000;
2355 SDValue Sign = DAG.getNode(ISD::AND, SL, MVT::i32, Bitcast,
2356 DAG.getConstant(SignBitMask, SL, MVT::i32));
2357 const unsigned PointFiveInBits = 0x3F000000;
2358 SDValue PointFiveWithSignRaw =
2359 DAG.getNode(ISD::OR, SL, MVT::i32, Sign,
2360 DAG.getConstant(PointFiveInBits, SL, MVT::i32));
2361 SDValue PointFiveWithSign =
2362 DAG.getNode(ISD::BITCAST, SL, VT, PointFiveWithSignRaw);
2363 SDValue AdjustedA = DAG.getNode(ISD::FADD, SL, VT, A, PointFiveWithSign);
2364 SDValue RoundedA = DAG.getNode(ISD::FTRUNC, SL, VT, AdjustedA);
2365
2366 // RoundedA = abs(A) > 0x1.0p23 ? A : RoundedA;
2367 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2368 SDValue IsLarge =
2369 DAG.getSetCC(SL, SetCCVT, AbsA, DAG.getConstantFP(pow(2.0, 23.0), SL, VT),
2370 ISD::SETOGT);
2371 RoundedA = DAG.getNode(ISD::SELECT, SL, VT, IsLarge, A, RoundedA);
2372
2373 // return abs(A) < 0.5 ? (float)(int)A : RoundedA;
2374 SDValue IsSmall = DAG.getSetCC(SL, SetCCVT, AbsA,
2375 DAG.getConstantFP(0.5, SL, VT), ISD::SETOLT);
2376 SDValue RoundedAForSmallA = DAG.getNode(ISD::FTRUNC, SL, VT, A);
2377 return DAG.getNode(ISD::SELECT, SL, VT, IsSmall, RoundedAForSmallA, RoundedA);
2378}
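// Worked example: for A = -2.5f, PointFiveWithSign = -0.5f, AdjustedA = -3.0f
// and RoundedA = trunc(-3.0f) = -3.0f, matching roundf's round-half-away-from-
// zero behaviour. For A = 0.3f, the |A| < 0.5 branch returns trunc(A) = 0.0f.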
2379
2380// The implementation of round(double) is similar to that of round(float) in
2381// that both separate the value range into three regions and use a method
2382// specific to each region to round the values. However, round(double) first
2383// rounds the absolute value and then adds the sign back, while round(float)
2384// rounds the signed value directly.
2385SDValue NVPTXTargetLowering::LowerFROUND64(SDValue Op,
2386 SelectionDAG &DAG) const {
2387 SDLoc SL(Op);
2388 SDValue A = Op.getOperand(0);
2389 EVT VT = Op.getValueType();
2390
2391 SDValue AbsA = DAG.getNode(ISD::FABS, SL, VT, A);
2392
2393 // double RoundedA = (double) (int) (abs(A) + 0.5f);
2394 SDValue AdjustedA = DAG.getNode(ISD::FADD, SL, VT, AbsA,
2395 DAG.getConstantFP(0.5, SL, VT));
2396 SDValue RoundedA = DAG.getNode(ISD::FTRUNC, SL, VT, AdjustedA);
2397
2398 // RoundedA = abs(A) < 0.5 ? (double)0 : RoundedA;
2399 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2400 SDValue IsSmall = DAG.getSetCC(SL, SetCCVT, AbsA,
2401 DAG.getConstantFP(0.5, SL, VT), ISD::SETOLT);
2402 RoundedA = DAG.getNode(ISD::SELECT, SL, VT, IsSmall,
2403 DAG.getConstantFP(0, SL, VT),
2404 RoundedA);
2405
2406 // Add sign to rounded_A
2407 RoundedA = DAG.getNode(ISD::FCOPYSIGN, SL, VT, RoundedA, A);
2408 DAG.getNode(ISD::FTRUNC, SL, VT, A);
2409
2410 // RoundedA = abs(A) > 0x1.0p52 ? A : RoundedA;
2411 SDValue IsLarge =
2412 DAG.getSetCC(SL, SetCCVT, AbsA, DAG.getConstantFP(pow(2.0, 52.0), SL, VT),
2413 ISD::SETOGT);
2414 return DAG.getNode(ISD::SELECT, SL, VT, IsLarge, A, RoundedA);
2415}
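// Worked example: for A = -0.3, trunc(|A| + 0.5) = 0.0, the |A| < 0.5 select
// keeps 0.0, and FCOPYSIGN restores the sign to give -0.0 == round(-0.3).
// Copying the sign back (rather than rounding the signed value) is what
// preserves the signed zero here.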
2416
2418 EVT VT = N->getValueType(0);
2419 EVT NVT = MVT::f32;
2420 if (VT.isVector()) {
2421 NVT = EVT::getVectorVT(*DAG.getContext(), NVT, VT.getVectorElementCount());
2422 }
2423 SDLoc DL(N);
2424 SDValue Tmp0 = DAG.getFPExtendOrRound(N->getOperand(0), DL, NVT);
2425 SDValue Tmp1 = DAG.getFPExtendOrRound(N->getOperand(1), DL, NVT);
2426 SDValue Res = DAG.getNode(N->getOpcode(), DL, NVT, Tmp0, Tmp1, N->getFlags());
2427 return DAG.getFPExtendOrRound(Res, DL, VT);
2428}
2429
2430SDValue NVPTXTargetLowering::PromoteBinOpIfF32FTZ(SDValue Op,
2431 SelectionDAG &DAG) const {
2432 if (useF32FTZ(DAG.getMachineFunction())) {
2433 return PromoteBinOpToF32(Op.getNode(), DAG);
2434 }
2435 return Op;
2436}
2437
2438SDValue NVPTXTargetLowering::LowerINT_TO_FP(SDValue Op,
2439 SelectionDAG &DAG) const {
2440 assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78);
2441
2442 if (Op.getValueType() == MVT::bf16) {
2443 SDLoc Loc(Op);
2444 return DAG.getNode(
2445 ISD::FP_ROUND, Loc, MVT::bf16,
2446 DAG.getNode(Op.getOpcode(), Loc, MVT::f32, Op.getOperand(0)),
2447 DAG.getIntPtrConstant(0, Loc, /*isTarget=*/true));
2448 }
2449
2450 // Everything else is considered legal.
2451 return Op;
2452}
2453
2454SDValue NVPTXTargetLowering::LowerFP_TO_INT(SDValue Op,
2455 SelectionDAG &DAG) const {
2456 assert(STI.getSmVersion() < 90 || STI.getPTXVersion() < 78);
2457
2458 if (Op.getOperand(0).getValueType() == MVT::bf16) {
2459 SDLoc Loc(Op);
2460 return DAG.getNode(
2461 Op.getOpcode(), Loc, Op.getValueType(),
2462 DAG.getNode(ISD::FP_EXTEND, Loc, MVT::f32, Op.getOperand(0)));
2463 }
2464
2465 // Everything else is considered legal.
2466 return Op;
2467}
2468
2469SDValue NVPTXTargetLowering::LowerFP_ROUND(SDValue Op,
2470 SelectionDAG &DAG) const {
2471 EVT NarrowVT = Op.getValueType();
2472 SDValue Wide = Op.getOperand(0);
2473 EVT WideVT = Wide.getValueType();
2474 if (NarrowVT.getScalarType() == MVT::bf16) {
2475 const TargetLowering *TLI = STI.getTargetLowering();
2476 if (STI.getSmVersion() < 80 || STI.getPTXVersion() < 70) {
2477 return TLI->expandFP_ROUND(Op.getNode(), DAG);
2478 }
2479 if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
2480 // This combination was the first to support f32 -> bf16.
2481 if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70) {
2482 if (WideVT.getScalarType() == MVT::f32) {
2483 return Op;
2484 }
2485 if (WideVT.getScalarType() == MVT::f64) {
2486 SDLoc Loc(Op);
2487 // Round-inexact-to-odd f64 to f32, then do the final rounding using
2488 // the hardware f32 -> bf16 instruction.
2490 WideVT.changeElementType(*DAG.getContext(), MVT::f32), Wide, Loc,
2491 DAG);
2492 return DAG.getFPExtendOrRound(rod, Loc, NarrowVT);
2493 }
2494 }
2495 return TLI->expandFP_ROUND(Op.getNode(), DAG);
2496 }
2497 }
2498
2499 // Everything else is considered legal.
2500 return Op;
2501}
2502
2503SDValue NVPTXTargetLowering::LowerFP_EXTEND(SDValue Op,
2504 SelectionDAG &DAG) const {
2505 SDValue Narrow = Op.getOperand(0);
2506 EVT NarrowVT = Narrow.getValueType();
2507 EVT WideVT = Op.getValueType();
2508 if (NarrowVT.getScalarType() == MVT::bf16) {
2509 if (WideVT.getScalarType() == MVT::f32 &&
2510 (STI.getSmVersion() < 80 || STI.getPTXVersion() < 71)) {
2511 SDLoc Loc(Op);
2512 return DAG.getNode(ISD::BF16_TO_FP, Loc, WideVT, Narrow);
2513 }
2514 if (WideVT.getScalarType() == MVT::f64 &&
2515 (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78)) {
2516 EVT F32 = NarrowVT.changeElementType(*DAG.getContext(), MVT::f32);
2517 SDLoc Loc(Op);
2518 if (STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 71) {
2519 Op = DAG.getNode(ISD::FP_EXTEND, Loc, F32, Narrow);
2520 } else {
2521 Op = DAG.getNode(ISD::BF16_TO_FP, Loc, F32, Narrow);
2522 }
2523 return DAG.getNode(ISD::FP_EXTEND, Loc, WideVT, Op);
2524 }
2525 }
2526
2527 // Everything else is considered legal.
2528 return Op;
2529}
2530
2532 SDLoc DL(Op);
2533 if (Op.getValueType() != MVT::v2i16)
2534 return Op;
2535 EVT EltVT = Op.getValueType().getVectorElementType();
2536 SmallVector<SDValue> VecElements;
2537 for (int I = 0, E = Op.getValueType().getVectorNumElements(); I < E; I++) {
2538 SmallVector<SDValue> ScalarArgs;
2539 llvm::transform(Op->ops(), std::back_inserter(ScalarArgs),
2540 [&](const SDUse &O) {
2541 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
2542 O.get(), DAG.getIntPtrConstant(I, DL));
2543 });
2544 VecElements.push_back(DAG.getNode(Op.getOpcode(), DL, EltVT, ScalarArgs));
2545 }
2546 SDValue V =
2547 DAG.getNode(ISD::BUILD_VECTOR, DL, Op.getValueType(), VecElements);
2548 return V;
2549}
2550
2552 SDNode *N = Op.getNode();
2553 SDLoc DL(N);
2555
2556 // split the vector argument
2557 for (size_t I = 0; I < N->getNumOperands(); I++) {
2558 SDValue Val = N->getOperand(I);
2559 EVT ValVT = Val.getValueType();
2560 if (ValVT.isVector()) {
2561 EVT EltVT = ValVT.getVectorElementType();
2562 for (unsigned J = 0, NElts = ValVT.getVectorNumElements(); J < NElts; J++)
2563 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
2564 DAG.getIntPtrConstant(J, DL)));
2565 } else
2566 Ops.push_back(Val);
2567 }
2568
2570 SDValue Tcgen05StNode =
2571 DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, N->getVTList(), Ops,
2572 MemSD->getMemoryVT(), MemSD->getMemOperand());
2573
2574 return Tcgen05StNode;
2575}
2576
2578 SDLoc DL(Op);
2579 SDValue Src = Op.getOperand(0);
2580 EVT VT = Op.getValueType();
2581
2582 switch (VT.getSimpleVT().SimpleTy) {
2583 case MVT::i16: {
2584 SDValue Extended = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src);
2585 SDValue Swapped =
2586 getPRMT(Extended, DAG.getConstant(0, DL, MVT::i32), 0x7701, DL, DAG);
2587 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Swapped);
2588 }
2589 case MVT::i32: {
2590 return getPRMT(Src, DAG.getConstant(0, DL, MVT::i32), 0x0123, DL, DAG);
2591 }
2592 case MVT::v2i16: {
2593 SDValue Converted = DAG.getBitcast(MVT::i32, Src);
2594 SDValue Swapped =
2595 getPRMT(Converted, DAG.getConstant(0, DL, MVT::i32), 0x2301, DL, DAG);
2596 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i16, Swapped);
2597 }
2598 case MVT::i64: {
2599 SDValue UnpackSrc =
2600 DAG.getNode(NVPTXISD::UNPACK_VECTOR, DL, {MVT::i32, MVT::i32}, Src);
2601 SDValue SwappedLow =
2602 getPRMT(UnpackSrc.getValue(0), DAG.getConstant(0, DL, MVT::i32), 0x0123,
2603 DL, DAG);
2604 SDValue SwappedHigh =
2605 getPRMT(UnpackSrc.getValue(1), DAG.getConstant(0, DL, MVT::i32), 0x0123,
2606 DL, DAG);
2607 return DAG.getNode(NVPTXISD::BUILD_VECTOR, DL, MVT::i64,
2608 {SwappedHigh, SwappedLow});
2609 }
2610 default:
2611 llvm_unreachable("unsupported type for bswap");
2612 }
2613}
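// For example, for the i64 value 0x0011223344556677, each 32-bit half is
// byte-reversed with selector 0x0123 (giving 0x77665544 and 0x33221100) and
// the halves are swapped when the i64 is rebuilt, yielding 0x7766554433221100.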
2614
2615static unsigned getTcgen05MMADisableOutputLane(unsigned IID) {
2616 switch (IID) {
2617 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
2618 return NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG1;
2619 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
2620 return NVPTXISD::TCGEN05_MMA_SHARED_DISABLE_OUTPUT_LANE_CG2;
2621 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
2622 return NVPTXISD::TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2623 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
2624 return NVPTXISD::TCGEN05_MMA_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2625 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
2626 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1;
2627 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
2628 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2;
2629 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
2630 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2631 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
2632 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2633 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
2634 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2635 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
2636 return NVPTXISD::TCGEN05_MMA_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2637 case Intrinsic::
2638 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
2639 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2640 case Intrinsic::
2641 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
2642 return NVPTXISD::TCGEN05_MMA_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2643 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
2644 return NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG1;
2645 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
2646 return NVPTXISD::TCGEN05_MMA_SP_SHARED_DISABLE_OUTPUT_LANE_CG2;
2647 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
2648 return NVPTXISD::TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2649 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
2650 return NVPTXISD::TCGEN05_MMA_SP_SHARED_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2651 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
2652 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1;
2653 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
2654 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2;
2655 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
2656 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2657 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
2658 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2659 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
2660 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1;
2661 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
2662 return NVPTXISD::TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2;
2663 case Intrinsic::
2664 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift:
2665 return NVPTXISD::
2666 TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG1_ASHIFT;
2667 case Intrinsic::
2668 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift:
2669 return NVPTXISD::
2670 TCGEN05_MMA_SP_TENSOR_SCALE_D_DISABLE_OUTPUT_LANE_CG2_ASHIFT;
2671 };
2672 llvm_unreachable("unhandled tcgen05.mma.disable_output_lane intrinsic");
2673}
2674
2676 SDNode *N = Op.getNode();
2677 SDLoc DL(N);
2678 unsigned IID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2679
2681 // split the vector argument
2682 for (size_t I = 0; I < N->getNumOperands(); I++) {
2683 if (I == 1)
2684 continue; // skip IID
2685 SDValue Val = N->getOperand(I);
2686 EVT ValVT = Val.getValueType();
2687 if (ValVT.isVector()) {
2688 EVT EltVT = ValVT.getVectorElementType();
2689 for (unsigned J = 0, NElts = ValVT.getVectorNumElements(); J < NElts; J++)
2690 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
2691 DAG.getIntPtrConstant(J, DL)));
2692 } else
2693 Ops.push_back(Val);
2694 }
2695
2697 SDValue Tcgen05MMANode = DAG.getMemIntrinsicNode(
2698 getTcgen05MMADisableOutputLane(IID), DL, N->getVTList(), Ops,
2699 MemSD->getMemoryVT(), MemSD->getMemOperand());
2700
2701 return Tcgen05MMANode;
2702}
2703
2704// Lower the vector return type of tcgen05.ld intrinsics.
2705static std::optional<std::pair<SDValue, SDValue>>
2706lowerTcgen05Ld(SDNode *N, SelectionDAG &DAG, bool HasOffset = false) {
2707 SDLoc DL(N);
2708 EVT ResVT = N->getValueType(0);
2709 if (!ResVT.isVector())
2710 return {}; // already legalized.
2711
2712 const unsigned NumElts = ResVT.getVectorNumElements();
2713
2714 // Create the return type of the instructions
2715 SmallVector<EVT, 5> ListVTs;
2716 for (unsigned i = 0; i < NumElts; ++i)
2717 ListVTs.push_back(MVT::i32);
2718
2719 ListVTs.push_back(N->getValueType(1)); // Chain
2720
2721 SDVTList ResVTs = DAG.getVTList(ListVTs);
2722
2723 SmallVector<SDValue, 8> Ops{N->getOperand(0), N->getOperand(1),
2724 N->getOperand(2)};
2725
2726 if (HasOffset) {
2727 Ops.push_back(N->getOperand(3)); // offset
2728 Ops.push_back(N->getOperand(4)); // Pack flag
2729 } else
2730 Ops.push_back(N->getOperand(3)); // Pack flag
2731
2733 SDValue NewNode =
2735 MemSD->getMemoryVT(), MemSD->getMemOperand());
2736
2737 // split the vector result
2738 SmallVector<SDValue, 4> ScalarRes;
2739 for (unsigned i = 0; i < NumElts; ++i) {
2740 SDValue Res = NewNode.getValue(i);
2741 ScalarRes.push_back(Res);
2742 }
2743
2744 SDValue Chain = NewNode.getValue(NumElts);
2745 SDValue BuildVector = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
2746 return {{BuildVector, Chain}};
2747}
2748
2750 unsigned Val) {
2751 SDNode *N = Op.getNode();
2752 SDLoc DL(N);
2753
2754 const Function &Fn = DAG.getMachineFunction().getFunction();
2755
2756 unsigned AS = 0;
2757 if (auto *MemN = dyn_cast<MemIntrinsicSDNode>(N))
2758 AS = MemN->getAddressSpace();
2759 Type *PtrTy = PointerType::get(*DAG.getContext(), AS);
2761
2763 Fn,
2764 "Intrinsic " +
2765 Intrinsic::getName(N->getConstantOperandVal(1), {PtrTy}, M) +
2766 " with value " + Twine(Val) +
2767 " is not supported on the given target.",
2768 DL.getDebugLoc()));
2769 return Op.getOperand(0);
2770}
2771
2773 SDNode *N = Op.getNode();
2774 SDLoc DL(N);
2775
2776 // immediate argument representing elemtype
2777 unsigned Val = N->getConstantOperandVal(3);
2778
2780 Val))
2781 return reportInvalidTensormapReplaceUsage(Op, DAG, Val);
2782
2783 return Op;
2784}
2785
2787 SDNode *N = Op.getNode();
2788 SDLoc DL(N);
2789
2790 // immediate argument representing swizzle mode
2791 unsigned Val = N->getConstantOperandVal(3);
2792
2794 Val))
2795 return reportInvalidTensormapReplaceUsage(Op, DAG, Val);
2796
2797 return Op;
2798}
2799
2801 SDNode *N = Op.getNode();
2802 SDValue Intrin = N->getOperand(1);
2803
2804 // Get the intrinsic ID
2805 unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
2806 switch (IntrinNo) {
2807 default:
2808 break;
2809 case Intrinsic::nvvm_tcgen05_st_16x64b_x1:
2810 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:
2811 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:
2812 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:
2813 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:
2814 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:
2815 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:
2816 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:
2817 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:
2818 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:
2819 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:
2820 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:
2821 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:
2822 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:
2823 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:
2824 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:
2825 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:
2826 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:
2827 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:
2828 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:
2829 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x1:
2830 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2:
2831 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4:
2832 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8:
2833 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16:
2834 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32:
2835 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64:
2836 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128:
2837 case Intrinsic::nvvm_tcgen05_st_32x32b_x1:
2838 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:
2839 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:
2840 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:
2841 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:
2842 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:
2843 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:
2844 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
2845 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:
2846 return lowerTcgen05St(Op, DAG);
2847 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
2848 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
2849 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
2850 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
2851 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
2852 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
2853 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
2854 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
2855 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
2856 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
2857 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
2858 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
2859 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
2860 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
2861 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
2862 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
2863 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
2864 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
2865 case Intrinsic::
2866 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
2867 case Intrinsic::
2868 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
2869 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
2870 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
2871 case Intrinsic::
2872 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift:
2873 case Intrinsic::
2874 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift:
2876 case Intrinsic::nvvm_tensormap_replace_elemtype:
2877 return lowerTensormapReplaceElemtype(Op, DAG);
2878 case Intrinsic::nvvm_tensormap_replace_swizzle_mode:
2880 }
2881 return Op;
2882}
2883
2885 SelectionDAG &DAG) {
2886
2887 SDNode *N = Op.getNode();
2888 if (N->getOperand(1).getValueType() != MVT::i128) {
2889 // Return if the operand is already lowered.
2890 return SDValue();
2891 }
2892
2893 unsigned IID =
2894 cast<ConstantSDNode>(N->getOperand(0).getNode())->getZExtValue();
2895 auto Opcode = [&]() {
2896 switch (IID) {
2897 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:
2898 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_IS_CANCELED;
2899 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:
2900 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_X;
2901 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
2902 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Y;
2903 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:
2904 return NVPTXISD::CLUSTERLAUNCHCONTROL_QUERY_CANCEL_GET_FIRST_CTAID_Z;
2905 default:
2906 llvm_unreachable("unsupported/unhandled intrinsic");
2907 }
2908 }();
2909
2910 SDLoc DL(N);
2911 SDValue TryCancelResponse = N->getOperand(1);
2912 SDValue Cast = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TryCancelResponse);
2913 SDValue TryCancelResponse0 =
2914 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
2915 DAG.getIntPtrConstant(0, DL));
2916 SDValue TryCancelResponse1 =
2917 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
2918 DAG.getIntPtrConstant(1, DL));
2919
2920 return DAG.getNode(Opcode, DL, N->getVTList(),
2921 {TryCancelResponse0, TryCancelResponse1});
2922}
2923
2925 SDNode *N = Op.getNode();
2926 SDLoc DL(N);
2927 SDValue F32Vec = N->getOperand(1);
2928 SDValue RBits = N->getOperand(2);
2929
2930 unsigned IntrinsicID = N->getConstantOperandVal(0);
2931
2932 // Extract the 4 float elements from the vector
2934 for (unsigned i = 0; i < 4; ++i)
2935 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, F32Vec,
2936 DAG.getIntPtrConstant(i, DL)));
2937
2939
2940 auto [OpCode, RetTy, CvtModeFlag] =
2941 [&]() -> std::tuple<unsigned, MVT::SimpleValueType, uint32_t> {
2942 switch (IntrinsicID) {
2943 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:
2944 return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8,
2945 CvtMode::RS | CvtMode::RELU_FLAG};
2946 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:
2947 return {NVPTXISD::CVT_E4M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2948 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:
2949 return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8,
2950 CvtMode::RS | CvtMode::RELU_FLAG};
2951 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:
2952 return {NVPTXISD::CVT_E5M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2953 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:
2954 return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8,
2955 CvtMode::RS | CvtMode::RELU_FLAG};
2956 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:
2957 return {NVPTXISD::CVT_E2M3X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2958 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:
2959 return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8,
2960 CvtMode::RS | CvtMode::RELU_FLAG};
2961 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:
2962 return {NVPTXISD::CVT_E3M2X4_F32X4_RS_SF, MVT::v4i8, CvtMode::RS};
2963 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:
2964 return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16,
2965 CvtMode::RS | CvtMode::RELU_FLAG};
2966 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:
2967 return {NVPTXISD::CVT_E2M1X4_F32X4_RS_SF, MVT::i16, CvtMode::RS};
2968 default:
2969 llvm_unreachable("unsupported/unhandled intrinsic");
2970 }
2971 }();
2972
2973 Ops.push_back(RBits);
2974 Ops.push_back(DAG.getConstant(CvtModeFlag, DL, MVT::i32));
2975
2976 return DAG.getNode(OpCode, DL, RetTy, Ops);
2977}
2978
2980 const unsigned Mode = [&]() {
2981 switch (Op->getConstantOperandVal(0)) {
2982 case Intrinsic::nvvm_prmt:
2984 case Intrinsic::nvvm_prmt_b4e:
2986 case Intrinsic::nvvm_prmt_ecl:
2988 case Intrinsic::nvvm_prmt_ecr:
2990 case Intrinsic::nvvm_prmt_f4e:
2992 case Intrinsic::nvvm_prmt_rc16:
2994 case Intrinsic::nvvm_prmt_rc8:
2996 default:
2997 llvm_unreachable("unsupported/unhandled intrinsic");
2998 }
2999 }();
3000 SDLoc DL(Op);
3001 SDValue A = Op->getOperand(1);
3002 SDValue B = Op.getNumOperands() == 4 ? Op.getOperand(2)
3003 : DAG.getConstant(0, DL, MVT::i32);
3004 SDValue Selector = (Op->op_end() - 1)->get();
3005 return getPRMT(A, B, Selector, DL, DAG, Mode);
3006}
3007
3009 switch (Op->getConstantOperandVal(1)) {
3010 default:
3011 return Op;
3012
3013 // These tcgen05 intrinsics return a v2i32, which is legal, so we have to
3014 // lower them through LowerOperation() instead of ReplaceNodeResults().
3015 case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:
3016 case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:
3017 case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:
3018 if (auto Res = lowerTcgen05Ld(Op.getNode(), DAG))
3019 return DAG.getMergeValues({Res->first, Res->second}, SDLoc(Op));
3020 return SDValue();
3021
3022 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2:
3023 if (auto Res = lowerTcgen05Ld(Op.getNode(), DAG, /*HasOffset=*/true))
3024 return DAG.getMergeValues({Res->first, Res->second}, SDLoc(Op));
3025 return SDValue();
3026 }
3027}
3028
3030 switch (Op->getConstantOperandVal(0)) {
3031 default:
3032 return Op;
3033 case Intrinsic::nvvm_prmt:
3034 case Intrinsic::nvvm_prmt_b4e:
3035 case Intrinsic::nvvm_prmt_ecl:
3036 case Intrinsic::nvvm_prmt_ecr:
3037 case Intrinsic::nvvm_prmt_f4e:
3038 case Intrinsic::nvvm_prmt_rc16:
3039 case Intrinsic::nvvm_prmt_rc8:
3040 return lowerPrmtIntrinsic(Op, DAG);
3041 case Intrinsic::nvvm_internal_addrspace_wrap:
3042 return Op.getOperand(1);
3043 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled:
3044 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x:
3045 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y:
3046 case Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z:
3048 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite:
3049 case Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite:
3050 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite:
3051 case Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite:
3052 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite:
3053 case Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite:
3054 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite:
3055 case Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite:
3056 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite:
3057 case Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite:
3058 return lowerCvtRSIntrinsics(Op, DAG);
3059 }
3060}
3061
3062// In PTX, 64-bit CTLZ and CTPOP are supported, but they return a 32-bit value.
3063// Lower these into a node returning a 32-bit result, which is then
3064// zero-extended back to 64 bits.
3066 SDValue V = Op->getOperand(0);
3067 assert(V.getValueType() == MVT::i64 &&
3068 "Unexpected CTLZ/CTPOP type to legalize");
3069
3070 SDLoc DL(Op);
3071 SDValue CT = DAG.getNode(Op->getOpcode(), DL, MVT::i32, V);
3072 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, CT, SDNodeFlags::NonNeg);
3073}
3074
3076 unsigned Opcode, SelectionDAG &DAG) {
3077 assert(A.getValueType() == MVT::i64 && B.getValueType() == MVT::i64);
3078
3079 const auto *AmtConst = dyn_cast<ConstantSDNode>(ShiftAmount);
3080 if (!AmtConst)
3081 return SDValue();
3082 const auto Amt = AmtConst->getZExtValue() & 63;
3083
3084 SDValue UnpackA =
3085 DAG.getNode(NVPTXISD::UNPACK_VECTOR, DL, {MVT::i32, MVT::i32}, A);
3086 SDValue UnpackB =
3087 DAG.getNode(NVPTXISD::UNPACK_VECTOR, DL, {MVT::i32, MVT::i32}, B);
3088
3089 // The arch is little-endian: 0 = low bits, 1 = high bits.
3090 SDValue ALo = UnpackA.getValue(0);
3091 SDValue AHi = UnpackA.getValue(1);
3092 SDValue BLo = UnpackB.getValue(0);
3093 SDValue BHi = UnpackB.getValue(1);
3094
3095 // The bitfield consists of { AHi : ALo : BHi : BLo }
3096 //
3097 // * FSHL, Amt < 32 - The window will contain { AHi : ALo : BHi }
3098 // * FSHL, Amt >= 32 - The window will contain { ALo : BHi : BLo }
3099 // * FSHR, Amt < 32 - The window will contain { ALo : BHi : BLo }
3100 // * FSHR, Amt >= 32 - The window will contain { AHi : ALo : BHi }
3101 //
3102 // Note that Amt = 0 and Amt = 32 are special cases where 32-bit funnel shifts
3103 // are not needed at all. Amt = 0 is a no-op producing either A or B depending
3104 // on the direction. Amt = 32 can be implemented by a packing and unpacking
3105 // move to select and arrange the 32-bit values. For simplicity, these cases
3106 // are not handled here explicitly and instead we rely on DAGCombiner to
3107 // remove the no-op funnel shifts we insert.
3108 auto [High, Mid, Low] = ((Opcode == ISD::FSHL) == (Amt < 32))
3109 ? std::make_tuple(AHi, ALo, BHi)
3110 : std::make_tuple(ALo, BHi, BLo);
3111
3112 SDValue NewAmt = DAG.getConstant(Amt & 31, DL, MVT::i32);
3113 SDValue RHi = DAG.getNode(Opcode, DL, MVT::i32, {High, Mid, NewAmt});
3114 SDValue RLo = DAG.getNode(Opcode, DL, MVT::i32, {Mid, Low, NewAmt});
3115
3116 return DAG.getNode(NVPTXISD::BUILD_VECTOR, DL, MVT::i64, {RLo, RHi});
3117}
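// Worked example: FSHL of A = 0x00000000AAAAAAAA, B = 0xBBBBBBBB00000000 by
// Amt = 8 should give (A << 8) | (B >> 56) = 0x000000AAAAAAAABB. With Amt < 32
// the window is { AHi : ALo : BHi }, so
//   RHi = fshl(AHi, ALo, 8) = 0x000000AA,
//   RLo = fshl(ALo, BHi, 8) = 0xAAAAAABB,
// which repack to the same 64-bit result.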
3118
3120 return expandFSH64(Op->getOperand(0), Op->getOperand(1), Op->getOperand(2),
3121 SDLoc(Op), Op->getOpcode(), DAG);
3122}
3123
3125 unsigned Opcode = Op->getOpcode() == ISD::ROTL ? ISD::FSHL : ISD::FSHR;
3126 return expandFSH64(Op->getOperand(0), Op->getOperand(0), Op->getOperand(1),
3127 SDLoc(Op), Opcode, DAG);
3128}
3129
3131 // Lower (frem x, y) into (sub x, (mul (ftrunc (div x, y)) y)),
3132 // i.e. "poor man's fmod()". When y is infinite, x is returned. This matches
3133 // the semantics of LLVM's frem.
3134 SDLoc DL(Op);
3135 SDValue X = Op->getOperand(0);
3136 SDValue Y = Op->getOperand(1);
3137 EVT Ty = Op.getValueType();
3138 SDNodeFlags Flags = Op->getFlags();
3139
3140 SDValue Div = DAG.getNode(ISD::FDIV, DL, Ty, X, Y, Flags);
3141 SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, Ty, Div, Flags);
3142 SDValue Mul = DAG.getNode(ISD::FMUL, DL, Ty, Trunc, Y,
3144 SDValue Sub = DAG.getNode(ISD::FSUB, DL, Ty, X, Mul,
3146
3147 if (Flags.hasNoInfs())
3148 return Sub;
3149
3150 // If Y is infinite, return X
3151 SDValue AbsY = DAG.getNode(ISD::FABS, DL, Ty, Y);
3152 SDValue Inf =
3153 DAG.getConstantFP(APFloat::getInf(Ty.getFltSemantics()), DL, Ty);
3154 SDValue IsInf = DAG.getSetCC(DL, MVT::i1, AbsY, Inf, ISD::SETEQ);
3155 return DAG.getSelect(DL, Ty, IsInf, X, Sub);
3156}
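// Worked example: frem(5.5, 2.0) lowers to 5.5 - trunc(5.5 / 2.0) * 2.0
// = 5.5 - 2.0 * 2.0 = 1.5, while frem(5.5, +inf) takes the IsInf select and
// returns 5.5 unchanged.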
3157
3159 assert(Op.getValueType() == MVT::i1 && "Custom lowering enabled only for i1");
3160
3161 SDValue Cond = Op->getOperand(0);
3162 SDValue TrueVal = Op->getOperand(1);
3163 SDValue FalseVal = Op->getOperand(2);
3164 SDLoc DL(Op);
3165
3166 // If both operands are truncated, we push the select through the truncates.
3167 if (TrueVal.getOpcode() == ISD::TRUNCATE &&
3168 FalseVal.getOpcode() == ISD::TRUNCATE) {
3169 TrueVal = TrueVal.getOperand(0);
3170 FalseVal = FalseVal.getOperand(0);
3171
3172 EVT VT = TrueVal.getSimpleValueType().bitsLE(FalseVal.getSimpleValueType())
3173 ? TrueVal.getValueType()
3174 : FalseVal.getValueType();
3175 TrueVal = DAG.getAnyExtOrTrunc(TrueVal, DL, VT);
3176 FalseVal = DAG.getAnyExtOrTrunc(FalseVal, DL, VT);
3177 SDValue Select = DAG.getSelect(DL, VT, Cond, TrueVal, FalseVal);
3178 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Select);
3179 }
3180
3181 // Otherwise, expand the select into a series of logical operations. These
3182 // often can be folded into other operations either by us or ptxas.
3183 TrueVal = DAG.getFreeze(TrueVal);
3184 FalseVal = DAG.getFreeze(FalseVal);
3185 SDValue And1 = DAG.getNode(ISD::AND, DL, MVT::i1, Cond, TrueVal);
3186 SDValue NotCond = DAG.getNOT(DL, Cond, MVT::i1);
3187 SDValue And2 = DAG.getNode(ISD::AND, DL, MVT::i1, NotCond, FalseVal);
3188 SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i1, And1, And2);
3189 return Or;
3190}
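// Sanity check of the logical form: with Cond = 1 the result is
// (1 & TrueVal) | (0 & FalseVal) = TrueVal, and with Cond = 0 it is FalseVal.
// Both operands are frozen first because the expansion reads both values,
// whereas a select would only observe the chosen one.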
3191
3193 SDNode *N = Op.getNode();
3194
3195 SDValue Chain = N->getOperand(0);
3196 SDValue Val = N->getOperand(1);
3197 SDValue BasePtr = N->getOperand(2);
3198 SDValue Offset = N->getOperand(3);
3199 SDValue Mask = N->getOperand(4);
3200
3201 SDLoc DL(N);
3202 EVT ValVT = Val.getValueType();
3203 MemSDNode *MemSD = cast<MemSDNode>(N);
3204 assert(ValVT.isVector() && "Masked vector store must have vector type");
3205 assert(MemSD->getAlign() >= DAG.getEVTAlign(ValVT) &&
3206 "Unexpected alignment for masked store");
3207
3208 unsigned Opcode = 0;
3209 switch (ValVT.getSimpleVT().SimpleTy) {
3210 default:
3211 llvm_unreachable("Unexpected masked vector store type");
3212 case MVT::v4i64:
3213 case MVT::v4f64: {
3214 Opcode = NVPTXISD::StoreV4;
3215 break;
3216 }
3217 case MVT::v8i32:
3218 case MVT::v8f32: {
3219 Opcode = NVPTXISD::StoreV8;
3220 break;
3221 }
3222 }
3223
3225
3226 // Construct the new SDNode. First operand is the chain.
3227 Ops.push_back(Chain);
3228
3229 // The next N operands are the values to store. Encode the mask into the
3230 // values using the sentinel register 0 to represent a masked-off element.
3231 assert(Mask.getValueType().isVector() &&
3232 Mask.getValueType().getVectorElementType() == MVT::i1 &&
3233 "Mask must be a vector of i1");
3234 assert(Mask.getOpcode() == ISD::BUILD_VECTOR &&
3235 "Mask expected to be a BUILD_VECTOR");
3236 assert(Mask.getValueType().getVectorNumElements() ==
3237 ValVT.getVectorNumElements() &&
3238 "Mask size must be the same as the vector size");
3239 for (auto [I, Op] : enumerate(Mask->ops())) {
3240 // Mask elements must be constants.
3241 if (Op.getNode()->getAsZExtVal() == 0) {
3242 // Append a sentinel register 0 to the Ops vector to represent a masked-off
3243 // element; this will be handled in tablegen.
3245 ValVT.getVectorElementType()));
3246 } else {
3247 // Extract the element from the vector to store
3248 SDValue ExtVal =
3250 Val, DAG.getIntPtrConstant(I, DL));
3251 Ops.push_back(ExtVal);
3252 }
3253 }
3254
3255 // Next, the pointer operand.
3256 Ops.push_back(BasePtr);
3257
3258 // Finally, the offset operand. We expect this to always be undef, and it will
3259 // be ignored in lowering, but to mirror the handling of the other vector
3260 // store instructions we include it in the new SDNode.
3261 assert(Offset.getOpcode() == ISD::UNDEF &&
3262 "Offset operand expected to be undef");
3263 Ops.push_back(Offset);
3264
3265 SDValue NewSt =
3266 DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
3267 MemSD->getMemoryVT(), MemSD->getMemOperand());
3268
3269 return NewSt;
3270}
3271
3272SDValue
3274 switch (Op.getOpcode()) {
3275 case ISD::RETURNADDR:
3276 return SDValue();
3277 case ISD::FRAMEADDR:
3278 return SDValue();
3279 case ISD::ADDRSPACECAST:
3280 return LowerADDRSPACECAST(Op, DAG);
3282 return lowerIntrinsicWChain(Op, DAG);
3284 return lowerIntrinsicWOChain(Op, DAG);
3286 return lowerIntrinsicVoid(Op, DAG);
3287 case ISD::BUILD_VECTOR:
3288 return LowerBUILD_VECTOR(Op, DAG);
3289 case ISD::BITCAST:
3290 return LowerBITCAST(Op, DAG);
3292 return Op;
3294 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
3296 return LowerINSERT_VECTOR_ELT(Op, DAG);
3298 return LowerVECTOR_SHUFFLE(Op, DAG);
3300 return LowerCONCAT_VECTORS(Op, DAG);
3305 return LowerVECREDUCE(Op, DAG);
3306 case ISD::STORE:
3307 return LowerSTORE(Op, DAG);
3308 case ISD::MSTORE: {
3309 assert(STI.has256BitVectorLoadStore(
3310 cast<MemSDNode>(Op.getNode())->getAddressSpace()) &&
3311 "Masked store vector not supported on subtarget.");
3312 return lowerMSTORE(Op, DAG);
3313 }
3314 case ISD::LOAD:
3315 return LowerLOAD(Op, DAG);
3316 case ISD::MLOAD:
3317 return LowerMLOAD(Op, DAG);
3318 case ISD::SHL_PARTS:
3319 return LowerShiftLeftParts(Op, DAG);
3320 case ISD::SRA_PARTS:
3321 case ISD::SRL_PARTS:
3322 return LowerShiftRightParts(Op, DAG);
3323 case ISD::SELECT:
3324 return lowerSELECT(Op, DAG);
3325 case ISD::FROUND:
3326 return LowerFROUND(Op, DAG);
3327 case ISD::FCOPYSIGN:
3328 return LowerFCOPYSIGN(Op, DAG);
3329 case ISD::SINT_TO_FP:
3330 case ISD::UINT_TO_FP:
3331 return LowerINT_TO_FP(Op, DAG);
3332 case ISD::FP_TO_SINT:
3333 case ISD::FP_TO_UINT:
3334 return LowerFP_TO_INT(Op, DAG);
3335 case ISD::FP_ROUND:
3336 return LowerFP_ROUND(Op, DAG);
3337 case ISD::FP_EXTEND:
3338 return LowerFP_EXTEND(Op, DAG);
3339 case ISD::VAARG:
3340 return LowerVAARG(Op, DAG);
3341 case ISD::VASTART:
3342 return LowerVASTART(Op, DAG);
3343 case ISD::FSHL:
3344 case ISD::FSHR:
3345 return lowerFSH(Op, DAG);
3346 case ISD::ROTL:
3347 case ISD::ROTR:
3348 return lowerROT(Op, DAG);
3349 case ISD::ABS:
3350 case ISD::SMIN:
3351 case ISD::SMAX:
3352 case ISD::UMIN:
3353 case ISD::UMAX:
3354 case ISD::ADD:
3355 case ISD::SUB:
3356 case ISD::MUL:
3357 case ISD::SHL:
3358 case ISD::SREM:
3359 case ISD::UREM:
3360 return LowerVectorArith(Op, DAG);
3361  case ISD::DYNAMIC_STACKALLOC:
3362    return LowerDYNAMIC_STACKALLOC(Op, DAG);
3363 case ISD::STACKRESTORE:
3364 return LowerSTACKRESTORE(Op, DAG);
3365 case ISD::STACKSAVE:
3366 return LowerSTACKSAVE(Op, DAG);
3367 case ISD::CopyToReg:
3368 return LowerCopyToReg_128(Op, DAG);
3369 case ISD::FADD:
3370 case ISD::FSUB:
3371 case ISD::FMUL:
3372 // Used only for bf16 on SM80, where we select fma for non-ftz operation
3373 return PromoteBinOpIfF32FTZ(Op, DAG);
3374 case ISD::CTPOP:
3375 case ISD::CTLZ:
3376 return lowerCTLZCTPOP(Op, DAG);
3377 case ISD::FREM:
3378 return lowerFREM(Op, DAG);
3379 case ISD::BSWAP:
3380 return lowerBSWAP(Op, DAG);
3381 default:
3382 llvm_unreachable("Custom lowering not defined for operation");
3383 }
3384}
3385
3386// This will prevent AsmPrinter from trying to print the jump tables itself.
3387unsigned NVPTXTargetLowering::getJumpTableEncoding() const {
3388  return MachineJumpTableInfo::EK_Inline;
3389}
3390
3391SDValue NVPTXTargetLowering::LowerADDRSPACECAST(SDValue Op,
3392 SelectionDAG &DAG) const {
3393  AddrSpaceCastSDNode *N = cast<AddrSpaceCastSDNode>(Op.getNode());
3394  unsigned SrcAS = N->getSrcAddressSpace();
3395 unsigned DestAS = N->getDestAddressSpace();
3396 if (SrcAS != llvm::ADDRESS_SPACE_GENERIC &&
3397 DestAS != llvm::ADDRESS_SPACE_GENERIC) {
3398 // Shared and SharedCluster can be converted to each other through generic
3399 // space
3400 if ((SrcAS == llvm::ADDRESS_SPACE_SHARED &&
3401         DestAS == llvm::ADDRESS_SPACE_SHARED_CLUSTER) ||
3402        (SrcAS == llvm::ADDRESS_SPACE_SHARED_CLUSTER &&
3403         DestAS == llvm::ADDRESS_SPACE_SHARED)) {
3404 SDLoc DL(Op.getNode());
3405 const MVT GenerictVT =
3406          getPointerTy(DAG.getDataLayout(), ADDRESS_SPACE_GENERIC);
3407      SDValue GenericConversion = DAG.getAddrSpaceCast(
3408 DL, GenerictVT, Op.getOperand(0), SrcAS, ADDRESS_SPACE_GENERIC);
3409 SDValue SharedClusterConversion =
3410 DAG.getAddrSpaceCast(DL, Op.getValueType(), GenericConversion,
3411 ADDRESS_SPACE_GENERIC, DestAS);
3412 return SharedClusterConversion;
3413 }
3414
3415 return DAG.getUNDEF(Op.getValueType());
3416 }
3417
3418 return Op;
3419}
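// Illustrative note (not part of the original source): a cast between two
// concrete spaces, e.g. shared -> shared::cluster, is therefore expanded into
//   addrspacecast shared -> generic
//   addrspacecast generic -> shared::cluster
// while any other non-generic pair is folded to undef.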
3420
3421// This function is almost a copy of SelectionDAG::expandVAArg().
3422// The only diff is that this one produces loads from local address space.
3423SDValue NVPTXTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3424 const TargetLowering *TLI = STI.getTargetLowering();
3425 SDLoc DL(Op);
3426
3427 SDNode *Node = Op.getNode();
3428 const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3429 EVT VT = Node->getValueType(0);
3430 auto *Ty = VT.getTypeForEVT(*DAG.getContext());
3431 SDValue Tmp1 = Node->getOperand(0);
3432 SDValue Tmp2 = Node->getOperand(1);
3433 const MaybeAlign MA(Node->getConstantOperandVal(3));
3434
3435 SDValue VAListLoad = DAG.getLoad(TLI->getPointerTy(DAG.getDataLayout()), DL,
3436 Tmp1, Tmp2, MachinePointerInfo(V));
3437 SDValue VAList = VAListLoad;
3438
3439 if (MA && *MA > TLI->getMinStackArgumentAlignment()) {
3440 VAList = DAG.getNode(
3441 ISD::ADD, DL, VAList.getValueType(), VAList,
3442 DAG.getConstant(MA->value() - 1, DL, VAList.getValueType()));
3443
3444 VAList = DAG.getNode(ISD::AND, DL, VAList.getValueType(), VAList,
3445 DAG.getSignedConstant(-(int64_t)MA->value(), DL,
3446 VAList.getValueType()));
3447 }
3448
3449 // Increment the pointer, VAList, to the next vaarg
3450 Tmp1 = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
3451                     DAG.getConstant(DAG.getDataLayout().getTypeAllocSize(Ty),
3452                                     DL, VAList.getValueType()));
3453
3454 // Store the incremented VAList to the legalized pointer
3455 Tmp1 = DAG.getStore(VAListLoad.getValue(1), DL, Tmp1, Tmp2,
3456 MachinePointerInfo(V));
3457
3458  const Value *SrcV = Constant::getNullValue(
3459      PointerType::get(*DAG.getContext(), ADDRESS_SPACE_LOCAL));
3460
3461 // Load the actual argument out of the pointer VAList
3462 return DAG.getLoad(VT, DL, Tmp1, VAList, MachinePointerInfo(SrcV));
3463}
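// Illustrative note (not part of the original source): for an overaligned
// va_arg the pointer is first rounded up,
//   VAList = (VAList + Align - 1) & -Align;
// and only then advanced by the allocation size of the argument type, so the
// next argument slot stays correctly aligned.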
3464
3465SDValue NVPTXTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3466 const TargetLowering *TLI = STI.getTargetLowering();
3467 SDLoc DL(Op);
3468 EVT PtrVT = TLI->getPointerTy(DAG.getDataLayout());
3469
3470 // Store the address of unsized array <function>_vararg[] in the ap object.
3471 SDValue VAReg = getParamSymbol(DAG, /* vararg */ -1, PtrVT);
3472
3473 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3474 return DAG.getStore(Op.getOperand(0), DL, VAReg, Op.getOperand(1),
3475 MachinePointerInfo(SV));
3476}
3477
3478static std::pair<MemSDNode *, uint32_t>
3480 const NVPTXSubtarget &STI) {
3481 SDValue Chain = N->getOperand(0);
3482 SDValue BasePtr = N->getOperand(1);
3483 SDValue Mask = N->getOperand(3);
3484 [[maybe_unused]] SDValue Passthru = N->getOperand(4);
3485
3486 SDLoc DL(N);
3487 EVT ResVT = N->getValueType(0);
3488 assert(ResVT.isVector() && "Masked vector load must have vector type");
3489 // While we only expect poison passthru vectors as an input to the backend,
3490 // when the legalization framework splits a poison vector in half, it creates
3491 // two undef vectors, so we can technically expect those too.
3492 assert((Passthru.getOpcode() == ISD::POISON ||
3493 Passthru.getOpcode() == ISD::UNDEF) &&
3494 "Passthru operand expected to be poison or undef");
3495
3496 // Extract the mask and convert it to a uint32_t representing the used bytes
3497 // of the entire vector load
3498 uint32_t UsedBytesMask = 0;
3499 uint32_t ElementSizeInBits = ResVT.getVectorElementType().getSizeInBits();
3500 assert(ElementSizeInBits % 8 == 0 && "Unexpected element size");
3501 uint32_t ElementSizeInBytes = ElementSizeInBits / 8;
3502 uint32_t ElementMask = (1u << ElementSizeInBytes) - 1u;
3503
3504 for (SDValue Op : reverse(Mask->ops())) {
3505 // We technically only want to do this shift for every
3506 // iteration *but* the first, but in the first iteration UsedBytesMask is 0,
3507 // so this shift is a no-op.
3508 UsedBytesMask <<= ElementSizeInBytes;
3509
3510 // Mask elements must be constants.
3511 if (Op->getAsZExtVal() != 0)
3512 UsedBytesMask |= ElementMask;
3513 }
3514
3515 assert(UsedBytesMask != 0 && UsedBytesMask != UINT32_MAX &&
3516 "Unexpected masked load with elements masked all on or all off");
3517
3518 // Create a new load sd node to be handled normally by ReplaceLoadVector.
3519 MemSDNode *NewLD = cast<MemSDNode>(
3520 DAG.getLoad(ResVT, DL, Chain, BasePtr, N->getMemOperand()).getNode());
3521
3522 // If our subtarget does not support the used bytes mask pragma, "drop" the
3523 // mask by setting it to UINT32_MAX
3524 if (!STI.hasUsedBytesMaskPragma())
3525 UsedBytesMask = UINT32_MAX;
3526
3527 return {NewLD, UsedBytesMask};
3528}
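// Worked example (not part of the original source): for a masked v4f32 load
// with mask <1,0,0,1>, each element covers 4 bytes, so the reverse loop above
// yields UsedBytesMask = 0xF00F: bits 0-3 for element 0, bits 12-15 for
// element 3, with the lowest bit corresponding to the first byte loaded.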
3529
3530/// replaceLoadVector - Convert vector loads into multi-output scalar loads.
3531static std::optional<std::pair<SDValue, SDValue>>
3532replaceLoadVector(MemSDNode *LD, SelectionDAG &DAG,
3533                  const NVPTXSubtarget &STI) {
3534  const EVT ResVT = LD->getValueType(0);
3535 const EVT MemVT = LD->getMemoryVT();
3536
3537 // If we're doing sign/zero extension as part of the load, avoid lowering to
3538 // a LoadV node. TODO: consider relaxing this restriction.
3539 if (ResVT != MemVT)
3540 return std::nullopt;
3541
3542 const auto NumEltsAndEltVT =
3543 getVectorLoweringShape(ResVT, STI, LD->getAddressSpace());
3544 if (!NumEltsAndEltVT)
3545 return std::nullopt;
3546 const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
3547
3548 Align Alignment = LD->getAlign();
3549 const auto &TD = DAG.getDataLayout();
3550 Align PrefAlign = TD.getPrefTypeAlign(MemVT.getTypeForEVT(*DAG.getContext()));
3551 if (Alignment < PrefAlign) {
3552 // This load is not sufficiently aligned, so bail out and let this vector
3553 // load be scalarized. Note that we may still be able to emit smaller
3554 // vector loads. For example, if we are loading a <4 x float> with an
3555 // alignment of 8, this check will fail but the legalizer will try again
3556 // with 2 x <2 x float>, which will succeed with an alignment of 8.
3557 return std::nullopt;
3558 }
3559
3560 // If we have a masked load, convert it to a normal load now
3561 std::optional<uint32_t> UsedBytesMask = std::nullopt;
3562 if (LD->getOpcode() == ISD::MLOAD)
3563 std::tie(LD, UsedBytesMask) =
3565
3566 // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
3567 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
3568 // loaded type to i16 and propagate the "real" type as the memory type.
3569 const MVT LoadEltVT = (EltVT.getSizeInBits() < 16) ? MVT::i16 : EltVT;
3570
3571 unsigned Opcode;
3572 switch (NumElts) {
3573 default:
3574 return std::nullopt;
3575 case 2:
3576 Opcode = NVPTXISD::LoadV2;
3577 break;
3578 case 4:
3579 Opcode = NVPTXISD::LoadV4;
3580 break;
3581 case 8:
3582 Opcode = NVPTXISD::LoadV8;
3583 break;
3584 }
3585 auto ListVTs = SmallVector<EVT, 9>(NumElts, LoadEltVT);
3586 ListVTs.push_back(MVT::Other);
3587 SDVTList LdResVTs = DAG.getVTList(ListVTs);
3588
3589 SDLoc DL(LD);
3590
3591 // Copy regular operands
3592 SmallVector<SDValue, 8> OtherOps(LD->ops());
3593
3594 OtherOps.push_back(
3595 DAG.getConstant(UsedBytesMask.value_or(UINT32_MAX), DL, MVT::i32));
3596
3597 // The select routine does not have access to the LoadSDNode instance, so
3598 // pass along the extension information
3599 OtherOps.push_back(
3600 DAG.getIntPtrConstant(cast<LoadSDNode>(LD)->getExtensionType(), DL));
3601
3602 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps, MemVT,
3603 LD->getMemOperand());
3604
3605 SmallVector<SDValue> ScalarRes;
3606 if (EltVT.isVector()) {
3608 assert(NumElts * EltVT.getVectorNumElements() ==
3609 ResVT.getVectorNumElements());
3610 // Generate EXTRACT_VECTOR_ELTs to split v2[i,f,bf]16/v4i8 subvectors back
3611 // into individual elements.
3612 for (const unsigned I : llvm::seq(NumElts)) {
3613 SDValue SubVector = NewLD.getValue(I);
3614 DAG.ExtractVectorElements(SubVector, ScalarRes);
3615 }
3616 } else {
3617 for (const unsigned I : llvm::seq(NumElts)) {
3618 SDValue Res = NewLD.getValue(I);
3619 if (LoadEltVT != EltVT)
3620 Res = DAG.getNode(ISD::TRUNCATE, DL, EltVT, Res);
3621 ScalarRes.push_back(Res);
3622 }
3623 }
3624
3625 SDValue LoadChain = NewLD.getValue(NumElts);
3626
3627 const MVT BuildVecVT =
3628 MVT::getVectorVT(EltVT.getScalarType(), ScalarRes.size());
3629 SDValue BuildVec = DAG.getBuildVector(BuildVecVT, DL, ScalarRes);
3630 SDValue LoadValue = DAG.getBitcast(ResVT, BuildVec);
3631
3632 return {{LoadValue, LoadChain}};
3633}
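// Illustrative note (not part of the original source): a sufficiently aligned
// load of v4f32 becomes one NVPTXISD::LoadV4 with four f32 results plus a
// chain; the scalar results are then glued back together with a BUILD_VECTOR
// (plus a bitcast when the element type is a packed subvector such as v2f16).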
3634
3637 const NVPTXSubtarget &STI) {
3638 if (auto Res = replaceLoadVector(N, DAG, STI))
3639 Results.append({Res->first, Res->second});
3640}
3641
3643 const NVPTXSubtarget &STI) {
3644 if (auto Res = replaceLoadVector(N, DAG, STI))
3645 return DAG.getMergeValues({Res->first, Res->second}, SDLoc(N));
3646 return SDValue();
3647}
3648
3649// v = ld i1* addr
3650// =>
3651// v1 = ld i8* addr (-> i16)
3652// v = trunc i16 to i1
3653static SDValue lowerLOADi1(LoadSDNode *LD, SelectionDAG &DAG) {
3654  SDLoc dl(LD);
3655 assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
3656 assert(LD->getValueType(0) == MVT::i1 && "Custom lowering for i1 load only");
3657 SDValue newLD = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i16, LD->getChain(),
3658 LD->getBasePtr(), LD->getPointerInfo(),
3659 MVT::i8, LD->getAlign(),
3660 LD->getMemOperand()->getFlags());
3661 SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
3662 // The legalizer (the caller) is expecting two values from the legalized
3663 // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
3664 // in LegalizeDAG.cpp which also uses MergeValues.
3665 return DAG.getMergeValues({result, LD->getChain()}, dl);
3666}
3667
3668SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
3669 LoadSDNode *LD = cast<LoadSDNode>(Op);
3670
3671 if (Op.getValueType() == MVT::i1)
3672 return lowerLOADi1(LD, DAG);
3673
3674 // To improve CodeGen we'll legalize any-extend loads to zext loads. This is
3675 // how they'll be lowered in ISel anyway, and by doing this a little earlier
3676 // we allow for more DAG combine opportunities.
3677 if (LD->getExtensionType() == ISD::EXTLOAD) {
3678 assert(LD->getValueType(0).isInteger() && LD->getMemoryVT().isInteger() &&
3679 "Unexpected fpext-load");
3680 return DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(Op), Op.getValueType(),
3681 LD->getChain(), LD->getBasePtr(), LD->getMemoryVT(),
3682 LD->getMemOperand());
3683 }
3684
3685 llvm_unreachable("Unexpected custom lowering for load");
3686}
3687
3688SDValue NVPTXTargetLowering::LowerMLOAD(SDValue Op, SelectionDAG &DAG) const {
3689 // v2f16/v2bf16/v2i16/v4i8 are legal, so we can't rely on legalizer to handle
3690 // masked loads of these types and have to handle them here.
3691 // v2f32 also needs to be handled here if the subtarget has f32x2
3692 // instructions, making it legal.
3693 //
3694 // Note: misaligned masked loads should never reach this point
3695 // because the override of isLegalMaskedLoad in NVPTXTargetTransformInfo.cpp
3696 // will validate alignment. Therefore, we do not need to special case handle
3697 // them here.
3698 EVT VT = Op.getValueType();
3699 if (NVPTX::isPackedVectorTy(VT)) {
3701 cast<MemSDNode>(Op.getNode()), DAG, STI);
3702 MemSDNode *LD = std::get<0>(Result);
3703 uint32_t UsedBytesMask = std::get<1>(Result);
3704
3705 SDLoc DL(LD);
3706
3707 // Copy regular operands
3708 SmallVector<SDValue, 8> OtherOps(LD->ops());
3709
3710 OtherOps.push_back(DAG.getConstant(UsedBytesMask, DL, MVT::i32));
3711
3712 // We currently are not lowering extending loads, but pass the extension
3713 // type anyway as later handling expects it.
3714 OtherOps.push_back(
3715 DAG.getIntPtrConstant(cast<LoadSDNode>(LD)->getExtensionType(), DL));
3716 SDValue NewLD =
3717 DAG.getMemIntrinsicNode(NVPTXISD::MLoad, DL, LD->getVTList(), OtherOps,
3718 LD->getMemoryVT(), LD->getMemOperand());
3719 return NewLD;
3720 }
3721 return SDValue();
3722}
3723
3724static SDValue lowerSTOREVector(SDValue Op, SelectionDAG &DAG,
3725                                const NVPTXSubtarget &STI) {
3726 MemSDNode *N = cast<MemSDNode>(Op.getNode());
3727 SDValue Val = N->getOperand(1);
3728 SDLoc DL(N);
3729 const EVT ValVT = Val.getValueType();
3730 const EVT MemVT = N->getMemoryVT();
3731
3732 // If we're truncating as part of the store, avoid lowering to a StoreV node.
3733 // TODO: consider relaxing this restriction.
3734 if (ValVT != MemVT)
3735 return SDValue();
3736
3737 const auto NumEltsAndEltVT =
3738 getVectorLoweringShape(ValVT, STI, N->getAddressSpace());
3739 if (!NumEltsAndEltVT)
3740 return SDValue();
3741 const auto [NumElts, EltVT] = NumEltsAndEltVT.value();
3742
3743 const DataLayout &TD = DAG.getDataLayout();
3744
3745 Align Alignment = N->getAlign();
3746 Align PrefAlign = TD.getPrefTypeAlign(ValVT.getTypeForEVT(*DAG.getContext()));
3747 if (Alignment < PrefAlign) {
3748 // This store is not sufficiently aligned, so bail out and let this vector
3749 // store be scalarized. Note that we may still be able to emit smaller
3750 // vector stores. For example, if we are storing a <4 x float> with an
3751 // alignment of 8, this check will fail but the legalizer will try again
3752 // with 2 x <2 x float>, which will succeed with an alignment of 8.
3753 return SDValue();
3754 }
3755
3756 unsigned Opcode;
3757 switch (NumElts) {
3758 default:
3759 return SDValue();
3760 case 2:
3761 Opcode = NVPTXISD::StoreV2;
3762 break;
3763 case 4:
3764 Opcode = NVPTXISD::StoreV4;
3765 break;
3766 case 8:
3767 Opcode = NVPTXISD::StoreV8;
3768 break;
3769 }
3770
3771  SmallVector<SDValue, 16> Ops;
3772
3773 // First is the chain
3774 Ops.push_back(N->getOperand(0));
3775
3776 // Then the split values
3777 if (EltVT.isVector()) {
3779 assert(NumElts * EltVT.getVectorNumElements() ==
3780 ValVT.getVectorNumElements());
3781 // Combine individual elements into v2[i,f,bf]16/v4i8 subvectors to be
3782 // stored as b32s
3783 const unsigned NumEltsPerSubVector = EltVT.getVectorNumElements();
3784 for (const unsigned I : llvm::seq(NumElts)) {
3785 SmallVector<SDValue, 4> SubVectorElts;
3786 DAG.ExtractVectorElements(Val, SubVectorElts, I * NumEltsPerSubVector,
3787 NumEltsPerSubVector);
3788 Ops.push_back(DAG.getBuildVector(EltVT, DL, SubVectorElts));
3789 }
3790 } else {
3791 SDValue V = DAG.getBitcast(MVT::getVectorVT(EltVT, NumElts), Val);
3792 for (const unsigned I : llvm::seq(NumElts)) {
3793 SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, V,
3794 DAG.getIntPtrConstant(I, DL));
3795
3796 // Since StoreV2 is a target node, we cannot rely on DAG type
3797 // legalization. Therefore, we must ensure the type is legal. For i1 and
3798 // i8, we set the stored type to i16 and propagate the "real" type as the
3799 // memory type.
3800 if (EltVT.getSizeInBits() < 16)
3801 ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
3802 Ops.push_back(ExtVal);
3803 }
3804 }
3805
3806 // Then any remaining arguments
3807 Ops.append(N->op_begin() + 2, N->op_end());
3808
3809 SDValue NewSt =
3810 DAG.getMemIntrinsicNode(Opcode, DL, DAG.getVTList(MVT::Other), Ops,
3811 N->getMemoryVT(), N->getMemOperand());
3812
3813 // return DCI.CombineTo(N, NewSt, true);
3814 return NewSt;
3815}
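// Illustrative note (not part of the original source): a store of v8f16 is
// emitted as NVPTXISD::StoreV4 whose four value operands are v2f16 subvectors
// (stored as b32), while a store of v4f32 becomes StoreV4 with four scalar
// f32 operands; i1/i8 elements are any-extended to i16 first.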
3816
3817SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
3818 StoreSDNode *Store = cast<StoreSDNode>(Op);
3819 EVT VT = Store->getMemoryVT();
3820
3821 if (VT == MVT::i1)
3822 return LowerSTOREi1(Op, DAG);
3823
3824 // Lower store of any other vector type, including v2f32 as we want to break
3825 // it apart since this is not a widely-supported type.
3826 return lowerSTOREVector(Op, DAG, STI);
3827}
3828
3829// st i1 v, addr
3830// =>
3831// v1 = zxt v to i16
3832// st.u8 i16, addr
3833SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
3834 SDNode *Node = Op.getNode();
3835 SDLoc dl(Node);
3836 StoreSDNode *ST = cast<StoreSDNode>(Node);
3837 SDValue Tmp1 = ST->getChain();
3838 SDValue Tmp2 = ST->getBasePtr();
3839 SDValue Tmp3 = ST->getValue();
3840 assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
3841 Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
3842 SDValue Result =
3843 DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), MVT::i8,
3844 ST->getAlign(), ST->getMemOperand()->getFlags());
3845 return Result;
3846}
3847
3848SDValue NVPTXTargetLowering::LowerCopyToReg_128(SDValue Op,
3849 SelectionDAG &DAG) const {
3850 // Change the CopyToReg to take in two 64-bit operands instead of a 128-bit
3851 // operand so that it can pass the legalization.
3852
3853 assert(Op.getOperand(1).getValueType() == MVT::i128 &&
3854 "Custom lowering for 128-bit CopyToReg only");
3855
3856 SDNode *Node = Op.getNode();
3857 SDLoc DL(Node);
3858
3859 SDValue Cast = DAG.getBitcast(MVT::v2i64, Op->getOperand(2));
3860 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
3861 DAG.getIntPtrConstant(0, DL));
3862 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, Cast,
3863 DAG.getIntPtrConstant(1, DL));
3864
3865  SmallVector<SDValue, 5> NewOps(Op->getNumOperands() + 1);
3866 SmallVector<EVT, 3> ResultsType(Node->values());
3867
3868 NewOps[0] = Op->getOperand(0); // Chain
3869 NewOps[1] = Op->getOperand(1); // Dst Reg
3870 NewOps[2] = Lo; // Lower 64-bit
3871 NewOps[3] = Hi; // Higher 64-bit
3872 if (Op.getNumOperands() == 4)
3873 NewOps[4] = Op->getOperand(3); // Glue if exists
3874
3875 return DAG.getNode(ISD::CopyToReg, DL, ResultsType, NewOps);
3876}
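// Illustrative note (not part of the original source): an i128 CopyToReg is
// rebuilt as CopyToReg(chain, reg, lo64, hi64[, glue]), where lo64/hi64 come
// from bitcasting the 128-bit value to v2i64 and extracting elements 0 and 1.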
3877
3878unsigned NVPTXTargetLowering::getNumRegisters(
3879 LLVMContext &Context, EVT VT,
3880 std::optional<MVT> RegisterVT = std::nullopt) const {
3881 if (VT == MVT::i128 && RegisterVT == MVT::i128)
3882 return 1;
3883 return TargetLoweringBase::getNumRegisters(Context, VT, RegisterVT);
3884}
3885
3886bool NVPTXTargetLowering::splitValueIntoRegisterParts(
3887 SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
3888 unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
3889 if (Val.getValueType() == MVT::i128 && NumParts == 1) {
3890 Parts[0] = Val;
3891 return true;
3892 }
3893 return false;
3894}
3895
3896// This creates target external symbol for a function parameter.
3897// Name of the symbol is composed from its index and the function name.
3898// Negative index corresponds to special parameter (unsized array) used for
3899// passing variable arguments.
3900SDValue NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int I,
3901 EVT T) const {
3902 StringRef SavedStr = nvTM->getStrPool().save(
3904 return DAG.getExternalSymbol(SavedStr.data(), T);
3905}
3906
3907SDValue NVPTXTargetLowering::getCallParamSymbol(SelectionDAG &DAG, int I,
3908 EVT T) const {
3909 const StringRef SavedStr = nvTM->getStrPool().save("param" + Twine(I));
3910 return DAG.getExternalSymbol(SavedStr.data(), T);
3911}
3912
3913SDValue NVPTXTargetLowering::LowerFormalArguments(
3914    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3915 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3916 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3917 const DataLayout &DL = DAG.getDataLayout();
3918 LLVMContext &Ctx = *DAG.getContext();
3919 auto PtrVT = getPointerTy(DAG.getDataLayout());
3920
3921 const Function &F = DAG.getMachineFunction().getFunction();
3922
3923 SDValue Root = DAG.getRoot();
3924 SmallVector<SDValue, 16> OutChains;
3925
3926 // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
3927 // Ins.size() will be larger
3928 // * if there is an aggregate argument with multiple fields (each field
3929 // showing up separately in Ins)
3930 // * if there is a vector argument with more than typical vector-length
3931 // elements (generally if more than 4) where each vector element is
3932 // individually present in Ins.
3933 // So a different index should be used for indexing into Ins.
3934 // See similar issue in LowerCall.
3935
3936 auto AllIns = ArrayRef(Ins);
3937 for (const auto &Arg : F.args()) {
3938 const auto ArgIns = AllIns.take_while(
3939 [&](auto I) { return I.OrigArgIndex == Arg.getArgNo(); });
3940 AllIns = AllIns.drop_front(ArgIns.size());
3941
3942 Type *Ty = Arg.getType();
3943
3944 if (ArgIns.empty())
3945 report_fatal_error("Empty parameter types are not supported");
3946
3947 if (Arg.use_empty()) {
3948 // argument is dead
3949 for (const auto &In : ArgIns) {
3950 assert(!In.Used && "Arg.use_empty() is true but Arg is used?");
3951 InVals.push_back(DAG.getUNDEF(In.VT));
3952 }
3953 continue;
3954 }
3955
3956 SDValue ArgSymbol = getParamSymbol(DAG, Arg.getArgNo(), PtrVT);
3957
3958 // In the following cases, assign a node order of "i+1"
3959 // to newly created nodes. The SDNodes for params have to
3960 // appear in the same order as their order of appearance
3961 // in the original function. "i+1" holds that order.
3962 if (Arg.hasByValAttr()) {
3963 // Param has ByVal attribute
3964 // Return MoveParam(param symbol).
3965 // Ideally, the param symbol can be returned directly,
3966 // but when SDNode builder decides to use it in a CopyToReg(),
3967 // machine instruction fails because TargetExternalSymbol
3968 // (not lowered) is target dependent, and CopyToReg assumes
3969 // the source is lowered.
3970 assert(ArgIns.size() == 1 && "ByVal argument must be a pointer");
3971 const auto &ByvalIn = ArgIns[0];
3972 assert(getValueType(DL, Ty) == ByvalIn.VT &&
3973 "Ins type did not match function type");
3974 assert(ByvalIn.VT == PtrVT && "ByVal argument must be a pointer");
3975
3976 SDValue P;
3977 if (isKernelFunction(F)) {
3978 P = ArgSymbol;
3979 P.getNode()->setIROrder(Arg.getArgNo() + 1);
3980 } else {
3981 P = DAG.getNode(NVPTXISD::MoveParam, dl, ByvalIn.VT, ArgSymbol);
3982 P.getNode()->setIROrder(Arg.getArgNo() + 1);
3983 P = DAG.getAddrSpaceCast(dl, ByvalIn.VT, P, ADDRESS_SPACE_LOCAL,
3984                                 ADDRESS_SPACE_GENERIC);
3985      }
3986 InVals.push_back(P);
3987 } else {
3988      SmallVector<EVT, 16> VTs;
3989      SmallVector<uint64_t, 16> Offsets;
3990      ComputePTXValueVTs(*this, DL, Ctx, CallConv, Ty, VTs, Offsets);
3991 assert(VTs.size() == ArgIns.size() && "Size mismatch");
3992 assert(VTs.size() == Offsets.size() && "Size mismatch");
3993
3994 const Align ArgAlign = getFunctionArgumentAlignment(
3995 &F, Ty, Arg.getArgNo() + AttributeList::FirstArgIndex, DL);
3996
3997 unsigned I = 0;
3998 const auto VI = VectorizePTXValueVTs(VTs, Offsets, ArgAlign);
3999 for (const unsigned NumElts : VI) {
4000 // i1 is loaded/stored as i8
4001 const EVT LoadVT = VTs[I] == MVT::i1 ? MVT::i8 : VTs[I];
4002 const EVT VecVT = getVectorizedVT(LoadVT, NumElts, Ctx);
4003
4004 SDValue VecAddr = DAG.getObjectPtrOffset(
4005 dl, ArgSymbol, TypeSize::getFixed(Offsets[I]));
4006
4007 const Align PartAlign = commonAlignment(ArgAlign, Offsets[I]);
4008 SDValue P =
4009 DAG.getLoad(VecVT, dl, Root, VecAddr,
4010                        MachinePointerInfo(ADDRESS_SPACE_PARAM), PartAlign,
4011                        MachineMemOperand::MODereferenceable |
4012                            MachineMemOperand::MOInvariant);
4013        P.getNode()->setIROrder(Arg.getArgNo() + 1);
4014 for (const unsigned J : llvm::seq(NumElts)) {
4015 SDValue Elt = getExtractVectorizedValue(P, J, LoadVT, dl, DAG);
4016
4017 Elt = correctParamType(Elt, ArgIns[I + J].VT, ArgIns[I + J].Flags,
4018 DAG, dl);
4019 InVals.push_back(Elt);
4020 }
4021 I += NumElts;
4022 }
4023 }
4024 }
4025
4026 if (!OutChains.empty())
4027 DAG.setRoot(DAG.getTokenFactor(dl, OutChains));
4028
4029 return Chain;
4030}
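// Illustrative note (not part of the original source): each formal argument is
// read back through its parameter symbol (see getParamSymbol above) with
// vectorized loads, and every element is run through correctParamType before
// being appended to InVals, so the order matches the Ins array built by the
// legalizer.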
4031
4032SDValue
4033NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
4034                                 bool isVarArg,
4035                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
4036                                 const SmallVectorImpl<SDValue> &OutVals,
4037 const SDLoc &dl, SelectionDAG &DAG) const {
4038 const Function &F = DAG.getMachineFunction().getFunction();
4039 Type *RetTy = F.getReturnType();
4040
4041 if (RetTy->isVoidTy()) {
4042 assert(OutVals.empty() && Outs.empty() && "Return value expected for void");
4043 return DAG.getNode(NVPTXISD::RET_GLUE, dl, MVT::Other, Chain);
4044 }
4045
4046 const DataLayout &DL = DAG.getDataLayout();
4047 LLVMContext &Ctx = *DAG.getContext();
4048
4049 const SDValue RetSymbol = DAG.getExternalSymbol("func_retval0", MVT::i32);
4050 const auto RetAlign = getFunctionParamOptimizedAlign(&F, RetTy, DL);
4051
4052 // PTX Interoperability Guide 3.3(A): [Integer] Values shorter than
4053 // 32-bits are sign extended or zero extended, depending on whether
4054 // they are signed or unsigned types.
4055 const bool ExtendIntegerRetVal =
4056 RetTy->isIntegerTy() && DL.getTypeAllocSizeInBits(RetTy) < 32;
4057
4060 ComputePTXValueVTs(*this, DL, Ctx, CallConv, RetTy, VTs, Offsets);
4061 assert(VTs.size() == OutVals.size() && "Bad return value decomposition");
4062
4063 const auto GetRetVal = [&](unsigned I) -> SDValue {
4064 SDValue RetVal = OutVals[I];
4065    assert(promoteScalarIntegerPTX(RetVal.getValueType()) ==
4066               RetVal.getValueType() &&
4067 "OutVal type should always be legal");
4068
4069 const EVT VTI = promoteScalarIntegerPTX(VTs[I]);
4070 const EVT StoreVT =
4071 ExtendIntegerRetVal ? MVT::i32 : (VTI == MVT::i1 ? MVT::i8 : VTI);
4072 return correctParamType(RetVal, StoreVT, Outs[I].Flags, DAG, dl);
4073 };
4074
4075 unsigned I = 0;
4076 const auto VI = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
4077 for (const unsigned NumElts : VI) {
4078 const MaybeAlign CurrentAlign = ExtendIntegerRetVal
4079 ? MaybeAlign(std::nullopt)
4080 : commonAlignment(RetAlign, Offsets[I]);
4081
4083 NumElts, dl, DAG, [&](unsigned K) { return GetRetVal(I + K); });
4084
4085 SDValue Ptr =
4086 DAG.getObjectPtrOffset(dl, RetSymbol, TypeSize::getFixed(Offsets[I]));
4087
4088 Chain = DAG.getStore(Chain, dl, Val, Ptr,
4089                         MachinePointerInfo(ADDRESS_SPACE_PARAM), CurrentAlign);
4090
4091 I += NumElts;
4092 }
4093
4094 return DAG.getNode(NVPTXISD::RET_GLUE, dl, MVT::Other, Chain);
4095}
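// Illustrative note (not part of the original source): an i8 or i16 return
// value is widened to i32 before being stored to func_retval0, per the PTX
// interoperability rule cited above, while larger return types are written
// back in vectorized chunks at the offsets computed by ComputePTXValueVTs.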
4096
4097void NVPTXTargetLowering::LowerAsmOperandForConstraint(
4098    SDValue Op, StringRef Constraint, std::vector<SDValue> &Ops,
4099 SelectionDAG &DAG) const {
4100 if (Constraint.size() > 1)
4101 return;
4102  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
4103}
4104
4105// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
4106// TgtMemIntrinsic
4107// because we need the information that is only available in the "Value" type
4108// of destination
4109// pointer. In particular, the address space information.
4110bool NVPTXTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
4111                                             const CallBase &I,
4112 MachineFunction &MF,
4113 unsigned Intrinsic) const {
4114 switch (Intrinsic) {
4115 default:
4116 return false;
4117 case Intrinsic::nvvm_match_all_sync_i32p:
4118 case Intrinsic::nvvm_match_all_sync_i64p:
4119 Info.opc = ISD::INTRINSIC_W_CHAIN;
4120 // memVT is bogus. These intrinsics have IntrInaccessibleMemOnly attribute
4121 // in order to model data exchange with other threads, but perform no real
4122 // memory accesses.
4123 Info.memVT = MVT::i1;
4124
4125 // Our result depends on both our and other thread's arguments.
4126    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
4127    return true;
4128 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col:
4129 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row:
4130 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride:
4131 case Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride:
4132 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col:
4133 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row:
4134 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col_stride:
4135 case Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row_stride:
4136 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col:
4137 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row:
4138 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col_stride:
4139 case Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row_stride:
4140 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col:
4141 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row:
4142 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col_stride:
4143 case Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row_stride:
4144 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col:
4145 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row:
4146 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col_stride:
4147 case Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row_stride:
4148 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col:
4149 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row:
4150 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col_stride:
4151 case Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row_stride: {
4152 Info.opc = ISD::INTRINSIC_W_CHAIN;
4153 Info.memVT = MVT::v8f16;
4154 Info.ptrVal = I.getArgOperand(0);
4155 Info.offset = 0;
4156 Info.flags = MachineMemOperand::MOLoad;
4157 Info.align = Align(16);
4158 return true;
4159 }
4160 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col:
4161 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_col_stride:
4162 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col_stride:
4163 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_col:
4164 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row:
4165 case Intrinsic::nvvm_wmma_m16n16k16_load_a_s8_row_stride:
4166 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row_stride:
4167 case Intrinsic::nvvm_wmma_m16n16k16_load_a_u8_row:
4168 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col:
4169 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_col_stride:
4170 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row:
4171 case Intrinsic::nvvm_wmma_m8n32k16_load_a_bf16_row_stride:
4172 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col:
4173 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_col_stride:
4174 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col_stride:
4175 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_col:
4176 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row:
4177 case Intrinsic::nvvm_wmma_m16n16k16_load_b_s8_row_stride:
4178 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row_stride:
4179 case Intrinsic::nvvm_wmma_m16n16k16_load_b_u8_row:
4180 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col:
4181 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_col_stride:
4182 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row:
4183 case Intrinsic::nvvm_wmma_m32n8k16_load_b_bf16_row_stride: {
4184 Info.opc = ISD::INTRINSIC_W_CHAIN;
4185 Info.memVT = MVT::v2i32;
4186 Info.ptrVal = I.getArgOperand(0);
4187 Info.offset = 0;
4188 Info.flags = MachineMemOperand::MOLoad;
4189 Info.align = Align(8);
4190 return true;
4191 }
4192
4193 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col:
4194 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_col_stride:
4195 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col_stride:
4196 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_col:
4197 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row:
4198 case Intrinsic::nvvm_wmma_m32n8k16_load_a_s8_row_stride:
4199 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row_stride:
4200 case Intrinsic::nvvm_wmma_m32n8k16_load_a_u8_row:
4201 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col:
4202 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_col_stride:
4203 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row:
4204 case Intrinsic::nvvm_wmma_m16n16k16_load_a_bf16_row_stride:
4205 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col:
4206 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_col_stride:
4207 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row:
4208 case Intrinsic::nvvm_wmma_m16n16k8_load_a_tf32_row_stride:
4209
4210 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col:
4211 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_col_stride:
4212 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col_stride:
4213 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_col:
4214 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row:
4215 case Intrinsic::nvvm_wmma_m8n32k16_load_b_s8_row_stride:
4216 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row_stride:
4217 case Intrinsic::nvvm_wmma_m8n32k16_load_b_u8_row:
4218 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col:
4219 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_col_stride:
4220 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row:
4221 case Intrinsic::nvvm_wmma_m16n16k16_load_b_bf16_row_stride:
4222 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col:
4223 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_col_stride:
4224 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row:
4225 case Intrinsic::nvvm_wmma_m16n16k8_load_b_tf32_row_stride:
4226 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_b16:
4227 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x4_trans_b16:
4228 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8:
4229 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b4x16_p64:
4230 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x2_trans_b8x16_b6x16_p32:
4231 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b4x16_p64:
4232 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x4_b8x16_b6x16_p32: {
4233 Info.opc = ISD::INTRINSIC_W_CHAIN;
4234 Info.memVT = MVT::v4i32;
4235 Info.ptrVal = I.getArgOperand(0);
4236 Info.offset = 0;
4237 Info.flags = MachineMemOperand::MOLoad;
4238 Info.align = Align(16);
4239 return true;
4240 }
4241
4242 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col:
4243 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_col_stride:
4244 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col_stride:
4245 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_col:
4246 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row:
4247 case Intrinsic::nvvm_wmma_m32n8k16_load_b_s8_row_stride:
4248 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row_stride:
4249 case Intrinsic::nvvm_wmma_m32n8k16_load_b_u8_row:
4250
4251 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col:
4252 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_col_stride:
4253 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col_stride:
4254 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_col:
4255 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row:
4256 case Intrinsic::nvvm_wmma_m8n32k16_load_a_s8_row_stride:
4257 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row_stride:
4258 case Intrinsic::nvvm_wmma_m8n32k16_load_a_u8_row:
4259 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row:
4260 case Intrinsic::nvvm_wmma_m8n8k128_load_a_b1_row_stride:
4261 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col:
4262 case Intrinsic::nvvm_wmma_m8n8k128_load_b_b1_col_stride:
4263 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row:
4264 case Intrinsic::nvvm_wmma_m8n8k32_load_a_s4_row_stride:
4265 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row_stride:
4266 case Intrinsic::nvvm_wmma_m8n8k32_load_a_u4_row:
4267 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col:
4268 case Intrinsic::nvvm_wmma_m8n8k32_load_b_s4_col_stride:
4269 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col_stride:
4270 case Intrinsic::nvvm_wmma_m8n8k32_load_b_u4_col:
4271 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_b16:
4272 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x1_trans_b16:
4273 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b4x16_p64:
4274 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x1_b8x16_b6x16_p32: {
4275 Info.opc = ISD::INTRINSIC_W_CHAIN;
4276 Info.memVT = MVT::i32;
4277 Info.ptrVal = I.getArgOperand(0);
4278 Info.offset = 0;
4279 Info.flags = MachineMemOperand::MOLoad;
4280 Info.align = Align(4);
4281 return true;
4282 }
4283
4284 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col:
4285 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row:
4286 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col_stride:
4287 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row_stride:
4288 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col:
4289 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row:
4290 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col_stride:
4291 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row_stride:
4292 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col:
4293 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row:
4294 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col_stride:
4295 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row_stride: {
4296 Info.opc = ISD::INTRINSIC_W_CHAIN;
4297 Info.memVT = MVT::v4f16;
4298 Info.ptrVal = I.getArgOperand(0);
4299 Info.offset = 0;
4300 Info.flags = MachineMemOperand::MOLoad;
4301 Info.align = Align(16);
4302 return true;
4303 }
4304
4305 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col:
4306 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row:
4307 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col_stride:
4308 case Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row_stride:
4309 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col:
4310 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row:
4311 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col_stride:
4312 case Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row_stride:
4313 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col:
4314 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row:
4315 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col_stride:
4316 case Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row_stride:
4317 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col:
4318 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row:
4319 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_col_stride:
4320 case Intrinsic::nvvm_wmma_m16n16k8_load_c_f32_row_stride: {
4321 Info.opc = ISD::INTRINSIC_W_CHAIN;
4322 Info.memVT = MVT::v8f32;
4323 Info.ptrVal = I.getArgOperand(0);
4324 Info.offset = 0;
4325 Info.flags = MachineMemOperand::MOLoad;
4326 Info.align = Align(16);
4327 return true;
4328 }
4329
4330 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col:
4331 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_col_stride:
4332 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row:
4333 case Intrinsic::nvvm_wmma_m32n8k16_load_a_bf16_row_stride:
4334
4335 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col:
4336 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_col_stride:
4337 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row:
4338 case Intrinsic::nvvm_wmma_m8n32k16_load_b_bf16_row_stride:
4339
4340 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col:
4341 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_col_stride:
4342 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row:
4343 case Intrinsic::nvvm_wmma_m16n16k16_load_c_s32_row_stride:
4344 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col:
4345 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_col_stride:
4346 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row:
4347 case Intrinsic::nvvm_wmma_m32n8k16_load_c_s32_row_stride:
4348 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col:
4349 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_col_stride:
4350 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row:
4351 case Intrinsic::nvvm_wmma_m8n32k16_load_c_s32_row_stride: {
4352 Info.opc = ISD::INTRINSIC_W_CHAIN;
4353 Info.memVT = MVT::v8i32;
4354 Info.ptrVal = I.getArgOperand(0);
4355 Info.offset = 0;
4356 Info.flags = MachineMemOperand::MOLoad;
4357 Info.align = Align(16);
4358 return true;
4359 }
4360
4361 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col:
4362 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_col_stride:
4363 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row:
4364 case Intrinsic::nvvm_wmma_m8n8k128_load_c_s32_row_stride:
4365 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col:
4366 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_col_stride:
4367 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row:
4368 case Intrinsic::nvvm_wmma_m8n8k32_load_c_s32_row_stride:
4369 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_b16:
4370 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n8_x2_trans_b16:
4371 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8:
4372 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b4x16_p64:
4373 case Intrinsic::nvvm_ldmatrix_sync_aligned_m16n16_x1_trans_b8x16_b6x16_p32:
4374 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b4x16_p64:
4375 case Intrinsic::nvvm_ldmatrix_sync_aligned_m8n16_x2_b8x16_b6x16_p32: {
4376 Info.opc = ISD::INTRINSIC_W_CHAIN;
4377 Info.memVT = MVT::v2i32;
4378 Info.ptrVal = I.getArgOperand(0);
4379 Info.offset = 0;
4380 Info.flags = MachineMemOperand::MOLoad;
4381 Info.align = Align(8);
4382 return true;
4383 }
4384
4385 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col:
4386 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_col_stride:
4387 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row:
4388 case Intrinsic::nvvm_wmma_m8n8k4_load_a_f64_row_stride:
4389
4390 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col:
4391 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_col_stride:
4392 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row:
4393 case Intrinsic::nvvm_wmma_m8n8k4_load_b_f64_row_stride: {
4394 Info.opc = ISD::INTRINSIC_W_CHAIN;
4395 Info.memVT = MVT::f64;
4396 Info.ptrVal = I.getArgOperand(0);
4397 Info.offset = 0;
4398 Info.flags = MachineMemOperand::MOLoad;
4399 Info.align = Align(8);
4400 return true;
4401 }
4402
4403 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col:
4404 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_col_stride:
4405 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row:
4406 case Intrinsic::nvvm_wmma_m8n8k4_load_c_f64_row_stride: {
4407 Info.opc = ISD::INTRINSIC_W_CHAIN;
4408 Info.memVT = MVT::v2f64;
4409 Info.ptrVal = I.getArgOperand(0);
4410 Info.offset = 0;
4411 Info.flags = MachineMemOperand::MOLoad;
4412 Info.align = Align(16);
4413 return true;
4414 }
4415
4416 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col:
4417 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row:
4418 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col_stride:
4419 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride:
4420 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col:
4421 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row:
4422 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col_stride:
4423 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row_stride:
4424 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col:
4425 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row:
4426 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col_stride:
4427 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row_stride: {
4428 Info.opc = ISD::INTRINSIC_VOID;
4429 Info.memVT = MVT::v4f16;
4430 Info.ptrVal = I.getArgOperand(0);
4431 Info.offset = 0;
4432 Info.flags = MachineMemOperand::MOStore;
4433 Info.align = Align(16);
4434 return true;
4435 }
4436
4437 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col:
4438 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row:
4439 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col_stride:
4440 case Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride:
4441 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col:
4442 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row:
4443 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col_stride:
4444 case Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row_stride:
4445 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col:
4446 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row:
4447 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col_stride:
4448 case Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row_stride:
4449 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col:
4450 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row:
4451 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_col_stride:
4452 case Intrinsic::nvvm_wmma_m16n16k8_store_d_f32_row_stride: {
4453 Info.opc = ISD::INTRINSIC_VOID;
4454 Info.memVT = MVT::v8f32;
4455 Info.ptrVal = I.getArgOperand(0);
4456 Info.offset = 0;
4457 Info.flags = MachineMemOperand::MOStore;
4458 Info.align = Align(16);
4459 return true;
4460 }
4461
4462 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col:
4463 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_col_stride:
4464 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row:
4465 case Intrinsic::nvvm_wmma_m16n16k16_store_d_s32_row_stride:
4466 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col:
4467 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_col_stride:
4468 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row:
4469 case Intrinsic::nvvm_wmma_m32n8k16_store_d_s32_row_stride:
4470 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col:
4471 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_col_stride:
4472 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row:
4473 case Intrinsic::nvvm_wmma_m8n32k16_store_d_s32_row_stride: {
4474 Info.opc = ISD::INTRINSIC_VOID;
4475 Info.memVT = MVT::v8i32;
4476 Info.ptrVal = I.getArgOperand(0);
4477 Info.offset = 0;
4478 Info.flags = MachineMemOperand::MOStore;
4479 Info.align = Align(16);
4480 return true;
4481 }
4482
4483 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col:
4484 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_col_stride:
4485 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row:
4486 case Intrinsic::nvvm_wmma_m8n8k128_store_d_s32_row_stride:
4487 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col:
4488 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_col_stride:
4489 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row:
4490 case Intrinsic::nvvm_wmma_m8n8k32_store_d_s32_row_stride:
4491 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_b16:
4492 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x2_trans_b16:
4493 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x2_trans_b8: {
4494 Info.opc = ISD::INTRINSIC_VOID;
4495 Info.memVT = MVT::v2i32;
4496 Info.ptrVal = I.getArgOperand(0);
4497 Info.offset = 0;
4498 Info.flags = MachineMemOperand::MOStore;
4499 Info.align = Align(8);
4500 return true;
4501 }
4502
4503 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col:
4504 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_col_stride:
4505 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row:
4506 case Intrinsic::nvvm_wmma_m8n8k4_store_d_f64_row_stride: {
4507 Info.opc = ISD::INTRINSIC_VOID;
4508 Info.memVT = MVT::v2f64;
4509 Info.ptrVal = I.getArgOperand(0);
4510 Info.offset = 0;
4511 Info.flags = MachineMemOperand::MOStore;
4512 Info.align = Align(16);
4513 return true;
4514 }
4515
4516 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_b16:
4517 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x1_trans_b16:
4518 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x1_trans_b8: {
4519 Info.opc = ISD::INTRINSIC_VOID;
4520 Info.memVT = MVT::i32;
4521 Info.ptrVal = I.getArgOperand(0);
4522 Info.offset = 0;
4523 Info.flags = MachineMemOperand::MOStore;
4524 Info.align = Align(4);
4525 return true;
4526 }
4527
4528 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_b16:
4529 case Intrinsic::nvvm_stmatrix_sync_aligned_m8n8_x4_trans_b16:
4530 case Intrinsic::nvvm_stmatrix_sync_aligned_m16n8_x4_trans_b8: {
4531 Info.opc = ISD::INTRINSIC_VOID;
4532 Info.memVT = MVT::v4i32;
4533 Info.ptrVal = I.getArgOperand(0);
4534 Info.offset = 0;
4535 Info.flags = MachineMemOperand::MOStore;
4536 Info.align = Align(16);
4537 return true;
4538 }
4539
4540 case Intrinsic::nvvm_atomic_add_gen_f_cta:
4541 case Intrinsic::nvvm_atomic_add_gen_f_sys:
4542 case Intrinsic::nvvm_atomic_add_gen_i_cta:
4543 case Intrinsic::nvvm_atomic_add_gen_i_sys:
4544 case Intrinsic::nvvm_atomic_and_gen_i_cta:
4545 case Intrinsic::nvvm_atomic_and_gen_i_sys:
4546 case Intrinsic::nvvm_atomic_cas_gen_i_cta:
4547 case Intrinsic::nvvm_atomic_cas_gen_i_sys:
4548 case Intrinsic::nvvm_atomic_dec_gen_i_cta:
4549 case Intrinsic::nvvm_atomic_dec_gen_i_sys:
4550 case Intrinsic::nvvm_atomic_inc_gen_i_cta:
4551 case Intrinsic::nvvm_atomic_inc_gen_i_sys:
4552 case Intrinsic::nvvm_atomic_max_gen_i_cta:
4553 case Intrinsic::nvvm_atomic_max_gen_i_sys:
4554 case Intrinsic::nvvm_atomic_min_gen_i_cta:
4555 case Intrinsic::nvvm_atomic_min_gen_i_sys:
4556 case Intrinsic::nvvm_atomic_or_gen_i_cta:
4557 case Intrinsic::nvvm_atomic_or_gen_i_sys:
4558 case Intrinsic::nvvm_atomic_exch_gen_i_cta:
4559 case Intrinsic::nvvm_atomic_exch_gen_i_sys:
4560 case Intrinsic::nvvm_atomic_xor_gen_i_cta:
4561 case Intrinsic::nvvm_atomic_xor_gen_i_sys: {
4562 auto &DL = I.getDataLayout();
4563 Info.opc = ISD::INTRINSIC_W_CHAIN;
4564 Info.memVT = getValueType(DL, I.getType());
4565 Info.ptrVal = I.getArgOperand(0);
4566 Info.offset = 0;
4567    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
4568    Info.align.reset();
4569 return true;
4570 }
4571
4572 case Intrinsic::nvvm_prefetch_tensormap: {
4573 auto &DL = I.getDataLayout();
4574 Info.opc = ISD::INTRINSIC_VOID;
4575 Info.memVT = getPointerTy(DL);
4576 Info.ptrVal = I.getArgOperand(0);
4577 Info.offset = 0;
4578 Info.flags =
4580 Info.align.reset();
4581 return true;
4582 }
4583
4584 case Intrinsic::nvvm_tensormap_replace_global_address:
4585 case Intrinsic::nvvm_tensormap_replace_global_stride: {
4586 Info.opc = ISD::INTRINSIC_VOID;
4587 Info.memVT = MVT::i64;
4588 Info.ptrVal = I.getArgOperand(0);
4589 Info.offset = 0;
4590 Info.flags = MachineMemOperand::MOStore;
4591 Info.align.reset();
4592 return true;
4593 }
4594
4595 case Intrinsic::nvvm_tensormap_replace_rank:
4596 case Intrinsic::nvvm_tensormap_replace_box_dim:
4597 case Intrinsic::nvvm_tensormap_replace_global_dim:
4598 case Intrinsic::nvvm_tensormap_replace_element_stride:
4599 case Intrinsic::nvvm_tensormap_replace_elemtype:
4600 case Intrinsic::nvvm_tensormap_replace_interleave_layout:
4601 case Intrinsic::nvvm_tensormap_replace_swizzle_mode:
4602 case Intrinsic::nvvm_tensormap_replace_swizzle_atomicity:
4603 case Intrinsic::nvvm_tensormap_replace_fill_mode: {
4604 Info.opc = ISD::INTRINSIC_VOID;
4605 Info.memVT = MVT::i32;
4606 Info.ptrVal = I.getArgOperand(0);
4607 Info.offset = 0;
4608 Info.flags = MachineMemOperand::MOStore;
4609 Info.align.reset();
4610 return true;
4611 }
4612
4613 case Intrinsic::nvvm_ldu_global_i:
4614 case Intrinsic::nvvm_ldu_global_f:
4615 case Intrinsic::nvvm_ldu_global_p: {
4616 Info.opc = ISD::INTRINSIC_W_CHAIN;
4617 Info.memVT = getValueType(I.getDataLayout(), I.getType());
4618 Info.ptrVal = I.getArgOperand(0);
4619 Info.offset = 0;
4620 Info.flags = MachineMemOperand::MOLoad;
4621 Info.align = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
4622
4623 return true;
4624 }
4625 case Intrinsic::nvvm_tex_1d_v4f32_s32:
4626 case Intrinsic::nvvm_tex_1d_v4f32_f32:
4627 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
4628 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
4629 case Intrinsic::nvvm_tex_1d_array_v4f32_s32:
4630 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
4631 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
4632 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
4633 case Intrinsic::nvvm_tex_2d_v4f32_s32:
4634 case Intrinsic::nvvm_tex_2d_v4f32_f32:
4635 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
4636 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
4637 case Intrinsic::nvvm_tex_2d_array_v4f32_s32:
4638 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
4639 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
4640 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
4641 case Intrinsic::nvvm_tex_3d_v4f32_s32:
4642 case Intrinsic::nvvm_tex_3d_v4f32_f32:
4643 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
4644 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
4645 case Intrinsic::nvvm_tex_cube_v4f32_f32:
4646 case Intrinsic::nvvm_tex_cube_level_v4f32_f32:
4647 case Intrinsic::nvvm_tex_cube_array_v4f32_f32:
4648 case Intrinsic::nvvm_tex_cube_array_level_v4f32_f32:
4649 case Intrinsic::nvvm_tld4_r_2d_v4f32_f32:
4650 case Intrinsic::nvvm_tld4_g_2d_v4f32_f32:
4651 case Intrinsic::nvvm_tld4_b_2d_v4f32_f32:
4652 case Intrinsic::nvvm_tld4_a_2d_v4f32_f32:
4653 case Intrinsic::nvvm_tex_unified_1d_v4f32_s32:
4654 case Intrinsic::nvvm_tex_unified_1d_v4f32_f32:
4655 case Intrinsic::nvvm_tex_unified_1d_level_v4f32_f32:
4656 case Intrinsic::nvvm_tex_unified_1d_grad_v4f32_f32:
4657 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_s32:
4658 case Intrinsic::nvvm_tex_unified_1d_array_v4f32_f32:
4659 case Intrinsic::nvvm_tex_unified_1d_array_level_v4f32_f32:
4660 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4f32_f32:
4661 case Intrinsic::nvvm_tex_unified_2d_v4f32_s32:
4662 case Intrinsic::nvvm_tex_unified_2d_v4f32_f32:
4663 case Intrinsic::nvvm_tex_unified_2d_level_v4f32_f32:
4664 case Intrinsic::nvvm_tex_unified_2d_grad_v4f32_f32:
4665 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_s32:
4666 case Intrinsic::nvvm_tex_unified_2d_array_v4f32_f32:
4667 case Intrinsic::nvvm_tex_unified_2d_array_level_v4f32_f32:
4668 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4f32_f32:
4669 case Intrinsic::nvvm_tex_unified_3d_v4f32_s32:
4670 case Intrinsic::nvvm_tex_unified_3d_v4f32_f32:
4671 case Intrinsic::nvvm_tex_unified_3d_level_v4f32_f32:
4672 case Intrinsic::nvvm_tex_unified_3d_grad_v4f32_f32:
4673 case Intrinsic::nvvm_tex_unified_cube_v4f32_f32:
4674 case Intrinsic::nvvm_tex_unified_cube_level_v4f32_f32:
4675 case Intrinsic::nvvm_tex_unified_cube_array_v4f32_f32:
4676 case Intrinsic::nvvm_tex_unified_cube_array_level_v4f32_f32:
4677 case Intrinsic::nvvm_tex_unified_cube_grad_v4f32_f32:
4678 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4f32_f32:
4679 case Intrinsic::nvvm_tld4_unified_r_2d_v4f32_f32:
4680 case Intrinsic::nvvm_tld4_unified_g_2d_v4f32_f32:
4681 case Intrinsic::nvvm_tld4_unified_b_2d_v4f32_f32:
4682 case Intrinsic::nvvm_tld4_unified_a_2d_v4f32_f32:
4683 Info.opc = ISD::INTRINSIC_W_CHAIN;
4684 Info.memVT = MVT::v4f32;
4685 Info.ptrVal = nullptr;
4686 Info.offset = 0;
4687 Info.flags = MachineMemOperand::MOLoad;
4688 Info.align = Align(16);
4689 return true;
4690
4691 case Intrinsic::nvvm_tex_1d_v4s32_s32:
4692 case Intrinsic::nvvm_tex_1d_v4s32_f32:
4693 case Intrinsic::nvvm_tex_1d_level_v4s32_f32:
4694 case Intrinsic::nvvm_tex_1d_grad_v4s32_f32:
4695 case Intrinsic::nvvm_tex_1d_array_v4s32_s32:
4696 case Intrinsic::nvvm_tex_1d_array_v4s32_f32:
4697 case Intrinsic::nvvm_tex_1d_array_level_v4s32_f32:
4698 case Intrinsic::nvvm_tex_1d_array_grad_v4s32_f32:
4699 case Intrinsic::nvvm_tex_2d_v4s32_s32:
4700 case Intrinsic::nvvm_tex_2d_v4s32_f32:
4701 case Intrinsic::nvvm_tex_2d_level_v4s32_f32:
4702 case Intrinsic::nvvm_tex_2d_grad_v4s32_f32:
4703 case Intrinsic::nvvm_tex_2d_array_v4s32_s32:
4704 case Intrinsic::nvvm_tex_2d_array_v4s32_f32:
4705 case Intrinsic::nvvm_tex_2d_array_level_v4s32_f32:
4706 case Intrinsic::nvvm_tex_2d_array_grad_v4s32_f32:
4707 case Intrinsic::nvvm_tex_3d_v4s32_s32:
4708 case Intrinsic::nvvm_tex_3d_v4s32_f32:
4709 case Intrinsic::nvvm_tex_3d_level_v4s32_f32:
4710 case Intrinsic::nvvm_tex_3d_grad_v4s32_f32:
4711 case Intrinsic::nvvm_tex_cube_v4s32_f32:
4712 case Intrinsic::nvvm_tex_cube_level_v4s32_f32:
4713 case Intrinsic::nvvm_tex_cube_array_v4s32_f32:
4714 case Intrinsic::nvvm_tex_cube_array_level_v4s32_f32:
4715 case Intrinsic::nvvm_tex_cube_v4u32_f32:
4716 case Intrinsic::nvvm_tex_cube_level_v4u32_f32:
4717 case Intrinsic::nvvm_tex_cube_array_v4u32_f32:
4718 case Intrinsic::nvvm_tex_cube_array_level_v4u32_f32:
4719 case Intrinsic::nvvm_tex_1d_v4u32_s32:
4720 case Intrinsic::nvvm_tex_1d_v4u32_f32:
4721 case Intrinsic::nvvm_tex_1d_level_v4u32_f32:
4722 case Intrinsic::nvvm_tex_1d_grad_v4u32_f32:
4723 case Intrinsic::nvvm_tex_1d_array_v4u32_s32:
4724 case Intrinsic::nvvm_tex_1d_array_v4u32_f32:
4725 case Intrinsic::nvvm_tex_1d_array_level_v4u32_f32:
4726 case Intrinsic::nvvm_tex_1d_array_grad_v4u32_f32:
4727 case Intrinsic::nvvm_tex_2d_v4u32_s32:
4728 case Intrinsic::nvvm_tex_2d_v4u32_f32:
4729 case Intrinsic::nvvm_tex_2d_level_v4u32_f32:
4730 case Intrinsic::nvvm_tex_2d_grad_v4u32_f32:
4731 case Intrinsic::nvvm_tex_2d_array_v4u32_s32:
4732 case Intrinsic::nvvm_tex_2d_array_v4u32_f32:
4733 case Intrinsic::nvvm_tex_2d_array_level_v4u32_f32:
4734 case Intrinsic::nvvm_tex_2d_array_grad_v4u32_f32:
4735 case Intrinsic::nvvm_tex_3d_v4u32_s32:
4736 case Intrinsic::nvvm_tex_3d_v4u32_f32:
4737 case Intrinsic::nvvm_tex_3d_level_v4u32_f32:
4738 case Intrinsic::nvvm_tex_3d_grad_v4u32_f32:
4739 case Intrinsic::nvvm_tld4_r_2d_v4s32_f32:
4740 case Intrinsic::nvvm_tld4_g_2d_v4s32_f32:
4741 case Intrinsic::nvvm_tld4_b_2d_v4s32_f32:
4742 case Intrinsic::nvvm_tld4_a_2d_v4s32_f32:
4743 case Intrinsic::nvvm_tld4_r_2d_v4u32_f32:
4744 case Intrinsic::nvvm_tld4_g_2d_v4u32_f32:
4745 case Intrinsic::nvvm_tld4_b_2d_v4u32_f32:
4746 case Intrinsic::nvvm_tld4_a_2d_v4u32_f32:
4747 case Intrinsic::nvvm_tex_unified_1d_v4s32_s32:
4748 case Intrinsic::nvvm_tex_unified_1d_v4s32_f32:
4749 case Intrinsic::nvvm_tex_unified_1d_level_v4s32_f32:
4750 case Intrinsic::nvvm_tex_unified_1d_grad_v4s32_f32:
4751 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_s32:
4752 case Intrinsic::nvvm_tex_unified_1d_array_v4s32_f32:
4753 case Intrinsic::nvvm_tex_unified_1d_array_level_v4s32_f32:
4754 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4s32_f32:
4755 case Intrinsic::nvvm_tex_unified_2d_v4s32_s32:
4756 case Intrinsic::nvvm_tex_unified_2d_v4s32_f32:
4757 case Intrinsic::nvvm_tex_unified_2d_level_v4s32_f32:
4758 case Intrinsic::nvvm_tex_unified_2d_grad_v4s32_f32:
4759 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_s32:
4760 case Intrinsic::nvvm_tex_unified_2d_array_v4s32_f32:
4761 case Intrinsic::nvvm_tex_unified_2d_array_level_v4s32_f32:
4762 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4s32_f32:
4763 case Intrinsic::nvvm_tex_unified_3d_v4s32_s32:
4764 case Intrinsic::nvvm_tex_unified_3d_v4s32_f32:
4765 case Intrinsic::nvvm_tex_unified_3d_level_v4s32_f32:
4766 case Intrinsic::nvvm_tex_unified_3d_grad_v4s32_f32:
4767 case Intrinsic::nvvm_tex_unified_1d_v4u32_s32:
4768 case Intrinsic::nvvm_tex_unified_1d_v4u32_f32:
4769 case Intrinsic::nvvm_tex_unified_1d_level_v4u32_f32:
4770 case Intrinsic::nvvm_tex_unified_1d_grad_v4u32_f32:
4771 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_s32:
4772 case Intrinsic::nvvm_tex_unified_1d_array_v4u32_f32:
4773 case Intrinsic::nvvm_tex_unified_1d_array_level_v4u32_f32:
4774 case Intrinsic::nvvm_tex_unified_1d_array_grad_v4u32_f32:
4775 case Intrinsic::nvvm_tex_unified_2d_v4u32_s32:
4776 case Intrinsic::nvvm_tex_unified_2d_v4u32_f32:
4777 case Intrinsic::nvvm_tex_unified_2d_level_v4u32_f32:
4778 case Intrinsic::nvvm_tex_unified_2d_grad_v4u32_f32:
4779 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_s32:
4780 case Intrinsic::nvvm_tex_unified_2d_array_v4u32_f32:
4781 case Intrinsic::nvvm_tex_unified_2d_array_level_v4u32_f32:
4782 case Intrinsic::nvvm_tex_unified_2d_array_grad_v4u32_f32:
4783 case Intrinsic::nvvm_tex_unified_3d_v4u32_s32:
4784 case Intrinsic::nvvm_tex_unified_3d_v4u32_f32:
4785 case Intrinsic::nvvm_tex_unified_3d_level_v4u32_f32:
4786 case Intrinsic::nvvm_tex_unified_3d_grad_v4u32_f32:
4787 case Intrinsic::nvvm_tex_unified_cube_v4s32_f32:
4788 case Intrinsic::nvvm_tex_unified_cube_level_v4s32_f32:
4789 case Intrinsic::nvvm_tex_unified_cube_array_v4s32_f32:
4790 case Intrinsic::nvvm_tex_unified_cube_array_level_v4s32_f32:
4791 case Intrinsic::nvvm_tex_unified_cube_v4u32_f32:
4792 case Intrinsic::nvvm_tex_unified_cube_level_v4u32_f32:
4793 case Intrinsic::nvvm_tex_unified_cube_array_v4u32_f32:
4794 case Intrinsic::nvvm_tex_unified_cube_array_level_v4u32_f32:
4795 case Intrinsic::nvvm_tex_unified_cube_grad_v4s32_f32:
4796 case Intrinsic::nvvm_tex_unified_cube_grad_v4u32_f32:
4797 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4s32_f32:
4798 case Intrinsic::nvvm_tex_unified_cube_array_grad_v4u32_f32:
4799 case Intrinsic::nvvm_tld4_unified_r_2d_v4s32_f32:
4800 case Intrinsic::nvvm_tld4_unified_g_2d_v4s32_f32:
4801 case Intrinsic::nvvm_tld4_unified_b_2d_v4s32_f32:
4802 case Intrinsic::nvvm_tld4_unified_a_2d_v4s32_f32:
4803 case Intrinsic::nvvm_tld4_unified_r_2d_v4u32_f32:
4804 case Intrinsic::nvvm_tld4_unified_g_2d_v4u32_f32:
4805 case Intrinsic::nvvm_tld4_unified_b_2d_v4u32_f32:
4806 case Intrinsic::nvvm_tld4_unified_a_2d_v4u32_f32:
4807 Info.opc = ISD::INTRINSIC_W_CHAIN;
4808 Info.memVT = MVT::v4i32;
4809 Info.ptrVal = nullptr;
4810 Info.offset = 0;
4811 Info.flags = MachineMemOperand::MOLoad;
4812 Info.align = Align(16);
4813 return true;
4814
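  // For the suld.* surface-load intrinsics below, memVT is the element type
  // read from the surface (i8/i16/i32/i64); the conservative 16-byte
  // alignment covers the widest vector forms.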
4815 case Intrinsic::nvvm_suld_1d_i8_clamp:
4816 case Intrinsic::nvvm_suld_1d_v2i8_clamp:
4817 case Intrinsic::nvvm_suld_1d_v4i8_clamp:
4818 case Intrinsic::nvvm_suld_1d_array_i8_clamp:
4819 case Intrinsic::nvvm_suld_1d_array_v2i8_clamp:
4820 case Intrinsic::nvvm_suld_1d_array_v4i8_clamp:
4821 case Intrinsic::nvvm_suld_2d_i8_clamp:
4822 case Intrinsic::nvvm_suld_2d_v2i8_clamp:
4823 case Intrinsic::nvvm_suld_2d_v4i8_clamp:
4824 case Intrinsic::nvvm_suld_2d_array_i8_clamp:
4825 case Intrinsic::nvvm_suld_2d_array_v2i8_clamp:
4826 case Intrinsic::nvvm_suld_2d_array_v4i8_clamp:
4827 case Intrinsic::nvvm_suld_3d_i8_clamp:
4828 case Intrinsic::nvvm_suld_3d_v2i8_clamp:
4829 case Intrinsic::nvvm_suld_3d_v4i8_clamp:
4830 case Intrinsic::nvvm_suld_1d_i8_trap:
4831 case Intrinsic::nvvm_suld_1d_v2i8_trap:
4832 case Intrinsic::nvvm_suld_1d_v4i8_trap:
4833 case Intrinsic::nvvm_suld_1d_array_i8_trap:
4834 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
4835 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
4836 case Intrinsic::nvvm_suld_2d_i8_trap:
4837 case Intrinsic::nvvm_suld_2d_v2i8_trap:
4838 case Intrinsic::nvvm_suld_2d_v4i8_trap:
4839 case Intrinsic::nvvm_suld_2d_array_i8_trap:
4840 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
4841 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
4842 case Intrinsic::nvvm_suld_3d_i8_trap:
4843 case Intrinsic::nvvm_suld_3d_v2i8_trap:
4844 case Intrinsic::nvvm_suld_3d_v4i8_trap:
4845 case Intrinsic::nvvm_suld_1d_i8_zero:
4846 case Intrinsic::nvvm_suld_1d_v2i8_zero:
4847 case Intrinsic::nvvm_suld_1d_v4i8_zero:
4848 case Intrinsic::nvvm_suld_1d_array_i8_zero:
4849 case Intrinsic::nvvm_suld_1d_array_v2i8_zero:
4850 case Intrinsic::nvvm_suld_1d_array_v4i8_zero:
4851 case Intrinsic::nvvm_suld_2d_i8_zero:
4852 case Intrinsic::nvvm_suld_2d_v2i8_zero:
4853 case Intrinsic::nvvm_suld_2d_v4i8_zero:
4854 case Intrinsic::nvvm_suld_2d_array_i8_zero:
4855 case Intrinsic::nvvm_suld_2d_array_v2i8_zero:
4856 case Intrinsic::nvvm_suld_2d_array_v4i8_zero:
4857 case Intrinsic::nvvm_suld_3d_i8_zero:
4858 case Intrinsic::nvvm_suld_3d_v2i8_zero:
4859 case Intrinsic::nvvm_suld_3d_v4i8_zero:
4860 Info.opc = ISD::INTRINSIC_W_CHAIN;
4861 Info.memVT = MVT::i8;
4862 Info.ptrVal = nullptr;
4863 Info.offset = 0;
4864 Info.flags = MachineMemOperand::MOLoad;
4865 Info.align = Align(16);
4866 return true;
4867
4868 case Intrinsic::nvvm_suld_1d_i16_clamp:
4869 case Intrinsic::nvvm_suld_1d_v2i16_clamp:
4870 case Intrinsic::nvvm_suld_1d_v4i16_clamp:
4871 case Intrinsic::nvvm_suld_1d_array_i16_clamp:
4872 case Intrinsic::nvvm_suld_1d_array_v2i16_clamp:
4873 case Intrinsic::nvvm_suld_1d_array_v4i16_clamp:
4874 case Intrinsic::nvvm_suld_2d_i16_clamp:
4875 case Intrinsic::nvvm_suld_2d_v2i16_clamp:
4876 case Intrinsic::nvvm_suld_2d_v4i16_clamp:
4877 case Intrinsic::nvvm_suld_2d_array_i16_clamp:
4878 case Intrinsic::nvvm_suld_2d_array_v2i16_clamp:
4879 case Intrinsic::nvvm_suld_2d_array_v4i16_clamp:
4880 case Intrinsic::nvvm_suld_3d_i16_clamp:
4881 case Intrinsic::nvvm_suld_3d_v2i16_clamp:
4882 case Intrinsic::nvvm_suld_3d_v4i16_clamp:
4883 case Intrinsic::nvvm_suld_1d_i16_trap:
4884 case Intrinsic::nvvm_suld_1d_v2i16_trap:
4885 case Intrinsic::nvvm_suld_1d_v4i16_trap:
4886 case Intrinsic::nvvm_suld_1d_array_i16_trap:
4887 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
4888 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
4889 case Intrinsic::nvvm_suld_2d_i16_trap:
4890 case Intrinsic::nvvm_suld_2d_v2i16_trap:
4891 case Intrinsic::nvvm_suld_2d_v4i16_trap:
4892 case Intrinsic::nvvm_suld_2d_array_i16_trap:
4893 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
4894 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
4895 case Intrinsic::nvvm_suld_3d_i16_trap:
4896 case Intrinsic::nvvm_suld_3d_v2i16_trap:
4897 case Intrinsic::nvvm_suld_3d_v4i16_trap:
4898 case Intrinsic::nvvm_suld_1d_i16_zero:
4899 case Intrinsic::nvvm_suld_1d_v2i16_zero:
4900 case Intrinsic::nvvm_suld_1d_v4i16_zero:
4901 case Intrinsic::nvvm_suld_1d_array_i16_zero:
4902 case Intrinsic::nvvm_suld_1d_array_v2i16_zero:
4903 case Intrinsic::nvvm_suld_1d_array_v4i16_zero:
4904 case Intrinsic::nvvm_suld_2d_i16_zero:
4905 case Intrinsic::nvvm_suld_2d_v2i16_zero:
4906 case Intrinsic::nvvm_suld_2d_v4i16_zero:
4907 case Intrinsic::nvvm_suld_2d_array_i16_zero:
4908 case Intrinsic::nvvm_suld_2d_array_v2i16_zero:
4909 case Intrinsic::nvvm_suld_2d_array_v4i16_zero:
4910 case Intrinsic::nvvm_suld_3d_i16_zero:
4911 case Intrinsic::nvvm_suld_3d_v2i16_zero:
4912 case Intrinsic::nvvm_suld_3d_v4i16_zero:
4913 Info.opc = ISD::INTRINSIC_W_CHAIN;
4914 Info.memVT = MVT::i16;
4915 Info.ptrVal = nullptr;
4916 Info.offset = 0;
4917 Info.flags = MachineMemOperand::MOLoad;
4918 Info.align = Align(16);
4919 return true;
4920
4921 case Intrinsic::nvvm_suld_1d_i32_clamp:
4922 case Intrinsic::nvvm_suld_1d_v2i32_clamp:
4923 case Intrinsic::nvvm_suld_1d_v4i32_clamp:
4924 case Intrinsic::nvvm_suld_1d_array_i32_clamp:
4925 case Intrinsic::nvvm_suld_1d_array_v2i32_clamp:
4926 case Intrinsic::nvvm_suld_1d_array_v4i32_clamp:
4927 case Intrinsic::nvvm_suld_2d_i32_clamp:
4928 case Intrinsic::nvvm_suld_2d_v2i32_clamp:
4929 case Intrinsic::nvvm_suld_2d_v4i32_clamp:
4930 case Intrinsic::nvvm_suld_2d_array_i32_clamp:
4931 case Intrinsic::nvvm_suld_2d_array_v2i32_clamp:
4932 case Intrinsic::nvvm_suld_2d_array_v4i32_clamp:
4933 case Intrinsic::nvvm_suld_3d_i32_clamp:
4934 case Intrinsic::nvvm_suld_3d_v2i32_clamp:
4935 case Intrinsic::nvvm_suld_3d_v4i32_clamp:
4936 case Intrinsic::nvvm_suld_1d_i32_trap:
4937 case Intrinsic::nvvm_suld_1d_v2i32_trap:
4938 case Intrinsic::nvvm_suld_1d_v4i32_trap:
4939 case Intrinsic::nvvm_suld_1d_array_i32_trap:
4940 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
4941 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
4942 case Intrinsic::nvvm_suld_2d_i32_trap:
4943 case Intrinsic::nvvm_suld_2d_v2i32_trap:
4944 case Intrinsic::nvvm_suld_2d_v4i32_trap:
4945 case Intrinsic::nvvm_suld_2d_array_i32_trap:
4946 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
4947 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
4948 case Intrinsic::nvvm_suld_3d_i32_trap:
4949 case Intrinsic::nvvm_suld_3d_v2i32_trap:
4950 case Intrinsic::nvvm_suld_3d_v4i32_trap:
4951 case Intrinsic::nvvm_suld_1d_i32_zero:
4952 case Intrinsic::nvvm_suld_1d_v2i32_zero:
4953 case Intrinsic::nvvm_suld_1d_v4i32_zero:
4954 case Intrinsic::nvvm_suld_1d_array_i32_zero:
4955 case Intrinsic::nvvm_suld_1d_array_v2i32_zero:
4956 case Intrinsic::nvvm_suld_1d_array_v4i32_zero:
4957 case Intrinsic::nvvm_suld_2d_i32_zero:
4958 case Intrinsic::nvvm_suld_2d_v2i32_zero:
4959 case Intrinsic::nvvm_suld_2d_v4i32_zero:
4960 case Intrinsic::nvvm_suld_2d_array_i32_zero:
4961 case Intrinsic::nvvm_suld_2d_array_v2i32_zero:
4962 case Intrinsic::nvvm_suld_2d_array_v4i32_zero:
4963 case Intrinsic::nvvm_suld_3d_i32_zero:
4964 case Intrinsic::nvvm_suld_3d_v2i32_zero:
4965 case Intrinsic::nvvm_suld_3d_v4i32_zero:
4966 Info.opc = ISD::INTRINSIC_W_CHAIN;
4967 Info.memVT = MVT::i32;
4968 Info.ptrVal = nullptr;
4969 Info.offset = 0;
4970 Info.flags = MachineMemOperand::MOLoad;
4971 Info.align = Align(16);
4972 return true;
4973
4974 case Intrinsic::nvvm_suld_1d_i64_clamp:
4975 case Intrinsic::nvvm_suld_1d_v2i64_clamp:
4976 case Intrinsic::nvvm_suld_1d_array_i64_clamp:
4977 case Intrinsic::nvvm_suld_1d_array_v2i64_clamp:
4978 case Intrinsic::nvvm_suld_2d_i64_clamp:
4979 case Intrinsic::nvvm_suld_2d_v2i64_clamp:
4980 case Intrinsic::nvvm_suld_2d_array_i64_clamp:
4981 case Intrinsic::nvvm_suld_2d_array_v2i64_clamp:
4982 case Intrinsic::nvvm_suld_3d_i64_clamp:
4983 case Intrinsic::nvvm_suld_3d_v2i64_clamp:
4984 case Intrinsic::nvvm_suld_1d_i64_trap:
4985 case Intrinsic::nvvm_suld_1d_v2i64_trap:
4986 case Intrinsic::nvvm_suld_1d_array_i64_trap:
4987 case Intrinsic::nvvm_suld_1d_array_v2i64_trap:
4988 case Intrinsic::nvvm_suld_2d_i64_trap:
4989 case Intrinsic::nvvm_suld_2d_v2i64_trap:
4990 case Intrinsic::nvvm_suld_2d_array_i64_trap:
4991 case Intrinsic::nvvm_suld_2d_array_v2i64_trap:
4992 case Intrinsic::nvvm_suld_3d_i64_trap:
4993 case Intrinsic::nvvm_suld_3d_v2i64_trap:
4994 case Intrinsic::nvvm_suld_1d_i64_zero:
4995 case Intrinsic::nvvm_suld_1d_v2i64_zero:
4996 case Intrinsic::nvvm_suld_1d_array_i64_zero:
4997 case Intrinsic::nvvm_suld_1d_array_v2i64_zero:
4998 case Intrinsic::nvvm_suld_2d_i64_zero:
4999 case Intrinsic::nvvm_suld_2d_v2i64_zero:
5000 case Intrinsic::nvvm_suld_2d_array_i64_zero:
5001 case Intrinsic::nvvm_suld_2d_array_v2i64_zero:
5002 case Intrinsic::nvvm_suld_3d_i64_zero:
5003 case Intrinsic::nvvm_suld_3d_v2i64_zero:
5004 Info.opc = ISD::INTRINSIC_W_CHAIN;
5005 Info.memVT = MVT::i64;
5006 Info.ptrVal = nullptr;
5007 Info.offset = 0;
5008 Info.flags = MachineMemOperand::MOLoad;
5009 Info.align = Align(16);
5010 return true;
5011
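  // For the tcgen05.ld/st intrinsics below, memVT encodes the total number of
  // 32-bit registers moved per access: the base shape's register count times
  // the _xN repeat factor (so 16x64b_x2 and 16x128b_x1 both use v2i32).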
5012 case Intrinsic::nvvm_tcgen05_ld_16x64b_x1:
5013 case Intrinsic::nvvm_tcgen05_ld_32x32b_x1:
5014 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x1: {
5015 Info.opc = ISD::INTRINSIC_W_CHAIN;
5016 Info.memVT = MVT::v1i32;
5017 Info.ptrVal = I.getArgOperand(0);
5018 Info.offset = 0;
5019 Info.flags = MachineMemOperand::MOLoad;
5020 Info.align.reset();
5021 return true;
5022 }
5023
5024 case Intrinsic::nvvm_tcgen05_ld_16x64b_x2:
5025 case Intrinsic::nvvm_tcgen05_ld_16x128b_x1:
5026 case Intrinsic::nvvm_tcgen05_ld_32x32b_x2:
5027 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x2: {
5028 Info.opc = ISD::INTRINSIC_W_CHAIN;
5029 Info.memVT = MVT::v2i32;
5030 Info.ptrVal = I.getArgOperand(0);
5031 Info.offset = 0;
5032 Info.flags = MachineMemOperand::MOLoad;
5033 Info.align.reset();
5034 return true;
5035 }
5036
5037 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:
5038 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:
5039 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:
5040 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:
5041 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4: {
5042 Info.opc = ISD::INTRINSIC_W_CHAIN;
5043 Info.memVT = MVT::v4i32;
5044 Info.ptrVal = I.getArgOperand(0);
5045 Info.offset = 0;
5046 Info.flags = MachineMemOperand::MOLoad;
5047 Info.align.reset();
5048 return true;
5049 }
5050
5051 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:
5052 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:
5053 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:
5054 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:
5055 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8: {
5056 Info.opc = ISD::INTRINSIC_W_CHAIN;
5057 Info.memVT = MVT::v8i32;
5058 Info.ptrVal = I.getArgOperand(0);
5059 Info.offset = 0;
5060 Info.flags = MachineMemOperand::MOLoad;
5061 Info.align.reset();
5062 return true;
5063 }
5064
5065 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:
5066 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:
5067 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:
5068 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:
5069 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16: {
5070 Info.opc = ISD::INTRINSIC_W_CHAIN;
5071 Info.memVT = MVT::v16i32;
5072 Info.ptrVal = I.getArgOperand(0);
5073 Info.offset = 0;
5074 Info.flags = MachineMemOperand::MOLoad;
5075 Info.align.reset();
5076 return true;
5077 }
5078
5079 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:
5080 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:
5081 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:
5082 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:
5083 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32: {
5084 Info.opc = ISD::INTRINSIC_W_CHAIN;
5085 Info.memVT = MVT::v32i32;
5086 Info.ptrVal = I.getArgOperand(0);
5087 Info.offset = 0;
5088 Info.flags = MachineMemOperand::MOLoad;
5089 Info.align.reset();
5090 return true;
5091 }
5092
5093 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:
5094 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:
5095 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:
5096 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:
5097 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64: {
5098 Info.opc = ISD::INTRINSIC_W_CHAIN;
5099 Info.memVT = MVT::v64i32;
5100 Info.ptrVal = I.getArgOperand(0);
5101 Info.offset = 0;
5102 Info.flags = MachineMemOperand::MOLoad;
5103 Info.align.reset();
5104 return true;
5105 }
5106
5107 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:
5108 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:
5109 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:
5110 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:
5111 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128: {
5112 Info.opc = ISD::INTRINSIC_W_CHAIN;
5113 Info.memVT = MVT::v128i32;
5114 Info.ptrVal = I.getArgOperand(0);
5115 Info.offset = 0;
5116 Info.flags = MachineMemOperand::MOLoad;
5117 Info.align.reset();
5118 return true;
5119 }
5120
5121 case Intrinsic::nvvm_tcgen05_st_16x64b_x1:
5122 case Intrinsic::nvvm_tcgen05_st_32x32b_x1:
5123 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x1: {
5124 Info.opc = ISD::INTRINSIC_VOID;
5125 Info.memVT = MVT::i32;
5126 Info.ptrVal = I.getArgOperand(0);
5127 Info.offset = 0;
5128 Info.flags = MachineMemOperand::MOStore;
5129 Info.align.reset();
5130 return true;
5131 }
5132
5133 case Intrinsic::nvvm_tcgen05_st_16x64b_x2:
5134 case Intrinsic::nvvm_tcgen05_st_16x128b_x1:
5135 case Intrinsic::nvvm_tcgen05_st_32x32b_x2:
5136 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x2: {
5137 Info.opc = ISD::INTRINSIC_VOID;
5138 Info.memVT = MVT::v2i32;
5139 Info.ptrVal = I.getArgOperand(0);
5140 Info.offset = 0;
5141 Info.flags = MachineMemOperand::MOStore;
5142 Info.align.reset();
5143 return true;
5144 }
5145
5146 case Intrinsic::nvvm_tcgen05_st_16x64b_x4:
5147 case Intrinsic::nvvm_tcgen05_st_16x128b_x2:
5148 case Intrinsic::nvvm_tcgen05_st_16x256b_x1:
5149 case Intrinsic::nvvm_tcgen05_st_32x32b_x4:
5150 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x4: {
5151 Info.opc = ISD::INTRINSIC_VOID;
5152 Info.memVT = MVT::v4i32;
5153 Info.ptrVal = I.getArgOperand(0);
5154 Info.offset = 0;
5155 Info.flags = MachineMemOperand::MOStore;
5156 Info.align.reset();
5157 return true;
5158 }
5159
5160 case Intrinsic::nvvm_tcgen05_st_16x64b_x8:
5161 case Intrinsic::nvvm_tcgen05_st_16x128b_x4:
5162 case Intrinsic::nvvm_tcgen05_st_16x256b_x2:
5163 case Intrinsic::nvvm_tcgen05_st_32x32b_x8:
5164 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x8: {
5165 Info.opc = ISD::INTRINSIC_VOID;
5166 Info.memVT = MVT::v8i32;
5167 Info.ptrVal = I.getArgOperand(0);
5168 Info.offset = 0;
5169 Info.flags = MachineMemOperand::MOStore;
5170 Info.align.reset();
5171 return true;
5172 }
5173
5174 case Intrinsic::nvvm_tcgen05_st_16x64b_x16:
5175 case Intrinsic::nvvm_tcgen05_st_16x128b_x8:
5176 case Intrinsic::nvvm_tcgen05_st_16x256b_x4:
5177 case Intrinsic::nvvm_tcgen05_st_32x32b_x16:
5178 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x16: {
5179 Info.opc = ISD::INTRINSIC_VOID;
5180 Info.memVT = MVT::v16i32;
5181 Info.ptrVal = I.getArgOperand(0);
5182 Info.offset = 0;
5183 Info.flags = MachineMemOperand::MOStore;
5184 Info.align.reset();
5185 return true;
5186 }
5187
5188 case Intrinsic::nvvm_tcgen05_st_16x64b_x32:
5189 case Intrinsic::nvvm_tcgen05_st_16x128b_x16:
5190 case Intrinsic::nvvm_tcgen05_st_16x256b_x8:
5191 case Intrinsic::nvvm_tcgen05_st_32x32b_x32:
5192 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x32: {
5193 Info.opc = ISD::INTRINSIC_VOID;
5194 Info.memVT = MVT::v32i32;
5195 Info.ptrVal = I.getArgOperand(0);
5196 Info.offset = 0;
5197 Info.flags = MachineMemOperand::MOStore;
5198 Info.align.reset();
5199 return true;
5200 }
5201
5202 case Intrinsic::nvvm_tcgen05_st_16x64b_x64:
5203 case Intrinsic::nvvm_tcgen05_st_16x128b_x32:
5204 case Intrinsic::nvvm_tcgen05_st_16x256b_x16:
5205 case Intrinsic::nvvm_tcgen05_st_32x32b_x64:
5206 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x64: {
5207 Info.opc = ISD::INTRINSIC_VOID;
5208 Info.memVT = MVT::v64i32;
5209 Info.ptrVal = I.getArgOperand(0);
5210 Info.offset = 0;
5211 Info.flags = MachineMemOperand::MOStore;
5212 Info.align.reset();
5213 return true;
5214 }
5215
5216 case Intrinsic::nvvm_tcgen05_st_16x64b_x128:
5217 case Intrinsic::nvvm_tcgen05_st_16x128b_x64:
5218 case Intrinsic::nvvm_tcgen05_st_16x256b_x32:
5219 case Intrinsic::nvvm_tcgen05_st_32x32b_x128:
5220 case Intrinsic::nvvm_tcgen05_st_16x32bx2_x128: {
5221 Info.opc = ISD::INTRINSIC_VOID;
5222 Info.memVT = MVT::v128i32;
5223 Info.ptrVal = I.getArgOperand(0);
5224 Info.offset = 0;
5225 Info.flags = MachineMemOperand::MOStore;
5226 Info.align.reset();
5227 return true;
5228 }
5229 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1:
5230 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1:
5231 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1:
5232 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1:
5233 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1:
5234 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1:
5235 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift:
5236 case Intrinsic::
5237 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift:
5238 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1:
5239 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1:
5240 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift:
5241 case Intrinsic::
5242 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift: {
5243 // We are reading and writing back to TMem
5244 Info.opc = ISD::INTRINSIC_VOID;
5245 Info.memVT = MVT::v4i32;
5246 Info.ptrVal = I.getArgOperand(0);
5247 Info.offset = 0;
5248     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
5249     Info.align = Align(16);
5250 return true;
5251 }
5252
5253 case Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2:
5254 case Intrinsic::nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2:
5255 case Intrinsic::nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2:
5256 case Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2:
5257 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2:
5258 case Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2:
5259 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2:
5260 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2:
5261 case Intrinsic::nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift:
5262 case Intrinsic::
5263 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift:
5264 case Intrinsic::nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift:
5265 case Intrinsic::
5266 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift: {
5267 // We are reading and writing back to TMem
5268 Info.opc = ISD::INTRINSIC_VOID;
5269 Info.memVT = MVT::v8i32;
5270 Info.ptrVal = I.getArgOperand(0);
5271 Info.offset = 0;
5272     Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
5273     Info.align = Align(16);
5274 return true;
5275 }
5276 }
5277 return false;
5278}
5279
5280/// getFunctionParamOptimizedAlign - since function arguments are passed via
5281/// .param space, we may want to increase their alignment in a way that
5282/// ensures that we can effectively vectorize their loads & stores. We can
5283 /// increase alignment only if the function has internal or private
5284 /// linkage, since for other linkage types callers may already rely on the
5285 /// default alignment. To allow 128-bit vectorized loads/stores, this function
5286/// ensures that alignment is 16 or greater.
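/// For example, an argument whose ABI alignment is only 4 is reported here as
/// align 16 for local functions, so its loads from .param space can be
/// vectorized into 128-bit accesses.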
5287 Align NVPTXTargetLowering::getFunctionParamOptimizedAlign(
5288     const Function *F, Type *ArgTy, const DataLayout &DL) const {
5289 // Capping the alignment to 128 bytes as that is the maximum alignment
5290 // supported by PTX.
5291 const Align ABITypeAlign = std::min(Align(128), DL.getABITypeAlign(ArgTy));
5292
5293 // If a function has linkage different from internal or private, we
5294 // must use default ABI alignment as external users rely on it. Same
5295 // for a function that may be called from a function pointer.
5296 if (!F || !F->hasLocalLinkage() ||
5297 F->hasAddressTaken(/*Users=*/nullptr,
5298 /*IgnoreCallbackUses=*/false,
5299 /*IgnoreAssumeLikeCalls=*/true,
5300 /*IgnoreLLVMUsed=*/true))
5301 return ABITypeAlign;
5302
5303 assert(!isKernelFunction(*F) && "Expect kernels to have non-local linkage");
5304 return std::max(Align(16), ABITypeAlign);
5305}
5306
5307/// Helper for computing alignment of a device function byval parameter.
5308 Align NVPTXTargetLowering::getFunctionByValParamAlign(
5309     const Function *F, Type *ArgTy, Align InitialAlign,
5310 const DataLayout &DL) const {
5311 Align ArgAlign = InitialAlign;
5312 // Try to increase alignment to enhance vectorization options.
5313 if (F)
5314 ArgAlign = std::max(ArgAlign, getFunctionParamOptimizedAlign(F, ArgTy, DL));
5315
5316   // Old ptxas versions have a bug: when PTX code takes the address of a
5317   // byval parameter with alignment < 4, ptxas generates code to
5318   // spill the argument into memory. Alas, on sm_50+ ptxas generates
5319   // SASS code that fails with a misaligned access. To work around
5320   // the problem, make sure that we align byval parameters to at
5321   // least 4 bytes. This bug seems to be fixed starting with
5322   // ptxas > 9.0.
5323 // TODO: remove this after verifying the bug is not reproduced
5324 // on non-deprecated ptxas versions.
5325   if (ForceMinByValParamAlign)
5326     ArgAlign = std::max(ArgAlign, Align(4));
5327
5328 return ArgAlign;
5329}
5330
5331 // Helper for getting a function parameter name. The name is composed from
5332 // the function name and the parameter's index. A negative index corresponds
5333 // to the special parameter (an unsized array) used for passing variable
5334 // arguments.
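// For example, the second parameter of a function whose symbol is "foo" is
// named "foo_param_1", and its variable-argument buffer is named "foo_vararg".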
5334 std::string NVPTXTargetLowering::getParamName(const Function *F,
5335                                               int Idx) const {
5336 std::string ParamName;
5337 raw_string_ostream ParamStr(ParamName);
5338
5339 ParamStr << getTargetMachine().getSymbol(F)->getName();
5340 if (Idx < 0)
5341 ParamStr << "_vararg";
5342 else
5343 ParamStr << "_param_" << Idx;
5344
5345 return ParamName;
5346}
5347
5348/// isLegalAddressingMode - Return true if the addressing mode represented
5349/// by AM is legal for this target, for a load/store of the specified type.
5350/// Used to guide target specific optimizations, like loop strength reduction
5351/// (LoopStrengthReduce.cpp) and memory optimization for address mode
5352/// (CodeGenPrepare.cpp)
5353 bool NVPTXTargetLowering::isLegalAddressingMode(const DataLayout &DL,
5354                                                 const AddrMode &AM, Type *Ty,
5355 unsigned AS, Instruction *I) const {
5356 // AddrMode - This represents an addressing mode of:
5357 // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
5358 //
5359 // The legal address modes are
5360 // - [avar]
5361 // - [areg]
5362 // - [areg+immoff]
5363 // - [immAddr]
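  // For example, [%r1] and [%r1+16] are accepted, while a scaled-index form
  // such as [%r1+4*%r2] or a base-plus-register form [%r1+%r2] is rejected
  // below.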
5364
5365 // immoff must fit in a signed 32-bit int
5366 if (!APInt(64, AM.BaseOffs).isSignedIntN(32))
5367 return false;
5368
5369 if (AM.BaseGV)
5370 return !AM.BaseOffs && !AM.HasBaseReg && !AM.Scale;
5371
5372 switch (AM.Scale) {
5373 case 0: // "r", "r+i" or "i" is allowed
5374 break;
5375 case 1:
5376 if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
5377 return false;
5378 // Otherwise we have r+i.
5379 break;
5380 default:
5381 // No scale > 1 is allowed
5382 return false;
5383 }
5384 return true;
5385}
5386
5387//===----------------------------------------------------------------------===//
5388// NVPTX Inline Assembly Support
5389//===----------------------------------------------------------------------===//
5390
5391/// getConstraintType - Given a constraint letter, return the type of
5392/// constraint it is for this target.
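/// The letters recognized below all name register classes: 'b' is a 1-bit
/// predicate, 'c' and 'h' are 16-bit, 'r' and 'f' are 32-bit, 'l', 'N' and 'd'
/// are 64-bit, and 'q' is 128-bit; see getRegForInlineAsmConstraint below for
/// the mapping.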
5393 NVPTXTargetLowering::ConstraintType
5394 NVPTXTargetLowering::getConstraintType(StringRef Constraint) const {
5395   if (Constraint.size() == 1) {
5396 switch (Constraint[0]) {
5397 default:
5398 break;
5399 case 'b':
5400 case 'r':
5401 case 'h':
5402 case 'c':
5403 case 'l':
5404 case 'f':
5405 case 'd':
5406 case 'q':
5407 case '0':
5408 case 'N':
5409 return C_RegisterClass;
5410 }
5411 }
5412 return TargetLowering::getConstraintType(Constraint);
5413}
5414
5415std::pair<unsigned, const TargetRegisterClass *>
5416 NVPTXTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
5417                                                   StringRef Constraint,
5418 MVT VT) const {
5419 if (Constraint.size() == 1) {
5420 switch (Constraint[0]) {
5421 case 'b':
5422 return std::make_pair(0U, &NVPTX::B1RegClass);
5423 case 'c':
5424 case 'h':
5425 return std::make_pair(0U, &NVPTX::B16RegClass);
5426 case 'r':
5427 case 'f':
5428 return std::make_pair(0U, &NVPTX::B32RegClass);
5429 case 'l':
5430 case 'N':
5431 case 'd':
5432 return std::make_pair(0U, &NVPTX::B64RegClass);
5433 case 'q': {
5434 if (STI.getSmVersion() < 70)
5435 report_fatal_error("Inline asm with 128 bit operands is only "
5436 "supported for sm_70 and higher!");
5437 return std::make_pair(0U, &NVPTX::B128RegClass);
5438 }
5439 }
5440 }
5441 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
5442}
5443
5444//===----------------------------------------------------------------------===//
5445// NVPTX DAG Combining
5446//===----------------------------------------------------------------------===//
5447
5448 bool NVPTXTargetLowering::allowFMA(MachineFunction &MF,
5449                                    CodeGenOptLevel OptLevel) const {
5450 // Always honor command-line argument
5451 if (FMAContractLevelOpt.getNumOccurrences() > 0)
5452 return FMAContractLevelOpt > 0;
5453
5454 // Do not contract if we're not optimizing the code.
5455 if (OptLevel == CodeGenOptLevel::None)
5456 return false;
5457
5458 // Honor TargetOptions flags that explicitly say fusion is okay.
5459   if (MF.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast)
5460     return true;
5461
5462 return false;
5463}
5464
5465static bool isConstZero(const SDValue &Operand) {
5466 const auto *Const = dyn_cast<ConstantSDNode>(Operand);
5467 return Const && Const->getZExtValue() == 0;
5468}
5469
5470/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
5471/// operands N0 and N1. This is a helper for PerformADDCombine that is
5472/// called with the default operands, and if that fails, with commuted
5473/// operands.
5474static SDValue
5475 PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
5476                               TargetLowering::DAGCombinerInfo &DCI) {
5477   EVT VT = N0.getValueType();
5478
5479 // Since integer multiply-add costs the same as integer multiply
5480 // but is more costly than integer add, do the fusion only when
5481 // the mul is only used in the add.
5482 // TODO: this may not be true for later architectures, consider relaxing this
5483 if (!N0.getNode()->hasOneUse())
5484 return SDValue();
5485
5486 // fold (add (select cond, 0, (mul a, b)), c)
5487 // -> (select cond, c, (add (mul a, b), c))
5488 //
5489 if (N0.getOpcode() == ISD::SELECT) {
5490 unsigned ZeroOpNum;
5491 if (isConstZero(N0->getOperand(1)))
5492 ZeroOpNum = 1;
5493 else if (isConstZero(N0->getOperand(2)))
5494 ZeroOpNum = 2;
5495 else
5496 return SDValue();
5497
5498 SDValue M = N0->getOperand((ZeroOpNum == 1) ? 2 : 1);
5499 if (M->getOpcode() != ISD::MUL || !M.getNode()->hasOneUse())
5500 return SDValue();
5501
5502 SDLoc DL(N);
5503 SDValue Mul =
5504 DCI.DAG.getNode(ISD::MUL, DL, VT, M->getOperand(0), M->getOperand(1));
5505 SDValue MAD = DCI.DAG.getNode(ISD::ADD, DL, VT, Mul, N1);
5506 return DCI.DAG.getSelect(SDLoc(N), VT, N0->getOperand(0),
5507 ((ZeroOpNum == 1) ? N1 : MAD),
5508 ((ZeroOpNum == 1) ? MAD : N1));
5509 }
5510
5511 return SDValue();
5512}
5513
5514static SDValue
5515 PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
5516                                TargetLowering::DAGCombinerInfo &DCI,
5517                                CodeGenOptLevel OptLevel) {
5518 EVT VT = N0.getValueType();
5519 if (N0.getOpcode() == ISD::FMUL) {
5520 const auto *TLI = static_cast<const NVPTXTargetLowering *>(
5521 &DCI.DAG.getTargetLoweringInfo());
5522 if (!(TLI->allowFMA(DCI.DAG.getMachineFunction(), OptLevel) ||
5523 (N->getFlags().hasAllowContract() &&
5524 N0->getFlags().hasAllowContract())))
5525 return SDValue();
5526
5527 // For floating point:
5528     // Do the fusion only when the mul has fewer than 5 uses and all
5529     // of them are adds.
5530     // The heuristic is that if a use is not an add, then that use
5531     // cannot be fused into an fma, so the mul is still needed anyway.
5532     // If there are more than 4 uses, even if they are all adds, fusing
5533     // them will increase register pressure.
5534 //
5535 int numUses = 0;
5536 int nonAddCount = 0;
5537 for (const SDNode *User : N0.getNode()->users()) {
5538 numUses++;
5539 if (User->getOpcode() != ISD::FADD)
5540 ++nonAddCount;
5541 if (numUses >= 5)
5542 return SDValue();
5543 }
5544 if (nonAddCount) {
5545 int orderNo = N->getIROrder();
5546 int orderNo2 = N0.getNode()->getIROrder();
5547       // Simple heuristic for estimating potential register pressure: the
5548       // difference in IR order approximates the distance between def and
5549       // use, and the longer that distance, the more likely it is to cause
5550       // register pressure.
5551 if (orderNo - orderNo2 < 500)
5552 return SDValue();
5553
5554 // Now, check if at least one of the FMUL's operands is live beyond the
5555 // node N, which guarantees that the FMA will not increase register
5556 // pressure at node N.
5557 bool opIsLive = false;
5558 const SDNode *left = N0.getOperand(0).getNode();
5559 const SDNode *right = N0.getOperand(1).getNode();
5560
5561 if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
5562 opIsLive = true;
5563
5564 if (!opIsLive)
5565 for (const SDNode *User : left->users()) {
5566 int orderNo3 = User->getIROrder();
5567 if (orderNo3 > orderNo) {
5568 opIsLive = true;
5569 break;
5570 }
5571 }
5572
5573 if (!opIsLive)
5574 for (const SDNode *User : right->users()) {
5575 int orderNo3 = User->getIROrder();
5576 if (orderNo3 > orderNo) {
5577 opIsLive = true;
5578 break;
5579 }
5580 }
5581
5582 if (!opIsLive)
5583 return SDValue();
5584 }
5585
5586 return DCI.DAG.getNode(ISD::FMA, SDLoc(N), VT, N0.getOperand(0),
5587 N0.getOperand(1), N1);
5588 }
5589
5590 return SDValue();
5591}
5592
5593/// Fold unpacking movs into a load by increasing the number of return values.
5594///
5595/// ex:
5596/// L: v2f16,ch = load <p>
5597/// a: f16 = extractelt L:0, 0
5598/// b: f16 = extractelt L:0, 1
5599/// use(a, b)
5600///
5601/// ...is turned into...
5602///
5603/// L: f16,f16,ch = LoadV2 <p>
5604/// use(L:0, L:1)
5605static SDValue
5606 combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
5607   // Don't run this optimization before the legalizer
5608 if (!DCI.isAfterLegalizeDAG())
5609 return SDValue();
5610
5611 EVT ElementVT = N->getValueType(0);
5612 // Avoid non-packed types and v4i8
5613 if (!NVPTX::isPackedVectorTy(ElementVT) || ElementVT == MVT::v4i8)
5614 return SDValue();
5615
5616 // Check whether all outputs are either used by an extractelt or are
5617 // glue/chain nodes
5618 if (!all_of(N->uses(), [&](SDUse &U) {
5619 // Skip glue, chain nodes
5620 if (U.getValueType() == MVT::Glue || U.getValueType() == MVT::Other)
5621 return true;
5622 if (U.getUser()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
5623 if (N->getOpcode() != ISD::LOAD)
5624 return true;
5625 // Since this is an ISD::LOAD, check all extractelts are used. If
5626 // any are not used, we don't want to defeat another optimization that
5627 // will narrow the load.
5628 //
5629 // For example:
5630 //
5631 // L: v2f16,ch = load <p>
5632 // e0: f16 = extractelt L:0, 0
5633 // e1: f16 = extractelt L:0, 1 <-- unused
5634 // store e0
5635 //
5636 // Can be optimized by DAGCombiner to:
5637 //
5638 // L: f16,ch = load <p>
5639 // store L:0
5640 return !U.getUser()->use_empty();
5641 }
5642
5643 // Otherwise, this use prevents us from splitting a value.
5644 return false;
5645 }))
5646 return SDValue();
5647
5648 auto *LD = cast<MemSDNode>(N);
5649 SDLoc DL(LD);
5650
5651 // the new opcode after we double the number of operands
5652 unsigned Opcode;
5653 SmallVector<SDValue> Operands(LD->ops());
5654 unsigned OldNumOutputs; // non-glue, non-chain outputs
5655 switch (LD->getOpcode()) {
5656 case ISD::LOAD:
5657 OldNumOutputs = 1;
5658 // Any packed type is legal, so the legalizer will not have lowered
5659 // ISD::LOAD -> NVPTXISD::Load (unless it's under-aligned). We have to do it
5660 // here.
5661 Opcode = NVPTXISD::LoadV2;
5662 // append a "full" used bytes mask operand right before the extension type
5663 // operand, signifying that all bytes are used.
5664 Operands.push_back(DCI.DAG.getConstant(UINT32_MAX, DL, MVT::i32));
5665 Operands.push_back(DCI.DAG.getIntPtrConstant(
5666 cast<LoadSDNode>(LD)->getExtensionType(), DL));
5667 break;
5668 case NVPTXISD::LoadV2:
5669 OldNumOutputs = 2;
5670 Opcode = NVPTXISD::LoadV4;
5671 break;
5672 case NVPTXISD::LoadV4:
5673 // V8 is only supported for f32/i32. Don't forget, we're not changing the
5674 // load size here. This is already a 256-bit load.
5675 if (ElementVT != MVT::v2f32 && ElementVT != MVT::v2i32)
5676 return SDValue();
5677 OldNumOutputs = 4;
5678 Opcode = NVPTXISD::LoadV8;
5679 break;
5680 case NVPTXISD::LoadV8:
5681 // PTX doesn't support the next doubling of outputs
5682 return SDValue();
5683 }
5684
5685 // the non-glue, non-chain outputs in the new load
5686 const unsigned NewNumOutputs = OldNumOutputs * 2;
5687 SmallVector<EVT> NewVTs(NewNumOutputs, ElementVT.getVectorElementType());
5688 // add remaining chain and glue values
5689 NewVTs.append(LD->value_begin() + OldNumOutputs, LD->value_end());
5690
5691 // Create the new load
5692 SDValue NewLoad = DCI.DAG.getMemIntrinsicNode(
5693 Opcode, DL, DCI.DAG.getVTList(NewVTs), Operands, LD->getMemoryVT(),
5694 LD->getMemOperand());
5695
5696 // Now we use a combination of BUILD_VECTORs and a MERGE_VALUES node to keep
5697 // the outputs the same. These nodes will be optimized away in later
5698 // DAGCombiner iterations.
5699   SmallVector<SDValue> Results;
5700   for (unsigned I : seq(OldNumOutputs))
5701 Results.push_back(DCI.DAG.getBuildVector(
5702 ElementVT, DL, {NewLoad.getValue(I * 2), NewLoad.getValue(I * 2 + 1)}));
5703 // Add remaining chain and glue nodes
5704 for (unsigned I : seq(NewLoad->getNumValues() - NewNumOutputs))
5705 Results.push_back(NewLoad.getValue(NewNumOutputs + I));
5706
5707 return DCI.DAG.getMergeValues(Results, DL);
5708}
5709
5710/// Fold packing movs into a store.
5711///
5712/// ex:
5713/// v1: v2f16 = BUILD_VECTOR a:f16, b:f16
5714/// v2: v2f16 = BUILD_VECTOR c:f16, d:f16
5715/// StoreV2 v1, v2
5716///
5717/// ...is turned into...
5718///
5719/// StoreV4 a, b, c, d
5720 static SDValue combinePackingMovIntoStore(SDNode *N,
5721                                           TargetLowering::DAGCombinerInfo &DCI,
5722                                           unsigned Front, unsigned Back) {
5723 // We want to run this as late as possible since other optimizations may
5724 // eliminate the BUILD_VECTORs.
5725 if (!DCI.isAfterLegalizeDAG())
5726 return SDValue();
5727
5728 // Get the type of the operands being stored.
5729 EVT ElementVT = N->getOperand(Front).getValueType();
5730
5731 // Avoid non-packed types and v4i8
5732 if (!NVPTX::isPackedVectorTy(ElementVT) || ElementVT == MVT::v4i8)
5733 return SDValue();
5734
5735 auto *ST = cast<MemSDNode>(N);
5736
5737 // The new opcode after we double the number of operands.
5738 unsigned Opcode;
5739 switch (N->getOpcode()) {
5740 case ISD::STORE:
5741 // Any packed type is legal, so the legalizer will not have lowered
5742 // ISD::STORE -> NVPTXISD::Store (unless it's under-aligned). We have to do
5743 // it here.
5744 Opcode = NVPTXISD::StoreV2;
5745 break;
5746 case NVPTXISD::StoreV2:
5747 Opcode = NVPTXISD::StoreV4;
5748 break;
5749 case NVPTXISD::StoreV4:
5750 // V8 is only supported for f32/i32. Don't forget, we're not changing the
5751 // store size here. This is already a 256-bit store.
5752 if (ElementVT != MVT::v2f32 && ElementVT != MVT::v2i32)
5753 return SDValue();
5754 Opcode = NVPTXISD::StoreV8;
5755 break;
5756 case NVPTXISD::StoreV8:
5757 // PTX doesn't support the next doubling of operands
5758 return SDValue();
5759 default:
5760 llvm_unreachable("Unhandled store opcode");
5761 }
5762
5763 // Scan the operands and if they're all BUILD_VECTORs, we'll have gathered
5764 // their elements.
5765 SmallVector<SDValue, 4> Operands(N->ops().take_front(Front));
5766 for (SDValue BV : N->ops().drop_front(Front).drop_back(Back)) {
5767 if (BV.getOpcode() != ISD::BUILD_VECTOR)
5768 return SDValue();
5769
5770 // If the operand has multiple uses, this optimization can increase register
5771 // pressure.
5772 if (!BV.hasOneUse())
5773 return SDValue();
5774
5775 // DAGCombiner visits nodes bottom-up. Check the BUILD_VECTOR operands for
5776 // any signs they may be folded by some other pattern or rule.
5777 for (SDValue Op : BV->ops()) {
5778 // Peek through bitcasts
5779 if (Op.getOpcode() == ISD::BITCAST)
5780 Op = Op.getOperand(0);
5781
5782 // This may be folded into a PRMT.
5783 if (Op.getValueType() == MVT::i16 && Op.getOpcode() == ISD::TRUNCATE &&
5784 Op->getOperand(0).getValueType() == MVT::i32)
5785 return SDValue();
5786
5787 // This may be folded into cvt.bf16x2
5788 if (Op.getOpcode() == ISD::FP_ROUND)
5789 return SDValue();
5790 }
5791 Operands.append({BV.getOperand(0), BV.getOperand(1)});
5792 }
5793 Operands.append(N->op_end() - Back, N->op_end());
5794
5795 // Now we replace the store
5796 return DCI.DAG.getMemIntrinsicNode(Opcode, SDLoc(N), N->getVTList(), Operands,
5797 ST->getMemoryVT(), ST->getMemOperand());
5798}
5799
5801 const NVPTXSubtarget &STI) {
5802
5803 if (DCI.isBeforeLegalize() && N->getOpcode() == ISD::STORE) {
5804 // Here is our chance to custom lower a store with a non-simple type.
5805 // Unfortunately, we can't do this in the legalizer because there is no
5806     // way to setOperationAction for a non-simple type.
5807     auto *ST = cast<StoreSDNode>(N);
5808     if (!ST->getValue().getValueType().isSimple())
5809 return lowerSTOREVector(SDValue(ST, 0), DCI.DAG, STI);
5810 }
5811
5812 return combinePackingMovIntoStore(N, DCI, 1, 2);
5813}
5814
5816 const NVPTXSubtarget &STI) {
5817 if (DCI.isBeforeLegalize() && N->getOpcode() == ISD::LOAD) {
5818 // Here is our chance to custom lower a load with a non-simple type.
5819 // Unfortunately, we can't do this in the legalizer because there is no
5820     // way to setOperationAction for a non-simple type.
5821 if (!N->getValueType(0).isSimple())
5822 return lowerLoadVector(N, DCI.DAG, STI);
5823 }
5824
5825 return combineUnpackingMovIntoLoad(N, DCI);
5826}
5827
5828/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
5829///
5830 static SDValue PerformADDCombine(SDNode *N,
5831                                  TargetLowering::DAGCombinerInfo &DCI,
5832                                  CodeGenOptLevel OptLevel) {
5833 if (OptLevel == CodeGenOptLevel::None)
5834 return SDValue();
5835
5836 SDValue N0 = N->getOperand(0);
5837 SDValue N1 = N->getOperand(1);
5838
5839 // Skip non-integer, non-scalar case
5840 EVT VT = N0.getValueType();
5841 if (VT.isVector() || VT != MVT::i32)
5842 return SDValue();
5843
5844 // First try with the default operand order.
5845 if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI))
5846 return Result;
5847
5848 // If that didn't work, try again with the operands commuted.
5849 return PerformADDCombineWithOperands(N, N1, N0, DCI);
5850}
5851
5852/// PerformFADDCombine - Target-specific dag combine xforms for ISD::FADD.
5853///
5854 static SDValue PerformFADDCombine(SDNode *N,
5855                                   TargetLowering::DAGCombinerInfo &DCI,
5856                                   CodeGenOptLevel OptLevel) {
5857 SDValue N0 = N->getOperand(0);
5858 SDValue N1 = N->getOperand(1);
5859
5860 EVT VT = N0.getValueType();
5861 if (VT.isVector() || !(VT == MVT::f32 || VT == MVT::f64))
5862 return SDValue();
5863
5864 // First try with the default operand order.
5865 if (SDValue Result = PerformFADDCombineWithOperands(N, N0, N1, DCI, OptLevel))
5866 return Result;
5867
5868 // If that didn't work, try again with the operands commuted.
5869 return PerformFADDCombineWithOperands(N, N1, N0, DCI, OptLevel);
5870}
5871
5872/// Get 3-input version of a 2-input min/max opcode
5873static unsigned getMinMax3Opcode(unsigned MinMax2Opcode) {
5874 switch (MinMax2Opcode) {
5875 case ISD::FMAXNUM:
5876 case ISD::FMAXIMUMNUM:
5877 return NVPTXISD::FMAXNUM3;
5878 case ISD::FMINNUM:
5879 case ISD::FMINIMUMNUM:
5880 return NVPTXISD::FMINNUM3;
5881 case ISD::FMAXIMUM:
5882 return NVPTXISD::FMAXIMUM3;
5883 case ISD::FMINIMUM:
5884 return NVPTXISD::FMINIMUM3;
5885 default:
5886 llvm_unreachable("Invalid 2-input min/max opcode");
5887 }
5888}
5889
5890/// PerformFMinMaxCombine - Combine (fmaxnum (fmaxnum a, b), c) into
5891/// (fmaxnum3 a, b, c). Also covers other llvm min/max intrinsics.
5892 static SDValue PerformFMinMaxCombine(SDNode *N,
5893                                      TargetLowering::DAGCombinerInfo &DCI,
5894                                      unsigned PTXVersion, unsigned SmVersion) {
5895
5896 // 3-input min/max requires PTX 8.8+ and SM_100+, and only supports f32s
5897 EVT VT = N->getValueType(0);
5898 if (VT != MVT::f32 || PTXVersion < 88 || SmVersion < 100)
5899 return SDValue();
5900
5901 SDValue Op0 = N->getOperand(0);
5902 SDValue Op1 = N->getOperand(1);
5903 unsigned MinMaxOp2 = N->getOpcode();
5904 unsigned MinMaxOp3 = getMinMax3Opcode(MinMaxOp2);
5905
5906 if (Op0.getOpcode() == MinMaxOp2 && Op0.hasOneUse()) {
5907 // (maxnum (maxnum a, b), c) -> (maxnum3 a, b, c)
5908 SDValue A = Op0.getOperand(0);
5909 SDValue B = Op0.getOperand(1);
5910 SDValue C = Op1;
5911 return DCI.DAG.getNode(MinMaxOp3, SDLoc(N), VT, A, B, C, N->getFlags());
5912 } else if (Op1.getOpcode() == MinMaxOp2 && Op1.hasOneUse()) {
5913 // (maxnum a, (maxnum b, c)) -> (maxnum3 a, b, c)
5914 SDValue A = Op0;
5915 SDValue B = Op1.getOperand(0);
5916 SDValue C = Op1.getOperand(1);
5917 return DCI.DAG.getNode(MinMaxOp3, SDLoc(N), VT, A, B, C, N->getFlags());
5918 }
5919 return SDValue();
5920}
5921
5922 static SDValue PerformREMCombine(SDNode *N,
5923                                  TargetLowering::DAGCombinerInfo &DCI,
5924                                  CodeGenOptLevel OptLevel) {
5925 assert(N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM);
5926
5927 // Don't do anything at less than -O2.
5928 if (OptLevel < CodeGenOptLevel::Default)
5929 return SDValue();
5930
5931 SelectionDAG &DAG = DCI.DAG;
5932 SDLoc DL(N);
5933 EVT VT = N->getValueType(0);
5934 bool IsSigned = N->getOpcode() == ISD::SREM;
5935 unsigned DivOpc = IsSigned ? ISD::SDIV : ISD::UDIV;
5936
5937 const SDValue &Num = N->getOperand(0);
5938 const SDValue &Den = N->getOperand(1);
5939
5940 for (const SDNode *U : Num->users()) {
5941 if (U->getOpcode() == DivOpc && U->getOperand(0) == Num &&
5942 U->getOperand(1) == Den) {
5943 // Num % Den -> Num - (Num / Den) * Den
5944 return DAG.getNode(ISD::SUB, DL, VT, Num,
5945 DAG.getNode(ISD::MUL, DL, VT,
5946 DAG.getNode(DivOpc, DL, VT, Num, Den),
5947 Den));
5948 }
5949 }
5950 return SDValue();
5951}
5952
5953// (sign_extend|zero_extend (mul|shl) x, y) -> (mul.wide x, y)
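// For example, (i64 (sign_extend (mul nsw i32 %a, %b))) becomes
// MUL_WIDE_SIGNED (mul.wide.s32), which computes the full 64-bit product
// directly from the 32-bit sources.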
5955 CodeGenOptLevel OptLevel) {
5956 if (OptLevel == CodeGenOptLevel::None)
5957 return SDValue();
5958
5959 SDValue Op = N->getOperand(0);
5960 if (!Op.hasOneUse())
5961 return SDValue();
5962 EVT ToVT = N->getValueType(0);
5963 EVT FromVT = Op.getValueType();
5964 if (!((ToVT == MVT::i32 && FromVT == MVT::i16) ||
5965 (ToVT == MVT::i64 && FromVT == MVT::i32)))
5966 return SDValue();
5967 if (!(Op.getOpcode() == ISD::MUL ||
5968 (Op.getOpcode() == ISD::SHL && isa<ConstantSDNode>(Op.getOperand(1)))))
5969 return SDValue();
5970
5971 SDLoc DL(N);
5972 unsigned ExtOpcode = N->getOpcode();
5973 unsigned Opcode = 0;
5974 if (ExtOpcode == ISD::SIGN_EXTEND && Op->getFlags().hasNoSignedWrap())
5975 Opcode = NVPTXISD::MUL_WIDE_SIGNED;
5976 else if (ExtOpcode == ISD::ZERO_EXTEND && Op->getFlags().hasNoUnsignedWrap())
5977 Opcode = NVPTXISD::MUL_WIDE_UNSIGNED;
5978 else
5979 return SDValue();
5980 SDValue RHS = Op.getOperand(1);
5981 if (Op.getOpcode() == ISD::SHL) {
5982 const auto ShiftAmt = Op.getConstantOperandVal(1);
5983 const auto MulVal = APInt(ToVT.getSizeInBits(), 1) << ShiftAmt;
5984 RHS = DCI.DAG.getConstant(MulVal, DL, ToVT);
5985 }
5986 return DCI.DAG.getNode(Opcode, DL, ToVT, Op.getOperand(0), RHS);
5987}
5988
5989 enum OperandSignedness {
5990   Signed = 0,
5991   Unsigned,
5992   Unknown
5993 };
5994
5995/// IsMulWideOperandDemotable - Checks if the provided DAG node is an operand
5996/// that can be demoted to \p OptSize bits without loss of information. The
5997/// signedness of the operand, if determinable, is placed in \p S.
5998 static bool IsMulWideOperandDemotable(SDValue Op,
5999                                       unsigned OptSize,
6000 OperandSignedness &S) {
6001 S = Unknown;
6002
6003 if (Op.getOpcode() == ISD::SIGN_EXTEND ||
6004 Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {
6005 EVT OrigVT = Op.getOperand(0).getValueType();
6006 if (OrigVT.getFixedSizeInBits() <= OptSize) {
6007 S = Signed;
6008 return true;
6009 }
6010 } else if (Op.getOpcode() == ISD::ZERO_EXTEND) {
6011 EVT OrigVT = Op.getOperand(0).getValueType();
6012 if (OrigVT.getFixedSizeInBits() <= OptSize) {
6013 S = Unsigned;
6014 return true;
6015 }
6016 }
6017
6018 return false;
6019}
6020
6021/// AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can
6022/// be demoted to \p OptSize bits without loss of information. If the operands
6023/// contain a constant, it should appear as the RHS operand. The signedness of
6024/// the operands is placed in \p IsSigned.
6025 static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS,
6026                                         unsigned OptSize,
6027 bool &IsSigned) {
6028 OperandSignedness LHSSign;
6029
6030 // The LHS operand must be a demotable op
6031 if (!IsMulWideOperandDemotable(LHS, OptSize, LHSSign))
6032 return false;
6033
6034 // We should have been able to determine the signedness from the LHS
6035 if (LHSSign == Unknown)
6036 return false;
6037
6038 IsSigned = (LHSSign == Signed);
6039
6040 // The RHS can be a demotable op or a constant
6041   if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(RHS)) {
6042     const APInt &Val = CI->getAPIntValue();
6043 if (LHSSign == Unsigned) {
6044 return Val.isIntN(OptSize);
6045 } else {
6046 return Val.isSignedIntN(OptSize);
6047 }
6048 } else {
6049 OperandSignedness RHSSign;
6050 if (!IsMulWideOperandDemotable(RHS, OptSize, RHSSign))
6051 return false;
6052
6053 return LHSSign == RHSSign;
6054 }
6055}
6056
6057/// TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply
6058/// of M/2 bits that produces an M-bit result (i.e. mul.wide). This transform
6059/// works on both multiply DAG nodes and SHL DAG nodes with a constant shift
6060/// amount.
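/// For example, an i64 multiply of two values sign-extended from i32 becomes
/// mul.wide.s32, and (shl (zext i16 %x to i32), 3) becomes
/// mul.wide.u16 %x, 8.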
6061 static SDValue TryMULWIDECombine(SDNode *N,
6062                                  TargetLowering::DAGCombinerInfo &DCI) {
6063   EVT MulType = N->getValueType(0);
6064 if (MulType != MVT::i32 && MulType != MVT::i64) {
6065 return SDValue();
6066 }
6067
6068 SDLoc DL(N);
6069 unsigned OptSize = MulType.getSizeInBits() >> 1;
6070 SDValue LHS = N->getOperand(0);
6071 SDValue RHS = N->getOperand(1);
6072
6073 // Canonicalize the multiply so the constant (if any) is on the right
6074 if (N->getOpcode() == ISD::MUL) {
6075 if (isa<ConstantSDNode>(LHS)) {
6076 std::swap(LHS, RHS);
6077 }
6078 }
6079
6080 // If we have a SHL, determine the actual multiply amount
6081 if (N->getOpcode() == ISD::SHL) {
6082     ConstantSDNode *ShlRHS = dyn_cast<ConstantSDNode>(RHS);
6083     if (!ShlRHS) {
6084 return SDValue();
6085 }
6086
6087 APInt ShiftAmt = ShlRHS->getAPIntValue();
6088 unsigned BitWidth = MulType.getSizeInBits();
6089 if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {
6090 APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;
6091 RHS = DCI.DAG.getConstant(MulVal, DL, MulType);
6092 } else {
6093 return SDValue();
6094 }
6095 }
6096
6097 bool Signed;
6098 // Verify that our operands are demotable
6099 if (!AreMulWideOperandsDemotable(LHS, RHS, OptSize, Signed)) {
6100 return SDValue();
6101 }
6102
6103 EVT DemotedVT;
6104 if (MulType == MVT::i32) {
6105 DemotedVT = MVT::i16;
6106 } else {
6107 DemotedVT = MVT::i32;
6108 }
6109
6110 // Truncate the operands to the correct size. Note that these are just for
6111 // type consistency and will (likely) be eliminated in later phases.
6112 SDValue TruncLHS =
6113 DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, LHS);
6114 SDValue TruncRHS =
6115 DCI.DAG.getNode(ISD::TRUNCATE, DL, DemotedVT, RHS);
6116
6117 unsigned Opc;
6118 if (Signed) {
6119 Opc = NVPTXISD::MUL_WIDE_SIGNED;
6120 } else {
6121 Opc = NVPTXISD::MUL_WIDE_UNSIGNED;
6122 }
6123
6124 return DCI.DAG.getNode(Opc, DL, MulType, TruncLHS, TruncRHS);
6125}
6126
6127static bool isConstOne(const SDValue &Operand) {
6128 const auto *Const = dyn_cast<ConstantSDNode>(Operand);
6129 return Const && Const->getZExtValue() == 1;
6130}
6131
6132 static SDValue matchMADConstOnePattern(SDValue Add) {
6133   if (Add->getOpcode() != ISD::ADD)
6134 return SDValue();
6135
6136 if (isConstOne(Add->getOperand(0)))
6137 return Add->getOperand(1);
6138
6139 if (isConstOne(Add->getOperand(1)))
6140 return Add->getOperand(0);
6141
6142 return SDValue();
6143}
6144
6145 static SDValue combineMADConstOne(SDValue X, SDValue Add, EVT VT, SDLoc DL,
6146                                   TargetLowering::DAGCombinerInfo &DCI) {
6147
6148   if (SDValue Y = matchMADConstOnePattern(Add)) {
6149     SDValue Mul = DCI.DAG.getNode(ISD::MUL, DL, VT, X, Y);
6150 return DCI.DAG.getNode(ISD::ADD, DL, VT, Mul, X);
6151 }
6152
6153 return SDValue();
6154}
6155
6156 static SDValue combineMulSelectConstOne(SDValue X, SDValue Select, EVT VT,
6157                                         SDLoc DL,
6158                                         TargetLowering::DAGCombinerInfo &DCI) {
6159   if (Select->getOpcode() != ISD::SELECT)
6160 return SDValue();
6161
6162 SDValue Cond = Select->getOperand(0);
6163
6164 unsigned ConstOpNo;
6165 if (isConstOne(Select->getOperand(1)))
6166 ConstOpNo = 1;
6167 else if (isConstOne(Select->getOperand(2)))
6168 ConstOpNo = 2;
6169 else
6170 return SDValue();
6171
6172 SDValue Y = Select->getOperand((ConstOpNo == 1) ? 2 : 1);
6173
6174 // Do not combine if the resulting sequence is not obviously profitable.
6175   if (!matchMADConstOnePattern(Y))
6176     return SDValue();
6177
6178 SDValue NewMul = DCI.DAG.getNode(ISD::MUL, DL, VT, X, Y);
6179
6180 return DCI.DAG.getNode(ISD::SELECT, DL, VT, Cond,
6181 (ConstOpNo == 1) ? X : NewMul,
6182 (ConstOpNo == 1) ? NewMul : X);
6183}
6184
6185static SDValue
6186 PerformMULCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
6187                               TargetLowering::DAGCombinerInfo &DCI) {
6188
6189 EVT VT = N0.getValueType();
6190 if (VT.isVector())
6191 return SDValue();
6192
6193 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
6194 return SDValue();
6195
6196 SDLoc DL(N);
6197
6198 // (mul x, (add y, 1)) -> (add (mul x, y), x)
6199 if (SDValue Res = combineMADConstOne(N0, N1, VT, DL, DCI))
6200 return Res;
6201 if (SDValue Res = combineMADConstOne(N1, N0, VT, DL, DCI))
6202 return Res;
6203
6204   // (mul x, (select c, y, 1)) -> (select c, (mul x, y), x)
6205 if (SDValue Res = combineMulSelectConstOne(N0, N1, VT, DL, DCI))
6206 return Res;
6207 if (SDValue Res = combineMulSelectConstOne(N1, N0, VT, DL, DCI))
6208 return Res;
6209
6210 return SDValue();
6211}
6212
6213/// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.
6214 static SDValue PerformMULCombine(SDNode *N,
6215                                  TargetLowering::DAGCombinerInfo &DCI,
6216                                  CodeGenOptLevel OptLevel) {
6217 if (OptLevel == CodeGenOptLevel::None)
6218 return SDValue();
6219
6220 if (SDValue Ret = TryMULWIDECombine(N, DCI))
6221 return Ret;
6222
6223 SDValue N0 = N->getOperand(0);
6224 SDValue N1 = N->getOperand(1);
6225 return PerformMULCombineWithOperands(N, N0, N1, DCI);
6226}
6227
6228/// PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
6229 static SDValue PerformSHLCombine(SDNode *N,
6230                                  TargetLowering::DAGCombinerInfo &DCI,
6231                                  CodeGenOptLevel OptLevel) {
6232 if (OptLevel > CodeGenOptLevel::None) {
6233 // Try mul.wide combining at OptLevel > 0
6234 if (SDValue Ret = TryMULWIDECombine(N, DCI))
6235 return Ret;
6236 }
6237
6238 return SDValue();
6239}
6240
6241 static SDValue PerformSETCCCombine(SDNode *N,
6242                                    TargetLowering::DAGCombinerInfo &DCI,
6243                                    unsigned int SmVersion) {
6244 EVT CCType = N->getValueType(0);
6245 SDValue A = N->getOperand(0);
6246 SDValue B = N->getOperand(1);
6247
6248 EVT AType = A.getValueType();
6249 if (!(CCType == MVT::v2i1 && (AType == MVT::v2f16 || AType == MVT::v2bf16)))
6250 return SDValue();
6251
6252 if (A.getValueType() == MVT::v2bf16 && SmVersion < 90)
6253 return SDValue();
6254
6255 SDLoc DL(N);
6256 // setp.f16x2 returns two scalar predicates, which we need to
6257 // convert back to v2i1. The returned result will be scalarized by
6258 // the legalizer, but the comparison will remain a single vector
6259 // instruction.
6260 SDValue CCNode = DCI.DAG.getNode(
6261 A.getValueType() == MVT::v2f16 ? NVPTXISD::SETP_F16X2
6262                                      : NVPTXISD::SETP_BF16X2,
6263       DL, DCI.DAG.getVTList(MVT::i1, MVT::i1), {A, B, N->getOperand(2)});
6264 return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, CCType, CCNode.getValue(0),
6265 CCNode.getValue(1));
6266}
6267
6268 static SDValue PerformEXTRACTCombine(SDNode *N,
6269                                      TargetLowering::DAGCombinerInfo &DCI) {
6270   SDValue Vector = N->getOperand(0);
6271 if (Vector->getOpcode() == ISD::FREEZE)
6272 Vector = Vector->getOperand(0);
6273 SDLoc DL(N);
6274 EVT VectorVT = Vector.getValueType();
6275 if (Vector->getOpcode() == ISD::LOAD && VectorVT.isSimple() &&
6276 IsPTXVectorType(VectorVT.getSimpleVT()))
6277 return SDValue(); // Native vector loads already combine nicely w/
6278 // extract_vector_elt.
6279 // Don't mess with singletons or packed types (v2*32, v2*16, v4i8 and v8i8),
6280 // we already handle them OK.
6281 if (VectorVT.getVectorNumElements() == 1 ||
6282 NVPTX::isPackedVectorTy(VectorVT) || VectorVT == MVT::v8i8)
6283 return SDValue();
6284
6285 // Don't mess with undef values as sra may be simplified to 0, not undef.
6286 if (Vector->isUndef() || ISD::allOperandsUndef(Vector.getNode()))
6287 return SDValue();
6288
6289 uint64_t VectorBits = VectorVT.getSizeInBits();
6290 // We only handle the types we can extract in-register.
6291 if (!(VectorBits == 16 || VectorBits == 32 || VectorBits == 64))
6292 return SDValue();
6293
6294 ConstantSDNode *Index = dyn_cast<ConstantSDNode>(N->getOperand(1));
6295 // Index == 0 is handled by generic DAG combiner.
6296 if (!Index || Index->getZExtValue() == 0)
6297 return SDValue();
6298
6299 MVT IVT = MVT::getIntegerVT(VectorBits);
6300 EVT EltVT = VectorVT.getVectorElementType();
6301 EVT EltIVT = EltVT.changeTypeToInteger();
6302 uint64_t EltBits = EltVT.getScalarSizeInBits();
6303
6304 SDValue Result = DCI.DAG.getNode(
6305 ISD::TRUNCATE, DL, EltIVT,
6306 DCI.DAG.getNode(
6307 ISD::SRA, DL, IVT, DCI.DAG.getNode(ISD::BITCAST, DL, IVT, Vector),
6308 DCI.DAG.getConstant(Index->getZExtValue() * EltBits, DL, IVT)));
6309
6310 // If element has non-integer type, bitcast it back to the expected type.
6311 if (EltVT != EltIVT)
6312 Result = DCI.DAG.getNode(ISD::BITCAST, DL, EltVT, Result);
6313   // Past the legalizer, we may need to extend i8 -> i16 to match the register type.
6314 if (EltVT != N->getValueType(0))
6315 Result = DCI.DAG.getNode(ISD::ANY_EXTEND, DL, N->getValueType(0), Result);
6316
6317 return Result;
6318}
6319
6320/// Transform patterns like:
6321/// (select (ugt shift_amt, BitWidth-1), 0, (srl/shl x, shift_amt))
6322/// (select (ult shift_amt, BitWidth), (srl/shl x, shift_amt), 0)
6323/// Into:
6324/// (NVPTXISD::SRL_CLAMP x, shift_amt) or (NVPTXISD::SHL_CLAMP x, shift_amt)
6325///
6326/// These patterns arise from C/C++ code like `shift >= 32 ? 0 : x >> shift`
6327/// which guards against undefined behavior. PTX shr/shl instructions clamp
6328/// shift amounts >= BitWidth to produce 0 for logical shifts, making the
6329/// guard redundant.
6330///
6331/// Note: We only handle SRL and SHL, not SRA, because arithmetic right
6332/// shifts could produce 0 or -1 when shift >= BitWidth.
6333/// Note: We don't handle uge or ule. These don't appear because of
6334/// canonicalization.
6335 static SDValue PerformSELECTShiftCombine(SDNode *N,
6336 TargetLowering::DAGCombinerInfo &DCI) {
6337 if (!DCI.isAfterLegalizeDAG())
6338 return SDValue();
6339
6340 using namespace SDPatternMatch;
6341 unsigned BitWidth = N->getValueType(0).getSizeInBits();
6342 SDValue ShiftAmt, ShiftOp;
6343
6344 // Match logical shifts where the shift amount in the guard matches the shift
6345 // amount in the operation.
6346 auto LogicalShift =
6347 m_AllOf(m_Value(ShiftOp),
6348 m_AnyOf(m_Srl(m_Value(), m_TruncOrSelf(m_Deferred(ShiftAmt))),
6349 m_Shl(m_Value(), m_TruncOrSelf(m_Deferred(ShiftAmt)))));
6350
6351 // shift_amt > BitWidth-1 ? 0 : shift_op
6352 bool MatchedUGT =
6353 sd_match(N, m_Select(m_SetCC(m_Value(ShiftAmt),
6354 m_SpecificInt(BitWidth - 1),
6355 m_SpecificCondCode(ISD::SETUGT)),
6356 m_Zero(), LogicalShift));
6357 // shift_amt < BitWidth ? shift_op : 0
6358 bool MatchedULT =
6359 !MatchedUGT &&
6360 sd_match(N, m_Select(m_SetCC(m_Value(ShiftAmt),
6361 m_SpecificInt(BitWidth),
6362 m_SpecificCondCode(ISD::SETULT)),
6363 LogicalShift, m_Zero()));
6364
6365 if (!MatchedUGT && !MatchedULT)
6366 return SDValue();
6367
6368 // Return a clamp shift operation, which has the same semantics as PTX shift.
6369 unsigned ClampOpc = ShiftOp.getOpcode() == ISD::SRL ? NVPTXISD::SRL_CLAMP
6370 : NVPTXISD::SHL_CLAMP;
6371 return DCI.DAG.getNode(ClampOpc, SDLoc(N), ShiftOp.getValueType(),
6372 ShiftOp.getOperand(0), ShiftOp.getOperand(1));
6373}
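// Worked example: for i32 values, the DAG produced by `amt >= 32 ? 0 : x >> amt`,
//   (select (setcc amt, 31, setugt), 0, (srl x, amt)),
// is rewritten to the single node (NVPTXISD::SRL_CLAMP x, amt), since the PTX
// shift instruction already yields 0 once the shift amount reaches the bit width.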
6374
6375 static SDValue PerformVSELECTCombine(SDNode *N,
6376 TargetLowering::DAGCombinerInfo &DCI) {
6377 SDValue VA = N->getOperand(1);
6378 EVT VectorVT = VA.getValueType();
6379 if (VectorVT != MVT::v4i8)
6380 return SDValue();
6381
6382 // We need to split the vselect into individual per-element operations. Because
6383 // we use BFE/BFI instructions for byte extraction/insertion, we end up with
6384 // 32-bit values anyway, so we may as well do the comparison as i32 to avoid the
6385 // conversions to/from i16 normally used for i8 values.
6386 SmallVector<SDValue, 4> E;
6387 SDLoc DL(N);
6388 SDValue VCond = N->getOperand(0);
6389 SDValue VB = N->getOperand(2);
6390 for (int I = 0; I < 4; ++I) {
6391 SDValue C = DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i1, VCond,
6392 DCI.DAG.getConstant(I, DL, MVT::i32));
6393 SDValue EA = DCI.DAG.getAnyExtOrTrunc(
6394 DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8, VA,
6395 DCI.DAG.getConstant(I, DL, MVT::i32)),
6396 DL, MVT::i32);
6397 SDValue EB = DCI.DAG.getAnyExtOrTrunc(
6398 DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i8, VB,
6399 DCI.DAG.getConstant(I, DL, MVT::i32)),
6400 DL, MVT::i32);
6401 E.push_back(DCI.DAG.getAnyExtOrTrunc(
6402 DCI.DAG.getNode(ISD::SELECT, DL, MVT::i32, C, EA, EB), DL, MVT::i8));
6403 }
6404 return DCI.DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v4i8, E);
6405}
6406
6407static SDValue
6408 PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
6409 auto VT = N->getValueType(0);
6410 if (!DCI.isAfterLegalizeDAG() ||
6411 // only process v2*16 types
6412 !(NVPTX::isPackedVectorTy(VT) && VT.is32BitVector() &&
6413 VT.getVectorNumElements() == 2))
6414 return SDValue();
6415
6416 auto Op0 = N->getOperand(0);
6417 auto Op1 = N->getOperand(1);
6418
6419 // Start out by assuming we want to take the lower 2 bytes of each i32
6420 // operand.
6421 uint64_t Op0Bytes = 0x10;
6422 uint64_t Op1Bytes = 0x54;
6423
6424 std::pair<SDValue *, uint64_t *> OpData[2] = {{&Op0, &Op0Bytes},
6425 {&Op1, &Op1Bytes}};
6426
6427 // Check that each operand is an i16, truncated from an i32 operand. We'll
6428 // select individual bytes from those original operands. Optionally, fold in a
6429 // shift right of that original operand.
6430 for (auto &[Op, OpBytes] : OpData) {
6431 // Eat up any bitcast
6432 if (Op->getOpcode() == ISD::BITCAST)
6433 *Op = Op->getOperand(0);
6434
6435 if (!(Op->getValueType() == MVT::i16 && Op->getOpcode() == ISD::TRUNCATE &&
6436 Op->getOperand(0).getValueType() == MVT::i32))
6437 return SDValue();
6438
6439 // If the truncate has multiple uses, this optimization can increase
6440 // register pressure
6441 if (!Op->hasOneUse())
6442 return SDValue();
6443
6444 *Op = Op->getOperand(0);
6445
6446 // Optionally, fold in a shift-right of the original operand and let permute
6447 // pick the two higher bytes of the original value directly.
6448 if (Op->getOpcode() == ISD::SRL && isa<ConstantSDNode>(Op->getOperand(1))) {
6449 if (cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue() == 16) {
6450 // Shift the PRMT byte selector to pick upper bytes from each respective
6451 // value, instead of the lower ones: 0x10 -> 0x32, 0x54 -> 0x76
6452 assert((*OpBytes == 0x10 || *OpBytes == 0x54) &&
6453 "PRMT selector values out of range");
6454 *OpBytes += 0x22;
6455 *Op = Op->getOperand(0);
6456 }
6457 }
6458 }
6459
6460 SDLoc DL(N);
6461 auto &DAG = DCI.DAG;
6462
6463 auto PRMT =
6464 getPRMT(DAG.getBitcast(MVT::i32, Op0), DAG.getBitcast(MVT::i32, Op1),
6465 (Op1Bytes << 8) | Op0Bytes, DL, DAG);
6466 return DAG.getBitcast(VT, PRMT);
6467}
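// Worked example: building a v2i16 from (trunc i16 %a) and (trunc i16 %b),
// where %a and %b are i32 values, becomes a single PRMT of %a and %b with byte
// selector 0x5410 (low two bytes of each source). If one operand is instead
// (trunc (srl %a, 16)), its half of the selector is bumped to the upper bytes,
// giving 0x5432, 0x7610, or 0x7632 when both operands are shifted.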
6468
6469 static SDValue combineADDRSPACECAST(SDNode *N,
6470 TargetLowering::DAGCombinerInfo &DCI) {
6471 auto *ASCN1 = cast<AddrSpaceCastSDNode>(N);
6472
6473 if (auto *ASCN2 = dyn_cast<AddrSpaceCastSDNode>(ASCN1->getOperand(0))) {
6474 assert(ASCN2->getDestAddressSpace() == ASCN1->getSrcAddressSpace());
6475
6476 // Fold asc[B -> A](asc[A -> B](x)) -> x
6477 if (ASCN1->getDestAddressSpace() == ASCN2->getSrcAddressSpace())
6478 return ASCN2->getOperand(0);
6479 }
6480
6481 return SDValue();
6482}
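// Worked example: casting a generic pointer to the global address space and
// then back to generic folds to the original generic pointer, removing a
// cvta.to.global/cvta.global round trip from the emitted PTX.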
6483
6484// Given a constant selector value and a prmt mode, return the selector value
6485// normalized to the generic prmt mode. See the PTX ISA documentation for more
6486// details:
6487// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-prmt
6488static APInt getPRMTSelector(const APInt &Selector, unsigned Mode) {
6489 assert(Selector.getBitWidth() == 32 && "PRMT must have i32 operands");
6490
6491 if (Mode == NVPTX::PTXPrmtMode::NONE)
6492 return Selector;
6493
6494 const unsigned V = Selector.trunc(2).getZExtValue();
6495
6496 const auto GetSelector = [](unsigned S0, unsigned S1, unsigned S2,
6497 unsigned S3) {
6498 return APInt(32, S0 | (S1 << 4) | (S2 << 8) | (S3 << 12));
6499 };
6500
6501 switch (Mode) {
6502 case NVPTX::PTXPrmtMode::F4E:
6503 return GetSelector(V, V + 1, V + 2, V + 3);
6504 case NVPTX::PTXPrmtMode::B4E:
6505 return GetSelector(V, (V - 1) & 7, (V - 2) & 7, (V - 3) & 7);
6506 case NVPTX::PTXPrmtMode::RC8:
6507 return GetSelector(V, V, V, V);
6508 case NVPTX::PTXPrmtMode::ECL:
6509 return GetSelector(V, std::max(V, 1U), std::max(V, 2U), 3U);
6510 case NVPTX::PTXPrmtMode::ECR:
6511 return GetSelector(0, std::min(V, 1U), std::min(V, 2U), V);
6512 case NVPTX::PTXPrmtMode::RC16: {
6513 unsigned V1 = (V & 1) << 1;
6514 return GetSelector(V1, V1 + 1, V1, V1 + 1);
6515 }
6516 default:
6517 llvm_unreachable("Invalid PRMT mode");
6518 }
6519}
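// Worked example: for the forward-4-extract mode with the low two selector
// bits equal to 1, the normalized generic selector is GetSelector(1, 2, 3, 4),
// i.e. 0x4321, which picks bytes 1..4 of the concatenated {b, a} value.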
6520
6521static APInt computePRMT(APInt A, APInt B, APInt Selector, unsigned Mode) {
6522 assert(A.getBitWidth() == 32 && B.getBitWidth() == 32 &&
6523 Selector.getBitWidth() == 32 && "PRMT must have i32 operands");
6524 // {b, a} = {{b7, b6, b5, b4}, {b3, b2, b1, b0}}
6525 APInt BitField = B.concat(A);
6526 APInt SelectorVal = getPRMTSelector(Selector, Mode);
6527 APInt Result(32, 0);
6528 for (unsigned I : llvm::seq(4U)) {
6529 APInt Sel = SelectorVal.extractBits(4, I * 4);
6530 unsigned Idx = Sel.getLoBits(3).getZExtValue();
6531 unsigned Sign = Sel.getHiBits(1).getZExtValue();
6532 APInt Byte = BitField.extractBits(8, Idx * 8);
6533 if (Sign)
6534 Byte = Byte.ashr(8);
6535 Result.insertBits(Byte, I * 8);
6536 }
6537 return Result;
6538}
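// Worked example: with A = 0x11223344, B = 0x55667788, Selector = 0x0000 and
// the generic mode, every result byte selects byte 0 of {B, A} (0x44), so the
// folded constant is 0x44444444. A selector nibble with its top bit set would
// instead replicate the chosen byte's sign bit across that result byte.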
6539
6540 static SDValue combinePRMT(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
6541 CodeGenOptLevel OptLevel) {
6542 if (OptLevel == CodeGenOptLevel::None)
6543 return SDValue();
6544
6545 // Constant fold PRMT
6546 if (isa<ConstantSDNode>(N->getOperand(0)) &&
6547 isa<ConstantSDNode>(N->getOperand(1)) &&
6548 isa<ConstantSDNode>(N->getOperand(2)))
6549 return DCI.DAG.getConstant(computePRMT(N->getConstantOperandAPInt(0),
6550 N->getConstantOperandAPInt(1),
6551 N->getConstantOperandAPInt(2),
6552 N->getConstantOperandVal(3)),
6553 SDLoc(N), N->getValueType(0));
6554 return SDValue();
6555}
6556
6557// During call lowering we wrap the return values in a ProxyReg node which
6558 // depends on the chain value produced by the completed call. This ensures that
6559// the full call is emitted in cases where libcalls are used to legalize
6560// operations. To improve the functioning of other DAG combines we pull all
6561// operations we can through one of these nodes, ensuring that the ProxyReg
6562// directly wraps a load. That is:
6563//
6564// (ProxyReg (zext (load retval0))) => (zext (ProxyReg (load retval0)))
6565//
6566 static SDValue sinkProxyReg(SDValue R, SDValue Chain,
6567 TargetLowering::DAGCombinerInfo &DCI) {
6568 switch (R.getOpcode()) {
6569 case ISD::TRUNCATE:
6570 case ISD::ANY_EXTEND:
6571 case ISD::SIGN_EXTEND:
6572 case ISD::ZERO_EXTEND:
6573 case ISD::BITCAST: {
6574 if (SDValue V = sinkProxyReg(R.getOperand(0), Chain, DCI))
6575 return DCI.DAG.getNode(R.getOpcode(), SDLoc(R), R.getValueType(), V);
6576 return SDValue();
6577 }
6578 case ISD::SHL:
6579 case ISD::SRL:
6580 case ISD::SRA:
6581 case ISD::OR: {
6582 if (SDValue A = sinkProxyReg(R.getOperand(0), Chain, DCI))
6583 if (SDValue B = sinkProxyReg(R.getOperand(1), Chain, DCI))
6584 return DCI.DAG.getNode(R.getOpcode(), SDLoc(R), R.getValueType(), A, B);
6585 return SDValue();
6586 }
6587 case ISD::Constant:
6588 return R;
6589 case ISD::LOAD:
6590 case NVPTXISD::LoadV2:
6591 case NVPTXISD::LoadV4: {
6592 return DCI.DAG.getNode(NVPTXISD::ProxyReg, SDLoc(R), R.getValueType(),
6593 {Chain, R});
6594 }
6595 case ISD::BUILD_VECTOR: {
6596 if (DCI.isBeforeLegalize())
6597 return SDValue();
6598
6599 SmallVector<SDValue, 4> Ops;
6600 for (auto &Op : R->ops()) {
6601 SDValue V = sinkProxyReg(Op, Chain, DCI);
6602 if (!V)
6603 return SDValue();
6604 Ops.push_back(V);
6605 }
6606 return DCI.DAG.getNode(ISD::BUILD_VECTOR, SDLoc(R), R.getValueType(), Ops);
6607 }
6608 case ISD::EXTRACT_VECTOR_ELT: {
6609 if (DCI.isBeforeLegalize())
6610 return SDValue();
6611
6612 if (SDValue V = sinkProxyReg(R.getOperand(0), Chain, DCI))
6613 return DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(R),
6614 R.getValueType(), V, R.getOperand(1));
6615 return SDValue();
6616 }
6617 default:
6618 return SDValue();
6619 }
6620}
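// Worked example: (ProxyReg ch, (build_vector (load a), (load b))) is rebuilt
// as (build_vector (ProxyReg ch, (load a)), (ProxyReg ch, (load b))); constant
// operands are passed through unchanged, so every ProxyReg ends up wrapping a
// load directly.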
6621
6622 static SDValue combineProxyReg(SDNode *N,
6623 TargetLowering::DAGCombinerInfo &DCI) {
6624
6625 SDValue Chain = N->getOperand(0);
6626 SDValue Reg = N->getOperand(1);
6627
6628 // If the ProxyReg is not wrapping a load, try to pull the operations through
6629 // the ProxyReg.
6630 if (Reg.getOpcode() != ISD::LOAD) {
6631 if (SDValue V = sinkProxyReg(Reg, Chain, DCI))
6632 return V;
6633 }
6634
6635 return SDValue();
6636}
6637
6638SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
6639 DAGCombinerInfo &DCI) const {
6640 CodeGenOptLevel OptLevel = getTargetMachine().getOptLevel();
6641 switch (N->getOpcode()) {
6642 default:
6643 break;
6644 case ISD::ADD:
6645 return PerformADDCombine(N, DCI, OptLevel);
6646 case ISD::ADDRSPACECAST:
6647 return combineADDRSPACECAST(N, DCI);
6648 case ISD::SIGN_EXTEND:
6649 case ISD::ZERO_EXTEND:
6650 return combineMulWide(N, DCI, OptLevel);
6651 case ISD::BUILD_VECTOR:
6652 return PerformBUILD_VECTORCombine(N, DCI);
6653 case ISD::EXTRACT_VECTOR_ELT:
6654 return PerformEXTRACTCombine(N, DCI);
6655 case ISD::FADD:
6656 return PerformFADDCombine(N, DCI, OptLevel);
6657 case ISD::FMAXNUM:
6658 case ISD::FMINNUM:
6659 case ISD::FMAXIMUM:
6660 case ISD::FMINIMUM:
6661 case ISD::FMAXIMUMNUM:
6662 case ISD::FMINIMUMNUM:
6663 return PerformFMinMaxCombine(N, DCI, STI.getPTXVersion(),
6664 STI.getSmVersion());
6665 case ISD::LOAD:
6666 case NVPTXISD::LoadV2:
6667 case NVPTXISD::LoadV4:
6668 return combineLOAD(N, DCI, STI);
6669 case ISD::MUL:
6670 return PerformMULCombine(N, DCI, OptLevel);
6671 case NVPTXISD::PRMT:
6672 return combinePRMT(N, DCI, OptLevel);
6673 case NVPTXISD::ProxyReg:
6674 return combineProxyReg(N, DCI);
6675 case ISD::SETCC:
6676 return PerformSETCCCombine(N, DCI, STI.getSmVersion());
6677 case ISD::SHL:
6678 return PerformSHLCombine(N, DCI, OptLevel);
6679 case ISD::SREM:
6680 case ISD::UREM:
6681 return PerformREMCombine(N, DCI, OptLevel);
6682 case ISD::STORE:
6683 case NVPTXISD::StoreV2:
6684 case NVPTXISD::StoreV4:
6685 return combineSTORE(N, DCI, STI);
6686 case ISD::SELECT:
6687 return PerformSELECTShiftCombine(N, DCI);
6688 case ISD::VSELECT:
6689 return PerformVSELECTCombine(N, DCI);
6690 }
6691 return SDValue();
6692}
6693
6694 static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG,
6695 SmallVectorImpl<SDValue> &Results) {
6696 // Handle bitcasting to v2i8 without hitting the default promotion
6697 // strategy which goes through stack memory.
6698 SDValue Op(Node, 0);
6699 EVT ToVT = Op->getValueType(0);
6700 if (ToVT != MVT::v2i8) {
6701 return;
6702 }
6703
6704 // Bitcast to i16 and unpack elements into a vector
6705 SDLoc DL(Node);
6706 SDValue AsInt = DAG.getBitcast(MVT::i16, Op->getOperand(0));
6707 SDValue Vec0 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, AsInt);
6708 SDValue Const8 = DAG.getConstant(8, DL, MVT::i16);
6709 SDValue Vec1 =
6710 DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
6711 DAG.getNode(ISD::SRL, DL, MVT::i16, {AsInt, Const8}));
6712 Results.push_back(
6713 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i8, {Vec0, Vec1}));
6714}
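// Worked example: bitcasting a 16-bit value whose bit pattern is 0xAABB to
// v2i8 yields <0xBB, 0xAA>: element 0 is the plain truncate of the i16 bits
// and element 1 is the truncate of those bits shifted right by 8.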
6715
6716 static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
6717 SmallVectorImpl<SDValue> &Results) {
6718 SDValue Chain = N->getOperand(0);
6719 SDValue Intrin = N->getOperand(1);
6720 SDLoc DL(N);
6721
6722 // Get the intrinsic ID
6723 unsigned IntrinNo = Intrin.getNode()->getAsZExtVal();
6724 switch (IntrinNo) {
6725 default:
6726 return;
6727 case Intrinsic::nvvm_ldu_global_i:
6728 case Intrinsic::nvvm_ldu_global_f:
6729 case Intrinsic::nvvm_ldu_global_p: {
6730 EVT ResVT = N->getValueType(0);
6731
6732 if (ResVT.isVector()) {
6733 // Vector LDG/LDU
6734
6735 unsigned NumElts = ResVT.getVectorNumElements();
6736 EVT EltVT = ResVT.getVectorElementType();
6737
6738 // Since LDU/LDG are target nodes, we cannot rely on DAG type
6739 // legalization.
6740 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
6741 // loaded type to i16 and propagate the "real" type as the memory type.
6742 bool NeedTrunc = false;
6743 if (EltVT.getSizeInBits() < 16) {
6744 EltVT = MVT::i16;
6745 NeedTrunc = true;
6746 }
6747
6748 unsigned Opcode = 0;
6749 SDVTList LdResVTs;
6750
6751 switch (NumElts) {
6752 default:
6753 return;
6754 case 2:
6755 Opcode = NVPTXISD::LDUV2;
6756 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
6757 break;
6758 case 4: {
6759 Opcode = NVPTXISD::LDUV4;
6760 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
6761 LdResVTs = DAG.getVTList(ListVTs);
6762 break;
6763 }
6764 }
6765
6766 SmallVector<SDValue, 8> OtherOps;
6767
6768 // Copy regular operands
6769
6770 OtherOps.push_back(Chain); // Chain
6771 // Skip operand 1 (intrinsic ID)
6772 // Others
6773 OtherOps.append(N->op_begin() + 2, N->op_end());
6774
6776
6777 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
6778 MemSD->getMemoryVT(),
6779 MemSD->getMemOperand());
6780
6781 SmallVector<SDValue, 4> ScalarRes;
6782
6783 for (unsigned i = 0; i < NumElts; ++i) {
6784 SDValue Res = NewLD.getValue(i);
6785 if (NeedTrunc)
6786 Res =
6787 DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
6788 ScalarRes.push_back(Res);
6789 }
6790
6791 SDValue LoadChain = NewLD.getValue(NumElts);
6792
6793 SDValue BuildVec =
6794 DAG.getBuildVector(ResVT, DL, ScalarRes);
6795
6796 Results.push_back(BuildVec);
6797 Results.push_back(LoadChain);
6798 } else {
6799 // i8 LDG/LDU
6800 assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
6801 "Custom handling of non-i8 ldu/ldg?");
6802
6803 // Just copy all operands as-is
6805
6806 // Force output to i16
6807 SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
6808
6810
6811 // We make sure the memory type is i8, which will be used during isel
6812 // to select the proper instruction.
6813 SDValue NewLD =
6815 MVT::i8, MemSD->getMemOperand());
6816
6817 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
6818 NewLD.getValue(0)));
6819 Results.push_back(NewLD.getValue(1));
6820 }
6821 return;
6822 }
6823
6824 case Intrinsic::nvvm_tcgen05_ld_16x64b_x4:
6825 case Intrinsic::nvvm_tcgen05_ld_16x64b_x8:
6826 case Intrinsic::nvvm_tcgen05_ld_16x64b_x16:
6827 case Intrinsic::nvvm_tcgen05_ld_16x64b_x32:
6828 case Intrinsic::nvvm_tcgen05_ld_16x64b_x64:
6829 case Intrinsic::nvvm_tcgen05_ld_16x64b_x128:
6830 case Intrinsic::nvvm_tcgen05_ld_32x32b_x4:
6831 case Intrinsic::nvvm_tcgen05_ld_32x32b_x8:
6832 case Intrinsic::nvvm_tcgen05_ld_32x32b_x16:
6833 case Intrinsic::nvvm_tcgen05_ld_32x32b_x32:
6834 case Intrinsic::nvvm_tcgen05_ld_32x32b_x64:
6835 case Intrinsic::nvvm_tcgen05_ld_32x32b_x128:
6836 case Intrinsic::nvvm_tcgen05_ld_16x128b_x2:
6837 case Intrinsic::nvvm_tcgen05_ld_16x128b_x4:
6838 case Intrinsic::nvvm_tcgen05_ld_16x128b_x8:
6839 case Intrinsic::nvvm_tcgen05_ld_16x128b_x16:
6840 case Intrinsic::nvvm_tcgen05_ld_16x128b_x32:
6841 case Intrinsic::nvvm_tcgen05_ld_16x128b_x64:
6842 case Intrinsic::nvvm_tcgen05_ld_16x256b_x1:
6843 case Intrinsic::nvvm_tcgen05_ld_16x256b_x2:
6844 case Intrinsic::nvvm_tcgen05_ld_16x256b_x4:
6845 case Intrinsic::nvvm_tcgen05_ld_16x256b_x8:
6846 case Intrinsic::nvvm_tcgen05_ld_16x256b_x16:
6847 case Intrinsic::nvvm_tcgen05_ld_16x256b_x32:
6848 if (auto Res = lowerTcgen05Ld(N, DAG)) {
6849 Results.push_back(Res->first);
6850 Results.push_back(Res->second);
6851 }
6852 return;
6853
6854 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x4:
6855 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x8:
6856 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x16:
6857 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x32:
6858 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x64:
6859 case Intrinsic::nvvm_tcgen05_ld_16x32bx2_x128:
6860 if (auto Res = lowerTcgen05Ld(N, DAG, /*HasOffset=*/true)) {
6861 Results.push_back(Res->first);
6862 Results.push_back(Res->second);
6863 }
6864 return;
6865 }
6866}
6867
6868 static void ReplaceCopyFromReg_128(SDNode *N, SelectionDAG &DAG,
6869 SmallVectorImpl<SDValue> &Results) {
6870 // Change the CopyFromReg to output 2 64-bit results instead of a 128-bit
6871 // result so that it can pass the legalization
6872 SDLoc DL(N);
6873 SDValue Chain = N->getOperand(0);
6874 SDValue Reg = N->getOperand(1);
6875 SDValue Glue = N->getOperand(2);
6876
6877 assert(Reg.getValueType() == MVT::i128 &&
6878 "Custom lowering for CopyFromReg with 128-bit reg only");
6879 SmallVector<EVT, 4> ResultsType = {MVT::i64, MVT::i64, N->getValueType(1),
6880 N->getValueType(2)};
6881 SmallVector<SDValue, 3> NewOps = {Chain, Reg, Glue};
6882
6883 SDValue NewValue = DAG.getNode(ISD::CopyFromReg, DL, ResultsType, NewOps);
6884 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128,
6885 {NewValue.getValue(0), NewValue.getValue(1)});
6886
6887 Results.push_back(Pair);
6888 Results.push_back(NewValue.getValue(2));
6889 Results.push_back(NewValue.getValue(3));
6890}
6891
6892 static void replaceProxyReg(SDNode *N, SelectionDAG &DAG,
6893 const TargetLowering &TLI,
6894 SmallVectorImpl<SDValue> &Results) {
6895 SDValue Chain = N->getOperand(0);
6896 SDValue Reg = N->getOperand(1);
6897
6898 MVT VT = TLI.getRegisterType(*DAG.getContext(), Reg.getValueType());
6899
6900 SDValue NewReg = DAG.getAnyExtOrTrunc(Reg, SDLoc(N), VT);
6901 SDValue NewProxy =
6902 DAG.getNode(NVPTXISD::ProxyReg, SDLoc(N), VT, {Chain, NewReg});
6903 SDValue Res = DAG.getAnyExtOrTrunc(NewProxy, SDLoc(N), N->getValueType(0));
6904
6905 Results.push_back(Res);
6906}
6907
6908 static void replaceAtomicSwap128(SDNode *N, SelectionDAG &DAG,
6909 const NVPTXSubtarget &STI,
6910 SmallVectorImpl<SDValue> &Results) {
6911 assert(N->getValueType(0) == MVT::i128 &&
6912 "Custom lowering for atomic128 only supports i128");
6913
6914 AtomicSDNode *AN = cast<AtomicSDNode>(N);
6915 SDLoc dl(N);
6916
6917 if (!STI.hasAtomSwap128()) {
6920 "Support for b128 atomics introduced in PTX ISA version 8.3 and "
6921 "requires target sm_90.",
6922 dl.getDebugLoc()));
6923
6924 Results.push_back(DAG.getUNDEF(MVT::i128));
6925 Results.push_back(AN->getOperand(0)); // Chain
6926 return;
6927 }
6928
6930 Ops.push_back(AN->getOperand(0)); // Chain
6931 Ops.push_back(AN->getOperand(1)); // Ptr
6932 for (const auto &Op : AN->ops().drop_front(2)) {
6933 // Low part
6934 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i64, Op,
6935 DAG.getIntPtrConstant(0, dl)));
6936 // High part
6937 Ops.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i64, Op,
6938 DAG.getIntPtrConstant(1, dl)));
6939 }
6940 unsigned Opcode = N->getOpcode() == ISD::ATOMIC_SWAP
6943 SDVTList Tys = DAG.getVTList(MVT::i64, MVT::i64, MVT::Other);
6944 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, MVT::i128,
6945 AN->getMemOperand());
6946 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i128,
6947 {Result.getValue(0), Result.getValue(1)}));
6948 Results.push_back(Result.getValue(2));
6949}
6950
6951void NVPTXTargetLowering::ReplaceNodeResults(
6953 switch (N->getOpcode()) {
6954 default:
6955 report_fatal_error("Unhandled custom legalization");
6956 case ISD::BITCAST:
6957 ReplaceBITCAST(N, DAG, Results);
6958 return;
6959 case ISD::LOAD:
6960 case ISD::MLOAD:
6961 replaceLoadVector(N, DAG, Results, STI);
6962 return;
6963 case ISD::INTRINSIC_W_CHAIN:
6964 ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
6965 return;
6966 case ISD::CopyFromReg:
6967 ReplaceCopyFromReg_128(N, DAG, Results);
6968 return;
6969 case NVPTXISD::ProxyReg:
6970 replaceProxyReg(N, DAG, *this, Results);
6971 return;
6972 case ISD::ATOMIC_CMP_SWAP:
6973 case ISD::ATOMIC_SWAP:
6974 replaceAtomicSwap128(N, DAG, STI, Results);
6975 return;
6976 }
6977}
6978
6979 NVPTXTargetLowering::AtomicExpansionKind
6980 NVPTXTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
6981 Type *Ty = AI->getValOperand()->getType();
6982
6983 if (AI->isFloatingPointOperation()) {
6985 if (Ty->isHalfTy() && STI.getSmVersion() >= 70 &&
6986 STI.getPTXVersion() >= 63)
6988 if (Ty->isBFloatTy() && STI.getSmVersion() >= 90 &&
6989 STI.getPTXVersion() >= 78)
6991 if (Ty->isFloatTy())
6993 if (Ty->isDoubleTy() && STI.hasAtomAddF64())
6995 }
6997 }
6998
6999 assert(Ty->isIntegerTy() && "Ty should be integer at this point");
7000 const unsigned BitWidth = cast<IntegerType>(Ty)->getBitWidth();
7001
7002 switch (AI->getOperation()) {
7003 default:
7006 if (BitWidth == 128)
7008 [[fallthrough]];
7012 switch (BitWidth) {
7013 case 8:
7014 case 16:
7016 case 32:
7018 case 64:
7019 if (STI.hasAtomBitwise64())
7022 case 128:
7024 default:
7025 llvm_unreachable("unsupported width encountered");
7026 }
7033 switch (BitWidth) {
7034 case 8:
7035 case 16:
7037 case 32:
7039 case 64:
7040 if (STI.hasAtomMinMax64())
7043 case 128:
7045 default:
7046 llvm_unreachable("unsupported width encountered");
7047 }
7050 switch (BitWidth) {
7051 case 32:
7053 case 8:
7054 case 16:
7055 case 64:
7056 case 128:
7058 default:
7059 llvm_unreachable("unsupported width encountered");
7060 }
7061 }
7062
7064}
7065
7067 const Instruction *I) const {
7068 auto *CI = dyn_cast<AtomicCmpXchgInst>(I);
7069 // When CAS bitwidth is not supported on the hardware, the CAS is emulated
7070 // using a retry loop that uses a higher-bitwidth monotonic CAS. We enforce
7071 // the memory order using explicit fences around the retry loop.
7072 // The memory order of natively supported CAS operations can be enforced
7073 // by lowering to an atom.cas with the right memory synchronizing effect.
7074 // However, atom.cas only supports relaxed, acquire, release and acq_rel.
7075 // So we also use explicit fences for enforcing memory order for
7076 // seq_cst CAS with natively-supported bitwidths.
7077 return CI &&
7078 (cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth() <
7079 STI.getMinCmpXchgSizeInBits() ||
7080 CI->getMergedOrdering() == AtomicOrdering::SequentiallyConsistent);
7081}
7082
7084 const Instruction *I) const {
7085 auto *CI = dyn_cast<AtomicCmpXchgInst>(I);
7086 bool BitwidthSupportedAndIsSeqCst =
7087 CI && CI->getMergedOrdering() == AtomicOrdering::SequentiallyConsistent &&
7088 cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth() >=
7089 STI.getMinCmpXchgSizeInBits();
7090 return BitwidthSupportedAndIsSeqCst ? AtomicOrdering::Acquire
7092}
7093
7095 Instruction *Inst,
7096 AtomicOrdering Ord) const {
7097 if (!isa<AtomicCmpXchgInst>(Inst))
7098 return TargetLoweringBase::emitLeadingFence(Builder, Inst, Ord);
7099
7100 // Specialize for cmpxchg
7101 // Emit a fence.sc leading fence for cmpxchg seq_cst which are not emulated
7102 SyncScope::ID SSID = cast<AtomicCmpXchgInst>(Inst)->getSyncScopeID();
7103 if (isReleaseOrStronger(Ord))
7104 return Builder.CreateFence(Ord == AtomicOrdering::SequentiallyConsistent
7105 ? Ord
7106 : AtomicOrdering::Release,
7107 SSID);
7108
7109 return nullptr;
7110}
7111
7113 Instruction *Inst,
7114 AtomicOrdering Ord) const {
7115 // Specialize for cmpxchg
7116 if (!isa<AtomicCmpXchgInst>(Inst))
7117 return TargetLoweringBase::emitTrailingFence(Builder, Inst, Ord);
7118
7119 auto *CI = cast<AtomicCmpXchgInst>(Inst);
7120 auto CASWidth =
7121 cast<IntegerType>(CI->getCompareOperand()->getType())->getBitWidth();
7122 SyncScope::ID SSID = CI->getSyncScopeID();
7123 // Do not emit a trailing fence for cmpxchg seq_cst which are not emulated
7124 if (isAcquireOrStronger(Ord) &&
7125 (Ord != AtomicOrdering::SequentiallyConsistent ||
7126 CASWidth < STI.getMinCmpXchgSizeInBits()))
7127 return Builder.CreateFence(AtomicOrdering::Acquire, SSID);
7128
7129 return nullptr;
7130}
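// Summary of the cmpxchg fence placement above: a seq_cst cmpxchg of a
// natively supported width gets a leading seq_cst fence and no trailing fence,
// while a seq_cst cmpxchg narrower than the minimum native CAS width (which is
// emulated with a wider CAS retry loop) additionally gets a trailing acquire
// fence.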
7131
7132// Rather than default to SINT when both UINT and SINT are custom, we only
7133// change the opcode when UINT is not legal and SINT is. UINT is preferred when
7134// both are custom since unsigned CVT instructions can lead to slightly better
7135// SASS code with fewer instructions.
7137 EVT ToVT) const {
7138 if (isOperationLegal(Op, ToVT))
7139 return Op;
7140 switch (Op) {
7141 case ISD::FP_TO_UINT:
7142 if (isOperationLegal(ISD::FP_TO_SINT, ToVT))
7143 return ISD::FP_TO_SINT;
7144 break;
7145 case ISD::STRICT_FP_TO_UINT:
7146 if (isOperationLegal(ISD::STRICT_FP_TO_SINT, ToVT))
7147 return ISD::STRICT_FP_TO_SINT;
7148 break;
7149 case ISD::VP_FP_TO_UINT:
7150 if (isOperationLegal(ISD::VP_FP_TO_SINT, ToVT))
7151 return ISD::VP_FP_TO_SINT;
7152 break;
7153 default:
7154 break;
7155 }
7156 return Op;
7157}
7158
7159// Pin NVPTXTargetObjectFile's vtables to this file.
7161
7166
7167 static void computeKnownBitsForPRMT(const SDValue Op, KnownBits &Known,
7168 const SelectionDAG &DAG, unsigned Depth) {
7169 SDValue A = Op.getOperand(0);
7170 SDValue B = Op.getOperand(1);
7171 ConstantSDNode *Selector = dyn_cast<ConstantSDNode>(Op.getOperand(2));
7172 unsigned Mode = Op.getConstantOperandVal(3);
7173
7174 if (!Selector)
7175 return;
7176
7177 KnownBits AKnown = DAG.computeKnownBits(A, Depth);
7178 KnownBits BKnown = DAG.computeKnownBits(B, Depth);
7179
7180 // {b, a} = {{b7, b6, b5, b4}, {b3, b2, b1, b0}}
7181 assert(AKnown.getBitWidth() == 32 && BKnown.getBitWidth() == 32 &&
7182 "PRMT must have i32 operands");
7183 assert(Known.getBitWidth() == 32 && "PRMT must have i32 result");
7184 KnownBits BitField = BKnown.concat(AKnown);
7185
7186 APInt SelectorVal = getPRMTSelector(Selector->getAPIntValue(), Mode);
7187 for (unsigned I : llvm::seq(4)) {
7188 APInt Sel = SelectorVal.extractBits(4, I * 4);
7189 unsigned Idx = Sel.getLoBits(3).getZExtValue();
7190 unsigned Sign = Sel.getHiBits(1).getZExtValue();
7191 KnownBits Byte = BitField.extractBits(8, Idx * 8);
7192 if (Sign)
7193 Byte = KnownBits::ashr(Byte, 8);
7194 Known.insertBits(Byte, I * 8);
7195 }
7196}
7197
7198static void computeKnownBitsForLoadV(const SDValue Op, KnownBits &Known) {
7199 auto *LD = cast<MemSDNode>(Op);
7200
7201 // We can't do anything without knowing the sign bit.
7202 auto ExtType = LD->getConstantOperandVal(LD->getNumOperands() - 1);
7203 if (ExtType == ISD::SEXTLOAD)
7204 return;
7205
7206 // ExtLoading to vector types is weird and may not work well with known bits.
7207 auto DestVT = LD->getValueType(0);
7208 if (DestVT.isVector())
7209 return;
7210
7211 assert(Known.getBitWidth() == DestVT.getSizeInBits());
7212 auto ElementBitWidth = NVPTXDAGToDAGISel::getFromTypeWidthForLoad(LD);
7213 Known.Zero.setHighBits(Known.getBitWidth() - ElementBitWidth);
7214}
7215
7216 void NVPTXTargetLowering::computeKnownBitsForTargetNode(
7217 const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
7218 const SelectionDAG &DAG, unsigned Depth) const {
7219 Known.resetAll();
7220
7221 switch (Op.getOpcode()) {
7222 case NVPTXISD::PRMT:
7223 computeKnownBitsForPRMT(Op, Known, DAG, Depth);
7224 break;
7225 case NVPTXISD::LoadV2:
7226 case NVPTXISD::LoadV4:
7227 case NVPTXISD::LoadV8:
7228 computeKnownBitsForLoadV(Op, Known);
7229 break;
7230 default:
7231 break;
7232 }
7233}
7234
7235static std::pair<APInt, APInt> getPRMTDemandedBits(const APInt &SelectorVal,
7236 const APInt &DemandedBits) {
7237 APInt DemandedLHS = APInt(32, 0);
7238 APInt DemandedRHS = APInt(32, 0);
7239
7240 for (unsigned I : llvm::seq(4)) {
7241 if (DemandedBits.extractBits(8, I * 8).isZero())
7242 continue;
7243
7244 APInt Sel = SelectorVal.extractBits(4, I * 4);
7245 unsigned Idx = Sel.getLoBits(3).getZExtValue();
7246 unsigned Sign = Sel.getHiBits(1).getZExtValue();
7247
7248 APInt &Src = Idx < 4 ? DemandedLHS : DemandedRHS;
7249 unsigned ByteStart = (Idx % 4) * 8;
7250 if (Sign)
7251 Src.setBit(ByteStart + 7);
7252 else
7253 Src.setBits(ByteStart, ByteStart + 8);
7254 }
7255
7256 return {DemandedLHS, DemandedRHS};
7257}
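// Worked example: if only the low result byte is demanded and its selector
// nibble is 0x5, byte 1 of the RHS operand (bits 8..15) becomes demanded; with
// nibble 0xD (same byte index, sign bit set) only bit 15, that byte's sign
// bit, is demanded.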
7258
7259// Replace undef with 0 as this is easier for other optimizations such as
7260// known bits.
7261 static SDValue canonicalizePRMTInput(SDValue Op, SelectionDAG &DAG) {
7262 if (!Op)
7263 return SDValue();
7264 if (Op.isUndef())
7265 return DAG.getConstant(0, SDLoc(), MVT::i32);
7266 return Op;
7267}
7268
7269 static SDValue simplifyDemandedBitsForPRMT(SDValue PRMT,
7270 const APInt &DemandedBits,
7271 SelectionDAG &DAG,
7272 const TargetLowering &TLI,
7273 unsigned Depth) {
7274 assert(PRMT.getOpcode() == NVPTXISD::PRMT);
7275 SDValue Op0 = PRMT.getOperand(0);
7276 SDValue Op1 = PRMT.getOperand(1);
7277 auto *SelectorConst = dyn_cast<ConstantSDNode>(PRMT.getOperand(2));
7278 if (!SelectorConst)
7279 return SDValue();
7280
7281 unsigned Mode = PRMT.getConstantOperandVal(3);
7282 const APInt Selector = getPRMTSelector(SelectorConst->getAPIntValue(), Mode);
7283
7284 // Try to simplify the PRMT to one of the inputs if the used bytes are all
7285 // from the same input in the correct order.
7286 const unsigned LeadingBytes = DemandedBits.countLeadingZeros() / 8;
7287 const unsigned SelBits = (4 - LeadingBytes) * 4;
7288 if (Selector.getLoBits(SelBits) == APInt(32, 0x3210).getLoBits(SelBits))
7289 return Op0;
7290 if (Selector.getLoBits(SelBits) == APInt(32, 0x7654).getLoBits(SelBits))
7291 return Op1;
7292
7293 auto [DemandedLHS, DemandedRHS] = getPRMTDemandedBits(Selector, DemandedBits);
7294
7295 // Attempt to avoid multi-use ops if we don't need anything from them.
7296 SDValue DemandedOp0 =
7297 TLI.SimplifyMultipleUseDemandedBits(Op0, DemandedLHS, DAG, Depth + 1);
7298 SDValue DemandedOp1 =
7299 TLI.SimplifyMultipleUseDemandedBits(Op1, DemandedRHS, DAG, Depth + 1);
7300
7301 DemandedOp0 = canonicalizePRMTInput(DemandedOp0, DAG);
7302 DemandedOp1 = canonicalizePRMTInput(DemandedOp1, DAG);
7303 if ((DemandedOp0 && DemandedOp0 != Op0) ||
7304 (DemandedOp1 && DemandedOp1 != Op1)) {
7305 Op0 = DemandedOp0 ? DemandedOp0 : Op0;
7306 Op1 = DemandedOp1 ? DemandedOp1 : Op1;
7307 return getPRMT(Op0, Op1, Selector.getZExtValue(), SDLoc(PRMT), DAG);
7308 }
7309
7310 return SDValue();
7311}
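// Worked example: if only the low 16 bits of the PRMT are demanded and the
// normalized selector ends in 0x10, the node folds to Op0 directly; if it ends
// in 0x54, it folds to Op1.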
7312
7313 bool NVPTXTargetLowering::SimplifyDemandedBitsForTargetNode(
7314 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
7315 KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const {
7316 Known.resetAll();
7317
7318 switch (Op.getOpcode()) {
7319 case NVPTXISD::PRMT:
7320 if (SDValue Result = simplifyDemandedBitsForPRMT(Op, DemandedBits, TLO.DAG,
7321 *this, Depth)) {
7322 TLO.CombineTo(Op, Result);
7323 return true;
7324 }
7325 break;
7326 default:
7327 break;
7328 }
7329
7330 computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth);
7331 return false;
7332}
return SDValue()
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
constexpr LLT S1
constexpr LLT F32
AMDGPU Register Bank Select
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombineWithOperands - Try DAG combinations for an ADD with operands N0 and N1.
static SDValue PerformADDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
static SDValue PerformVSELECTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformBUILD_VECTORCombine - Target-specific dag combine xforms for ISD::BUILD_VECTOR.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file contains the declarations of entities that describe floating point environment and related ...
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first DebugLoc that has line number information, given a range of instructions.
Register Reg
Register const TargetRegisterInfo * TRI
#define T
NVPTX address space definition.
static SDValue reportInvalidTensormapReplaceUsage(SDValue Op, SelectionDAG &DAG, unsigned Val)
static bool shouldConvertToIndirectCall(const CallBase *CB, const GlobalAddressSDNode *Func)
static SDValue combineADDRSPACECAST(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static cl::opt< bool > sched4reg("nvptx-sched4reg", cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false))
static SDValue lowerTcgen05St(SDValue Op, SelectionDAG &DAG)
static SDValue PerformEXTRACTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static cl::opt< NVPTX::DivPrecisionLevel > UsePrecDivF32("nvptx-prec-divf32", cl::Hidden, cl::desc("NVPTX Specific: Override the precision of the lowering for f32 fdiv"), cl::values(clEnumValN(NVPTX::DivPrecisionLevel::Approx, "0", "Use div.approx"), clEnumValN(NVPTX::DivPrecisionLevel::Full, "1", "Use div.full"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754, "2", "Use IEEE Compliant F32 div.rnd if available (default)"), clEnumValN(NVPTX::DivPrecisionLevel::IEEE754_NoFTZ, "3", "Use IEEE Compliant F32 div.rnd if available, no FTZ")), cl::init(NVPTX::DivPrecisionLevel::IEEE754))
static bool isConstOne(const SDValue &Operand)
static cl::opt< unsigned > FMAContractLevelOpt("nvptx-fma-level", cl::Hidden, cl::desc("NVPTX Specific: FMA contraction (0: don't do it" " 1: do it 2: do it aggressively"), cl::init(2))
static bool IsPTXVectorType(MVT VT)
static SDValue PerformSELECTShiftCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Transform patterns like: (select (ugt shift_amt, BitWidth-1), 0, (srl/shl x, shift_amt)) (select (ult...
static SDValue lowerLOADi1(LoadSDNode *LD, SelectionDAG &DAG)
static SDValue lowerIntrinsicVoid(SDValue Op, SelectionDAG &DAG)
static MachinePointerInfo refinePtrAS(SDValue &Ptr, SelectionDAG &DAG, const DataLayout &DL, const TargetLowering &TL)
static SDValue lowerROT(SDValue Op, SelectionDAG &DAG)
static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL, LLVMContext &Ctx, CallingConv::ID CallConv, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > &Offsets, uint64_t StartingOffset=0)
ComputePTXValueVTs - For the given Type Ty, returns the set of primitive legal-ish MVTs that compose ...
static void ReplaceBITCAST(SDNode *Node, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static void replaceAtomicSwap128(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI, SmallVectorImpl< SDValue > &Results)
static unsigned getMinMax3Opcode(unsigned MinMax2Opcode)
Get 3-input version of a 2-input min/max opcode.
static SDValue lowerSTOREVector(SDValue Op, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static SDValue lowerLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static void replaceProxyReg(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI, SmallVectorImpl< SDValue > &Results)
static void ReplaceCopyFromReg_128(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static SDValue lowerCTLZCTPOP(SDValue Op, SelectionDAG &DAG)
static SDValue combineMADConstOne(SDValue X, SDValue Add, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
static SDValue combinePRMT(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue combinePackingMovIntoStore(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned Front, unsigned Back)
Fold packing movs into a store.
static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, SmallVectorImpl< SDValue > &Results)
static SDValue getBuildVectorizedValue(unsigned N, const SDLoc &dl, SelectionDAG &DAG, T GetElement)
static SDValue getExtractVectorizedValue(SDValue V, unsigned I, EVT VT, const SDLoc &dl, SelectionDAG &DAG)
static unsigned canMergeParamLoadStoresStartingAt(unsigned Idx, uint32_t AccessSize, const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment)
static EVT getVectorizedVT(EVT VT, unsigned N, LLVMContext &C)
static SDValue lowerIntrinsicWOChain(SDValue Op, SelectionDAG &DAG)
static SDValue PerformFMinMaxCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned PTXVersion, unsigned SmVersion)
PerformFMinMaxCombine - Combine (fmaxnum (fmaxnum a, b), c) into (fmaxnum3 a, b, c).
static SDValue combineMulWide(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue PerformFADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static std::optional< unsigned > getScalar3OpcodeForReduction(unsigned ReductionOpcode)
Get 3-input scalar reduction opcode.
static SDValue lowerIntrinsicWChain(SDValue Op, SelectionDAG &DAG)
static bool isConstZero(const SDValue &Operand)
static SDValue LowerVectorArith(SDValue Op, SelectionDAG &DAG)
static SDValue LowerTcgen05MMADisableOutputLane(SDValue Op, SelectionDAG &DAG)
static bool IsMulWideOperandDemotable(SDValue Op, unsigned OptSize, OperandSignedness &S)
IsMulWideOperandDemotable - Checks if the provided DAG node is an operand that can be demoted to OptS...
static unsigned getTcgen05MMADisableOutputLane(unsigned IID)
static std::pair< APInt, APInt > getPRMTDemandedBits(const APInt &SelectorVal, const APInt &DemandedBits)
static APInt computePRMT(APInt A, APInt B, APInt Selector, unsigned Mode)
static ISD::NodeType getScalarOpcodeForReduction(unsigned ReductionOpcode)
static SDValue PerformREMCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
static SDValue lowerBSWAP(SDValue Op, SelectionDAG &DAG)
static SDValue lowerMSTORE(SDValue Op, SelectionDAG &DAG)
static SDValue PerformMULCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI)
static void computeKnownBitsForPRMT(const SDValue Op, KnownBits &Known, const SelectionDAG &DAG, unsigned Depth)
static SDValue combineUnpackingMovIntoLoad(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Fold unpacking movs into a load by increasing the number of return values.
static SDValue lowerTensormapReplaceElemtype(SDValue Op, SelectionDAG &DAG)
static SDValue LowerClusterLaunchControlQueryCancel(SDValue Op, SelectionDAG &DAG)
static std::optional< std::pair< SDValue, SDValue > > lowerTcgen05Ld(SDNode *N, SelectionDAG &DAG, bool HasOffset=false)
static SDValue lowerCvtRSIntrinsics(SDValue Op, SelectionDAG &DAG)
static std::optional< std::pair< SDValue, SDValue > > replaceLoadVector(SDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
replaceLoadVector - Convert vector loads into multi-output scalar loads.
static SDValue expandFSH64(SDValue A, SDValue B, SDValue ShiftAmount, SDLoc DL, unsigned Opcode, SelectionDAG &DAG)
static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS, unsigned OptSize, bool &IsSigned)
AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can be demoted to OptSize bits...
static std::pair< MemSDNode *, uint32_t > convertMLOADToLoadWithUsedBytesMask(MemSDNode *N, SelectionDAG &DAG, const NVPTXSubtarget &STI)
static SDValue TryMULWIDECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply of M/2 bits that produces...
static SDValue lowerPrmtIntrinsic(SDValue Op, SelectionDAG &DAG)
static SDValue combineMulSelectConstOne(SDValue X, SDValue Select, EVT VT, SDLoc DL, TargetLowering::DAGCombinerInfo &DCI)
static SDValue buildTreeReduction(const SmallVector< SDValue > &Elements, EVT EltTy, ArrayRef< std::pair< unsigned, unsigned > > Ops, const SDLoc &DL, const SDNodeFlags Flags, SelectionDAG &DAG)
Reduces the elements using the scalar operations provided.
static SDValue combineProxyReg(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SmallVector< unsigned, 16 > VectorizePTXValueVTs(const SmallVectorImpl< EVT > &ValueVTs, const SmallVectorImpl< T > &Offsets, Align ParamAlignment, bool IsVAArg=false)
static SDValue getPRMT(SDValue A, SDValue B, SDValue Selector, SDLoc DL, SelectionDAG &DAG, unsigned Mode=NVPTX::PTXPrmtMode::NONE)
static SDValue matchMADConstOnePattern(SDValue Add)
static SDValue correctParamType(SDValue V, EVT ExpectedVT, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, SDLoc dl)
static ISD::NodeType getExtOpcode(const ISD::ArgFlagsTy &Flags)
static cl::opt< bool > UsePrecSqrtF32("nvptx-prec-sqrtf32", cl::Hidden, cl::desc("NVPTX Specific: 0 use sqrt.approx, 1 use sqrt.rn."), cl::init(true))
static void computeKnownBitsForLoadV(const SDValue Op, KnownBits &Known)
static APInt getPRMTSelector(const APInt &Selector, unsigned Mode)
static EVT promoteScalarIntegerPTX(const EVT VT)
PromoteScalarIntegerPTX Used to make sure the arguments/returns are suitable for passing and promote ...
static SDValue simplifyDemandedBitsForPRMT(SDValue PRMT, const APInt &DemandedBits, SelectionDAG &DAG, const TargetLowering &TLI, unsigned Depth)
static SDValue lowerFREM(SDValue Op, SelectionDAG &DAG)
static SDValue canonicalizePRMTInput(SDValue Op, SelectionDAG &DAG)
static SDValue sinkProxyReg(SDValue R, SDValue Chain, TargetLowering::DAGCombinerInfo &DCI)
static SDValue lowerFSH(SDValue Op, SelectionDAG &DAG)
static SDValue lowerTensormapReplaceSwizzleMode(SDValue Op, SelectionDAG &DAG)
static SDValue PromoteBinOpToF32(SDNode *N, SelectionDAG &DAG)
static SDValue PerformSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, unsigned int SmVersion)
static std::optional< std::pair< unsigned int, MVT > > getVectorLoweringShape(EVT VectorEVT, const NVPTXSubtarget &STI, unsigned AddressSpace)
static cl::opt< bool > ForceMinByValParamAlign("nvptx-force-min-byval-param-align", cl::Hidden, cl::desc("NVPTX Specific: force 4-byte minimal alignment for byval" " params of device functions."), cl::init(false))
static cl::opt< bool > UseApproxLog2F32("nvptx-approx-log2f32", cl::desc("NVPTX Specific: whether to use lg2.approx for log2"), cl::init(false))
Whereas CUDA's implementation (see libdevice) uses ex2.approx for exp2(), it does NOT use lg2....
static SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG)
static SDValue combineLOAD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
static SDValue combineSTORE(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const NVPTXSubtarget &STI)
static SDValue PerformSHLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, CodeGenOptLevel OptLevel)
PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
MachineInstr unsigned OpIdx
uint64_t High
#define P(N)
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
Contains matchers for matching SelectionDAG nodes and values.
This file contains some templates that are useful if you are working with the STL at all.
This file defines the SmallVector class.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
This file describes how to lower LLVM code to machine code.
Value * RHS
Value * LHS
BinaryOperator * Mul
static const fltSemantics & IEEEsingle()
Definition APFloat.h:296
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Definition APFloat.h:1080
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt getLoBits(unsigned numBits) const
Compute an APInt containing numBits lowbits from this APInt.
Definition APInt.cpp:644
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1541
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
Definition APInt.h:1392
LLVM_ABI APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
Definition APInt.cpp:639
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
Definition APInt.cpp:936
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition APInt.h:1331
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1489
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
Definition APInt.h:436
bool slt(const APInt &RHS) const
Signed less than comparison.
Definition APInt.h:1131
LLVM_ABI APInt extractBits(unsigned numBits, unsigned bitPosition) const
Return an APInt with the extracted bits [bitPosition,bitPosition+numBits).
Definition APInt.cpp:482
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
Definition APInt.h:433
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
Definition APInt.h:1238
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:186
an instruction that atomically reads a memory location, combines it with another value,...
@ Add
*p = old + v
@ FAdd
*p = old + v
@ Min
*p = old <signed v ? old : v
@ Sub
*p = old - v
@ And
*p = old & v
@ Xor
*p = old ^ v
@ UIncWrap
Increment one up to a maximum value.
@ Max
*p = old >signed v ? old : v
@ UMin
*p = old <unsigned v ? old : v
@ UMax
*p = old >unsigned v ? old : v
@ UDecWrap
Decrement one until a minimum value or zero.
bool isFloatingPointOperation() const
BinOp getOperation() const
This is an SDNode representing atomic operations.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
FunctionType * getFunctionType() const
const APInt & getAPIntValue() const
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
LLVM_ABI TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
LLVM_ABI Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Diagnostic information for unsupported feature in backend.
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
Definition Function.cpp:640
Module * getParent()
Get the module that this global value is contained inside of...
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
This class is used to represent ISD::LOAD nodes.
MCSection * getDataSection() const
static constexpr unsigned NoRegister
Definition MCRegister.h:60
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Definition MCSection.h:517
StringRef getName() const
getName - Get the symbol name.
Definition MCSymbol.h:188
Machine Value Type.
static auto integer_fixedlen_vector_valuetypes()
SimpleValueType SimpleTy
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isScalableVector() const
Return true if this is a vector value type where the runtime length is machine dependent.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
MVT getScalarType() const
If this is a vector, return the element type, otherwise return this.
static auto fp_fixedlen_vector_valuetypes()
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
Function & getFunction()
Return the LLVM function that this machine code represents.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
This is an abstract virtual class for memory operations.
Align getAlign() const
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
EVT getMemoryVT() const
Return the type of the in-memory value.
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
static unsigned getFromTypeWidthForLoad(const MemSDNode *Mem)
bool hasTensormapReplaceSwizzleModeSupport(unsigned value) const
bool hasUsedBytesMaskPragma() const
bool hasTensormapReplaceElemtypeSupport(unsigned value) const
bool hasAtomSwap128() const
bool hasF32x2Instructions() const
bool has256BitVectorLoadStore(unsigned AS) const
AtomicOrdering atomicOperationOrderAfterFenceSplit(const Instruction *I) const override
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
const NVPTXTargetMachine * nvTM
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallBase &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const override
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
NVPTXTargetLowering(const NVPTXTargetMachine &TM, const NVPTXSubtarget &STI)
std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &, const SmallVectorImpl< ISD::OutputArg > &, std::optional< unsigned > FirstVAArg, const CallBase &CB, unsigned UniqueCallSite) const
unsigned getPreferredFPToIntOpcode(unsigned Op, EVT FromVT, EVT ToVT) const override
bool useF32FTZ(const MachineFunction &MF) const
SDValue LowerSTACKSAVE(SDValue Op, SelectionDAG &DAG) const
Align getFunctionArgumentAlignment(const Function *F, Type *Ty, unsigned Idx, const DataLayout &DL) const
SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled, int &ExtraSteps, bool &UseOneConst, bool Reciprocal) const override
Hooks for building estimates in place of slower divisions and square roots.
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &dl, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &dl, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
std::string getParamName(const Function *F, int Idx) const
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const override
Return the preferred vector type legalization action.
NVPTX::DivPrecisionLevel getDivF32Level(const MachineFunction &MF, const SDNode &N) const
bool shouldInsertFencesForAtomic(const Instruction *) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
Align getFunctionParamOptimizedAlign(const Function *F, Type *ArgTy, const DataLayout &DL) const
getFunctionParamOptimizedAlign - since function arguments are passed via .param space,...
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Ctx, EVT VT) const override
Return the ValueType of the result of SETCC operations.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
Align getFunctionByValParamAlign(const Function *F, Type *ArgTy, Align InitialAlign, const DataLayout &DL) const
Helper for computing alignment of a device function byval parameter.
bool allowFMA(MachineFunction &MF, CodeGenOptLevel OptLevel) const
bool usePrecSqrtF32(const SDNode *N=nullptr) const
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
MCSection * SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind, const TargetMachine &TM) const override
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
const APInt & getAsAPIntVal() const
Helper method returns the APInt value of a ConstantSDNode.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
unsigned getIROrder() const
Return the node ordering.
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
SDVTList getVTList() const
const SDValue & getOperand(unsigned Num) const
bool isUndef() const
Returns true if the node type is UNDEF or POISON.
iterator_range< user_iterator > users()
void setFlags(SDNodeFlags NewFlags)
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
uint64_t getScalarValueSizeInBits() const
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
SectionKind - This is a simple POD value that classifies the properties of a section.
Definition SectionKind.h:22
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
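A minimal sketch of the common use, assuming a hypothetical helper: custom lowering that produces both a value and a chain packages them with getMergeValues so callers see one multi-result node.

// Hypothetical helper used from a custom lowering routine.
static SDValue mergeValueAndChain(SDValue Value, SDValue Chain,
                                  const SDLoc &DL, SelectionDAG &DAG) {
  SDValue Ops[] = {Value, Chain};
  return DAG.getMergeValues(Ops, DL);
}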
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue getFreeze(SDValue V)
Return a freeze using the SDLoc of the value operand.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getSymbolFunctionGlobalAddress(SDValue Op, Function **TargetFunction=nullptr)
Return a GlobalAddress of the function from the current module with name matching the given ExternalS...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
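A hedged sketch of building such a node; the opcode, operand list, and memory type are placeholders rather than anything defined in this file.

// Illustrative: a chained target intrinsic that reads MemVT from Ptr.
static SDValue buildMemIntrinsic(unsigned HypotheticalOpc, SDValue Chain,
                                 SDValue Ptr, EVT ResVT, EVT MemVT,
                                 const SDLoc &DL, SelectionDAG &DAG) {
  SDVTList VTs = DAG.getVTList(ResVT, MVT::Other);
  SDValue Ops[] = {Chain, Ptr};
  return DAG.getMemIntrinsicNode(HypotheticalOpc, DL, VTs, Ops, MemVT,
                                 MachinePointerInfo(), DAG.getEVTAlign(MemVT),
                                 MachineMemOperand::MOLoad);
}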
LLVM_ABI Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
LLVM_ABI SDNode * MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs, ArrayRef< SDValue > Ops)
This mutates the specified node to have the specified return type, opcode, and operands.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build Select's if you just have operands and don't want to check...
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
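For context, a hedged example pairing getLoad and getStore to copy a value through memory; the empty pointer info and default alignment are generic placeholder choices.

// Sketch: load VT from Src, then store it to Dst, threading the chain.
static SDValue copyThroughMemory(SDValue Chain, SDValue Src, SDValue Dst,
                                 EVT VT, const SDLoc &DL, SelectionDAG &DAG) {
  SDValue Load = DAG.getLoad(VT, DL, Chain, Src, MachinePointerInfo());
  // Result 0 of the load is the value, result 1 is the output chain.
  return DAG.getStore(Load.getValue(1), DL, Load, Dst, MachinePointerInfo());
}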
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, which starts a new call frame in which InSize bytes are set up inside ...
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond, SDNodeFlags Flags=SDNodeFlags())
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by tr...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
MachineFunction & getMachineFunction() const
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
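A small hedged illustration: this helper is the usual way to address the second half of an object when splitting a wide access. The 8-byte offset is arbitrary.

// Sketch: address of the upper half of a 16-byte object.
static SDValue upperHalfAddress(SDValue BasePtr, const SDLoc &DL,
                                SelectionDAG &DAG) {
  return DAG.getObjectPtrOffset(DL, BasePtr, TypeSize::getFixed(8));
}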
LLVMContext * getContext() const
const SDValue & setRoot(SDValue N)
Set the current root tag of the SelectionDAG.
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition StringRef.h:55
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:146
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
Definition StringRef.h:140
Align getStackAlign() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
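A hedged fragment of the usual pattern inside a target's TargetLowering constructor; the operations and types below are illustrative examples, not a restatement of NVPTX's actual action table.

// Inside the TargetLowering subclass constructor (sketch only):
setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
setOperationAction(ISD::BR_CC, MVT::i64, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
setOperationAction({ISD::SMIN, ISD::SMAX}, MVT::i16, Legal);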
void setMaxDivRemBitWidthSupported(unsigned SizeInBits)
Set the size in bits of the maximum div/rem the backend supports.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
virtual MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
void setOperationPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
Convenience method to set an operation to Promote and specify the type in a single call.
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth)
Tells the code generator which bitwidths to bypass.
virtual unsigned getNumRegisters(LLVMContext &Context, EVT VT, std::optional< MVT > RegisterVT=std::nullopt) const
Return the number of registers that this ValueType will eventually require.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
void setMinCmpXchgSizeInBits(unsigned SizeInBits)
Sets the minimum cmpxchg or ll/sc size supported by the backend.
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
Align getMinStackArgumentAlignment() const
Return the minimum stack alignment of an argument.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
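Relatedly, a hedged constructor fragment declaring which extending loads and truncating stores are unsupported, so legalization splits them into a plain memory access plus a separate conversion; the type pairs are examples only.

// Sketch: no native extending i1 loads, no direct f32 -> f16 trunc-store.
for (MVT VT : MVT::integer_valuetypes()) {
  setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
}
setTruncStoreAction(MVT::f32, MVT::f16, Expand);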
std::vector< ArgListEntry > ArgListTy
virtual Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
virtual Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const
Inserts in the IR a target-specific intrinsic specifying a fence.
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
MVT getRegisterType(MVT VT) const
Return the type of registers that this ValueType will eventually require.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
SDValue SimplifyMultipleUseDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, SelectionDAG &DAG, unsigned Depth=0) const
More limited version of SimplifyDemandedBits that can be used to "lookthrough" ops that don't contrib...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
TargetLowering(const TargetLowering &)=delete
SDValue expandRoundInexactToOdd(EVT ResultVT, SDValue Op, const SDLoc &DL, SelectionDAG &DAG) const
Truncate Op to ResultVT.
SDValue expandFP_ROUND(SDNode *Node, SelectionDAG &DAG) const
Expand round(fp) to fp conversion.
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
TargetOptions Options
MCSymbol * getSymbol(const GlobalValue *GV) const
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fp-contract=xxx option.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetFrameLowering * getFrameLowering() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:197
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition Type.h:184
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:240
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
A raw_ostream that writes to an std::string.
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
LLVM_ABI APInt pow(const APInt &X, int64_t N)
Compute X^N for N>=0.
Definition APInt.cpp:3155
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
Definition ISDOpcodes.h:41
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
Definition ISDOpcodes.h:809
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ CTLZ_ZERO_UNDEF
Definition ISDOpcodes.h:782
@ POISON
POISON - A poison node.
Definition ISDOpcodes.h:231
@ MLOAD
Masked load and store - consecutive vector load and store operations with additional mask operand tha...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
Definition ISDOpcodes.h:270
@ BSWAP
Byte Swap and Counting operators.
Definition ISDOpcodes.h:773
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:289
@ ADD
Simple integer binary arithmetic operators.
Definition ISDOpcodes.h:259
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
Definition ISDOpcodes.h:843
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
Definition ISDOpcodes.h:513
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
Definition ISDOpcodes.h:215
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
Definition ISDOpcodes.h:870
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
Definition ISDOpcodes.h:579
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
Definition ISDOpcodes.h:412
@ VECREDUCE_FMAXIMUM
FMINIMUM/FMAXIMUM nodes propagate NaNs and signed zeroes using the llvm.minimum and llvm....
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
Definition ISDOpcodes.h:746
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
Definition ISDOpcodes.h:275
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
Definition ISDOpcodes.h:983
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
Definition ISDOpcodes.h:249
@ SIGN_EXTEND
Conversion operators.
Definition ISDOpcodes.h:834
@ READSTEADYCOUNTER
READSTEADYCOUNTER - This corresponds to the readsteadycounter intrinsic.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
Definition ISDOpcodes.h:347
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
Definition ISDOpcodes.h:369
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
Definition ISDOpcodes.h:786
@ UNDEF
UNDEF - An undefined node.
Definition ISDOpcodes.h:228
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
Definition ISDOpcodes.h:242
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
Definition ISDOpcodes.h:225
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
Definition ISDOpcodes.h:343
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
Definition ISDOpcodes.h:703
@ SHL
Shift and rotation operations.
Definition ISDOpcodes.h:764
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
Definition ISDOpcodes.h:644
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
Definition ISDOpcodes.h:609
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
Definition ISDOpcodes.h:571
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
Definition ISDOpcodes.h:219
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Definition ISDOpcodes.h:840
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
Definition ISDOpcodes.h:801
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ SSHLSAT
RESULT = [US]SHLSAT(LHS, RHS) - Perform saturation left shift.
Definition ISDOpcodes.h:381
@ SMULO
Same for multiplication.
Definition ISDOpcodes.h:351
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
Definition ISDOpcodes.h:878
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
Definition ISDOpcodes.h:726
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
Definition ISDOpcodes.h:968
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Definition ISDOpcodes.h:795
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:323
@ BF16_TO_FP
BF16_TO_FP, FP_TO_BF16 - These operators are used to perform promotions and truncation for bfloat16.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
Definition ISDOpcodes.h:110
@ STRICT_FP_TO_UINT
Definition ISDOpcodes.h:473
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:472
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
Definition ISDOpcodes.h:916
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ AND
Bitwise operators - logical and, logical or, logical xor.
Definition ISDOpcodes.h:738
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Definition ISDOpcodes.h:200
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:299
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
Definition ISDOpcodes.h:236
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition ISDOpcodes.h:560
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition ISDOpcodes.h:949
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
Definition ISDOpcodes.h:987
@ VECREDUCE_FMINIMUM
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition ISDOpcodes.h:846
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
Definition ISDOpcodes.h:823
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition ISDOpcodes.h:529
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition ISDOpcodes.h:360
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that behave the same as FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
Definition ISDOpcodes.h:333
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Definition ISDOpcodes.h:208
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
Definition ISDOpcodes.h:551
LLVM_ABI bool allOperandsUndef(const SDNode *N)
Return true if the node has at least one operand and all operands of the specified node are ISD::UNDE...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
LLVM_ABI StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
@ Bitcast
Perform the operation on a different, but equivalently sized type.
@ ATOMIC_CMP_SWAP_B128
These nodes are used to lower atomic instructions with i128 type.
bool isPackedVectorTy(EVT VT)
DivPrecisionLevel
Definition NVPTX.h:257
match_combine_or< CastInst_match< OpTy, TruncInst >, OpTy > m_TruncOrSelf(const OpTy &Op)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
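A self-contained, hedged illustration of how these PatternMatch helpers compose; the pattern itself is generic and not the one matched in this file.

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Match "select Cond, (shl X, 3), 0" and bind X on success.
static bool matchGuardedShiftByThree(Value *V, Value *&X) {
  Value *Cond;
  return match(V, m_Select(m_Value(Cond),
                           m_Shl(m_Value(X), m_SpecificInt(3)),
                           m_Zero()));
}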
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
NodeAddr< NodeBase * > Node
Definition RDFGraph.h:381
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
Definition Threading.h:280
@ Offset
Definition DWP.cpp:532
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
Definition STLExtras.h:829
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool shouldEmitPTXNoReturn(const Value *V, const TargetMachine &TM)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
MaybeAlign getAlign(const CallInst &I, unsigned Index)
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1667
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs=nullptr, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition Analysis.cpp:119
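As a hedged sketch of typical use, a lowering routine flattens an aggregate IR type into its scalar parts (and their offsets) before assigning registers or parameter slots; the helper below is hypothetical.

// Count the scalar pieces an IR type decomposes into.
static unsigned countValueParts(const TargetLowering &TLI,
                                const DataLayout &DL, Type *Ty) {
  SmallVector<EVT, 8> ValueVTs;
  SmallVector<TypeSize, 8> Offsets;
  ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, &Offsets);
  return ValueVTs.size();
}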
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2530
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
uint64_t PowerOf2Ceil(uint64_t A)
Returns the power of two which is greater than or equal to the given value.
Definition MathExtras.h:385
bool isReleaseOrStronger(AtomicOrdering AO)
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition STLExtras.h:2016
auto reverse(ContainerTy &&C)
Definition STLExtras.h:406
unsigned promoteScalarArgumentSize(unsigned size)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
bool shouldPassAsArray(Type *Ty)
CodeGenOptLevel
Code generation optimization level.
Definition CodeGen.h:82
@ Default
-O2, -Os, -Oz
Definition CodeGen.h:85
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Sub
Subtraction of integers.
@ Add
Sum of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
bool isAcquireOrStronger(AtomicOrdering AO)
constexpr unsigned BitWidth
bool isKernelFunction(const Function &F)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
Function * getMaybeBitcastedCallee(const CallBase *CB)
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
Definition Alignment.h:201
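A hedged example of combining these alignment helpers when laying out parameter bytes; the roles of the values are illustrative.

// Pad a running byte offset up to the parameter's alignment, then compute
// the alignment actually guaranteed at that offset relative to the base.
static uint64_t placeParam(uint64_t CurOffset, Align ParamAlign,
                           Align BaseAlign) {
  uint64_t Start = alignTo(CurOffset, ParamAlign);
  Align Known = commonAlignment(BaseAlign, Start);
  (void)Known; // e.g. use this as the access alignment for the slot
  return Start;
}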
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition Sequence.h:305
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define N
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
Definition Alignment.h:77
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
Extended Value Type.
Definition ValueTypes.h:35
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
Definition ValueTypes.h:395
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition ValueTypes.h:137
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition ValueTypes.h:74
EVT changeTypeToInteger() const
Return the type converted to an equivalently sized integer or vector with integer element type.
Definition ValueTypes.h:121
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
Definition ValueTypes.h:284
bool bitsLT(EVT VT) const
Return true if this has less bits than VT.
Definition ValueTypes.h:300
ElementCount getVectorElementCount() const
Definition ValueTypes.h:350
bool is32BitVector() const
Return true if this is a 32-bit vector type.
Definition ValueTypes.h:197
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition ValueTypes.h:373
uint64_t getScalarSizeInBits() const
Definition ValueTypes.h:385
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition ValueTypes.h:316
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
Definition ValueTypes.h:381
bool isVector() const
Return true if this is a vector value type.
Definition ValueTypes.h:168
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition ValueTypes.h:323
bool bitsEq(EVT VT) const
Return true if this has the same number of bits as VT.
Definition ValueTypes.h:256
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition ValueTypes.h:328
EVT changeElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a type whose attributes match ourselves with the exception of the element type that i...
Definition ValueTypes.h:113
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
Definition ValueTypes.h:157
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition ValueTypes.h:336
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition ValueTypes.h:152
static LLVM_ABI KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
KnownBits concat(const KnownBits &Lo) const
Concatenate the bits from Lo onto the bottom of *this.
Definition KnownBits.h:233
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
void resetAll()
Resets the known state of all bits.
Definition KnownBits.h:74
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
Definition KnownBits.h:219
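A hedged sketch of how these KnownBits pieces compose when reporting known bits for a node built from two halves; the 16-bit widths are illustrative.

// Combine known bits of a high and a low 16-bit half into a 32-bit result.
// KnownBits::concat places Lo in the low bits of the wider value.
static KnownBits concatHalves(const KnownBits &Hi, const KnownBits &Lo) {
  assert(Hi.getBitWidth() == 16 && Lo.getBitWidth() == 16);
  return Hi.concat(Lo);
}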
This class contains a discriminated union of information about pointers in memory operands,...
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106
These are IR-level optimization flags that may be propagated to SDNodes.
bool hasAllowContract() const
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
SmallVector< ISD::OutputArg, 32 > Outs
Type * RetTy
Same as OrigRetTy, or partially legalized for soft float libcalls.
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...