NVPTXTargetTransformInfo.cpp
1//===-- NVPTXTargetTransformInfo.cpp - NVPTX specific TTI -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "NVPTXTargetTransformInfo.h"
10#include "NVPTXUtilities.h"
11#include "llvm/ADT/STLExtras.h"
12#include "llvm/Analysis/TargetTransformInfo.h"
13#include "llvm/Analysis/ValueTracking.h"
14#include "llvm/CodeGen/BasicTTIImpl.h"
15#include "llvm/CodeGen/CostTable.h"
16#include "llvm/CodeGen/TargetLowering.h"
17#include "llvm/IR/Constants.h"
18#include "llvm/IR/IntrinsicInst.h"
19#include "llvm/IR/Intrinsics.h"
20#include "llvm/IR/IntrinsicsNVPTX.h"
21#include "llvm/IR/Value.h"
22#include "llvm/Support/Casting.h"
23#include "llvm/Support/ErrorHandling.h"
24#include "llvm/Support/NVPTXAddrSpace.h"
25#include "llvm/Transforms/InstCombine/InstCombiner.h"
26#include <optional>
27using namespace llvm;
28
29#define DEBUG_TYPE "NVPTXtti"
30
31// Whether the given intrinsic reads threadIdx.x/y/z.
32static bool readsThreadIndex(const IntrinsicInst *II) {
33 switch (II->getIntrinsicID()) {
34 default: return false;
35 case Intrinsic::nvvm_read_ptx_sreg_tid_x:
36 case Intrinsic::nvvm_read_ptx_sreg_tid_y:
37 case Intrinsic::nvvm_read_ptx_sreg_tid_z:
38 return true;
39 }
40}
41
42static bool readsLaneId(const IntrinsicInst *II) {
43 return II->getIntrinsicID() == Intrinsic::nvvm_read_ptx_sreg_laneid;
44}
45
46// Whether the given intrinsic is an atomic instruction in PTX.
47static bool isNVVMAtomic(const IntrinsicInst *II) {
48 switch (II->getIntrinsicID()) {
49 default:
50 return false;
51 case Intrinsic::nvvm_atomic_add_gen_f_cta:
52 case Intrinsic::nvvm_atomic_add_gen_f_sys:
53 case Intrinsic::nvvm_atomic_add_gen_i_cta:
54 case Intrinsic::nvvm_atomic_add_gen_i_sys:
55 case Intrinsic::nvvm_atomic_and_gen_i_cta:
56 case Intrinsic::nvvm_atomic_and_gen_i_sys:
57 case Intrinsic::nvvm_atomic_cas_gen_i_cta:
58 case Intrinsic::nvvm_atomic_cas_gen_i_sys:
59 case Intrinsic::nvvm_atomic_dec_gen_i_cta:
60 case Intrinsic::nvvm_atomic_dec_gen_i_sys:
61 case Intrinsic::nvvm_atomic_inc_gen_i_cta:
62 case Intrinsic::nvvm_atomic_inc_gen_i_sys:
63 case Intrinsic::nvvm_atomic_max_gen_i_cta:
64 case Intrinsic::nvvm_atomic_max_gen_i_sys:
65 case Intrinsic::nvvm_atomic_min_gen_i_cta:
66 case Intrinsic::nvvm_atomic_min_gen_i_sys:
67 case Intrinsic::nvvm_atomic_or_gen_i_cta:
68 case Intrinsic::nvvm_atomic_or_gen_i_sys:
69 case Intrinsic::nvvm_atomic_exch_gen_i_cta:
70 case Intrinsic::nvvm_atomic_exch_gen_i_sys:
71 case Intrinsic::nvvm_atomic_xor_gen_i_cta:
72 case Intrinsic::nvvm_atomic_xor_gen_i_sys:
73 return true;
74 }
75}
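// Roughly speaking, these intrinsics correspond to PTX atom.* operations with
// explicit .cta or .sys scope qualifiers, which is why the divergence analysis
// below treats them like the IR-level atomic instructions.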
76
77bool NVPTXTTIImpl::isSourceOfDivergence(const Value *V) const {
78 // Without inter-procedural analysis, we conservatively assume that arguments
79 // to __device__ functions are divergent.
80 if (const Argument *Arg = dyn_cast<Argument>(V))
81 return !isKernelFunction(*Arg->getParent());
82
83 if (const Instruction *I = dyn_cast<Instruction>(V)) {
84 // Without pointer analysis, we conservatively assume values loaded from
85 // generic or local address space are divergent.
86 if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
87 unsigned AS = LI->getPointerAddressSpace();
88 return AS == ADDRESS_SPACE_GENERIC || AS == ADDRESS_SPACE_LOCAL;
89 }
90 // Atomic instructions may cause divergence. Atomic instructions are
91 // executed sequentially across all threads in a warp. Therefore, an earlier
92 // executed thread may see different memory inputs than a later executed
93 // thread. For example, suppose *a = 0 initially.
94 //
95 // atom.global.add.s32 d, [a], 1
96 //
97 // returns 0 for the first thread that enters the critical region, and 1 for
98 // the second thread.
99 if (I->isAtomic())
100 return true;
101 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
102 // Instructions that read threadIdx are obviously divergent.
103 if (readsThreadIndex(II) || readsLaneId(II))
104 return true;
105 // Handle the NVPTX atomic intrinsics that cannot be represented as an
106 // atomic IR instruction.
107 if (isNVVMAtomic(II))
108 return true;
109 }
110 // Conservatively consider the return value of function calls as divergent.
111 // We could analyze callees with bodies more precisely using
112 // inter-procedural analysis.
113 if (isa<CallInst>(I))
114 return true;
115 }
116
117 return false;
118}
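// Illustrative IR (hypothetical values, not taken from a test):
//   %tid = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()  ; divergent (thread id)
//   %g   = load i32, ptr %p                           ; divergent (generic AS)
//   %s   = load i32, ptr addrspace(3) %q              ; not flagged here (shared AS)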
119
120// Convert NVVM intrinsics to target-generic LLVM code where possible.
121static Instruction *convertNvvmIntrinsicToLlvm(InstCombiner &IC,
122 IntrinsicInst *II) {
123 // Each NVVM intrinsic we can simplify can be replaced with one of:
124 //
125 // * an LLVM intrinsic,
126 // * an LLVM cast operation,
127 // * an LLVM binary operation, or
128 // * ad-hoc LLVM IR for the particular operation.
129
130 // Some transformations are only valid when the module's
131 // flush-denormals-to-zero (ftz) setting is true/false, whereas other
132 // transformations are valid regardless of the module's ftz setting.
133 enum FtzRequirementTy {
134 FTZ_Any, // Any ftz setting is ok.
135 FTZ_MustBeOn, // Transformation is valid only if ftz is on.
136 FTZ_MustBeOff, // Transformation is valid only if ftz is off.
137 };
138 // Classes of NVVM intrinsics that can't be replaced one-to-one with a
139 // target-generic intrinsic, cast op, or binary op but that we can nonetheless
140 // simplify.
141 enum SpecialCase {
142 SPC_Reciprocal,
143 SCP_FunnelShiftClamp,
144 };
145
146 // SimplifyAction is a poor-man's variant (plus an additional flag) that
147 // represents how to replace an NVVM intrinsic with target-generic LLVM IR.
148 struct SimplifyAction {
149 // Invariant: At most one of these Optionals has a value.
150 std::optional<Intrinsic::ID> IID;
151 std::optional<Instruction::CastOps> CastOp;
152 std::optional<Instruction::BinaryOps> BinaryOp;
153 std::optional<SpecialCase> Special;
154
155 FtzRequirementTy FtzRequirement = FTZ_Any;
156 // Denormal handling is guarded by different attributes depending on the
157 // type (denormal-fp-math vs. denormal-fp-math-f32), so take note of halves.
158 bool IsHalfTy = false;
159
160 SimplifyAction() = default;
161
162 SimplifyAction(Intrinsic::ID IID, FtzRequirementTy FtzReq,
163 bool IsHalfTy = false)
164 : IID(IID), FtzRequirement(FtzReq), IsHalfTy(IsHalfTy) {}
165
166 // Cast operations don't have anything to do with FTZ, so we skip that
167 // argument.
168 SimplifyAction(Instruction::CastOps CastOp) : CastOp(CastOp) {}
169
170 SimplifyAction(Instruction::BinaryOps BinaryOp, FtzRequirementTy FtzReq)
171 : BinaryOp(BinaryOp), FtzRequirement(FtzReq) {}
172
173 SimplifyAction(SpecialCase Special, FtzRequirementTy FtzReq)
174 : Special(Special), FtzRequirement(FtzReq) {}
175 };
176
177 // Try to generate a SimplifyAction describing how to replace our
178 // IntrinsicInstr with target-generic LLVM IR.
179 const SimplifyAction Action = [II]() -> SimplifyAction {
180 switch (II->getIntrinsicID()) {
181 // NVVM intrinsics that map directly to LLVM intrinsics.
182 case Intrinsic::nvvm_ceil_d:
183 return {Intrinsic::ceil, FTZ_Any};
184 case Intrinsic::nvvm_ceil_f:
185 return {Intrinsic::ceil, FTZ_MustBeOff};
186 case Intrinsic::nvvm_ceil_ftz_f:
187 return {Intrinsic::ceil, FTZ_MustBeOn};
188 case Intrinsic::nvvm_floor_d:
189 return {Intrinsic::floor, FTZ_Any};
190 case Intrinsic::nvvm_floor_f:
191 return {Intrinsic::floor, FTZ_MustBeOff};
192 case Intrinsic::nvvm_floor_ftz_f:
193 return {Intrinsic::floor, FTZ_MustBeOn};
194 case Intrinsic::nvvm_fma_rn_d:
195 return {Intrinsic::fma, FTZ_Any};
196 case Intrinsic::nvvm_fma_rn_f:
197 return {Intrinsic::fma, FTZ_MustBeOff};
198 case Intrinsic::nvvm_fma_rn_ftz_f:
199 return {Intrinsic::fma, FTZ_MustBeOn};
200 case Intrinsic::nvvm_fma_rn_f16:
201 return {Intrinsic::fma, FTZ_MustBeOff, true};
202 case Intrinsic::nvvm_fma_rn_ftz_f16:
203 return {Intrinsic::fma, FTZ_MustBeOn, true};
204 case Intrinsic::nvvm_fma_rn_f16x2:
205 return {Intrinsic::fma, FTZ_MustBeOff, true};
206 case Intrinsic::nvvm_fma_rn_ftz_f16x2:
207 return {Intrinsic::fma, FTZ_MustBeOn, true};
208 case Intrinsic::nvvm_fma_rn_bf16:
209 return {Intrinsic::fma, FTZ_MustBeOff, true};
210 case Intrinsic::nvvm_fma_rn_ftz_bf16:
211 return {Intrinsic::fma, FTZ_MustBeOn, true};
212 case Intrinsic::nvvm_fma_rn_bf16x2:
213 return {Intrinsic::fma, FTZ_MustBeOff, true};
214 case Intrinsic::nvvm_fma_rn_ftz_bf16x2:
215 return {Intrinsic::fma, FTZ_MustBeOn, true};
216 case Intrinsic::nvvm_fmax_d:
217 return {Intrinsic::maxnum, FTZ_Any};
218 case Intrinsic::nvvm_fmax_f:
219 return {Intrinsic::maxnum, FTZ_MustBeOff};
220 case Intrinsic::nvvm_fmax_ftz_f:
221 return {Intrinsic::maxnum, FTZ_MustBeOn};
222 case Intrinsic::nvvm_fmax_nan_f:
223 return {Intrinsic::maximum, FTZ_MustBeOff};
224 case Intrinsic::nvvm_fmax_ftz_nan_f:
225 return {Intrinsic::maximum, FTZ_MustBeOn};
226 case Intrinsic::nvvm_fmax_f16:
227 return {Intrinsic::maxnum, FTZ_MustBeOff, true};
228 case Intrinsic::nvvm_fmax_ftz_f16:
229 return {Intrinsic::maxnum, FTZ_MustBeOn, true};
230 case Intrinsic::nvvm_fmax_f16x2:
231 return {Intrinsic::maxnum, FTZ_MustBeOff, true};
232 case Intrinsic::nvvm_fmax_ftz_f16x2:
233 return {Intrinsic::maxnum, FTZ_MustBeOn, true};
234 case Intrinsic::nvvm_fmax_nan_f16:
235 return {Intrinsic::maximum, FTZ_MustBeOff, true};
236 case Intrinsic::nvvm_fmax_ftz_nan_f16:
237 return {Intrinsic::maximum, FTZ_MustBeOn, true};
238 case Intrinsic::nvvm_fmax_nan_f16x2:
239 return {Intrinsic::maximum, FTZ_MustBeOff, true};
240 case Intrinsic::nvvm_fmax_ftz_nan_f16x2:
241 return {Intrinsic::maximum, FTZ_MustBeOn, true};
242 case Intrinsic::nvvm_fmin_d:
243 return {Intrinsic::minnum, FTZ_Any};
244 case Intrinsic::nvvm_fmin_f:
245 return {Intrinsic::minnum, FTZ_MustBeOff};
246 case Intrinsic::nvvm_fmin_ftz_f:
247 return {Intrinsic::minnum, FTZ_MustBeOn};
248 case Intrinsic::nvvm_fmin_nan_f:
249 return {Intrinsic::minimum, FTZ_MustBeOff};
250 case Intrinsic::nvvm_fmin_ftz_nan_f:
251 return {Intrinsic::minimum, FTZ_MustBeOn};
252 case Intrinsic::nvvm_fmin_f16:
253 return {Intrinsic::minnum, FTZ_MustBeOff, true};
254 case Intrinsic::nvvm_fmin_ftz_f16:
255 return {Intrinsic::minnum, FTZ_MustBeOn, true};
256 case Intrinsic::nvvm_fmin_f16x2:
257 return {Intrinsic::minnum, FTZ_MustBeOff, true};
258 case Intrinsic::nvvm_fmin_ftz_f16x2:
259 return {Intrinsic::minnum, FTZ_MustBeOn, true};
260 case Intrinsic::nvvm_fmin_nan_f16:
261 return {Intrinsic::minimum, FTZ_MustBeOff, true};
262 case Intrinsic::nvvm_fmin_ftz_nan_f16:
263 return {Intrinsic::minimum, FTZ_MustBeOn, true};
264 case Intrinsic::nvvm_fmin_nan_f16x2:
265 return {Intrinsic::minimum, FTZ_MustBeOff, true};
266 case Intrinsic::nvvm_fmin_ftz_nan_f16x2:
267 return {Intrinsic::minimum, FTZ_MustBeOn, true};
268 case Intrinsic::nvvm_sqrt_rn_d:
269 return {Intrinsic::sqrt, FTZ_Any};
270 case Intrinsic::nvvm_sqrt_f:
271 // nvvm_sqrt_f is a special case. For most intrinsics, foo_ftz_f is the
272 // ftz version, and foo_f is the non-ftz version. But nvvm_sqrt_f adopts
273 // the ftz-ness of the surrounding code. sqrt_rn_f and sqrt_rn_ftz_f are
274 // the versions with explicit ftz-ness.
275 return {Intrinsic::sqrt, FTZ_Any};
276 case Intrinsic::nvvm_trunc_d:
277 return {Intrinsic::trunc, FTZ_Any};
278 case Intrinsic::nvvm_trunc_f:
279 return {Intrinsic::trunc, FTZ_MustBeOff};
280 case Intrinsic::nvvm_trunc_ftz_f:
281 return {Intrinsic::trunc, FTZ_MustBeOn};
282
283 // NVVM intrinsics that map to LLVM cast operations.
284 // Note - we cannot map intrinsics like nvvm_d2ll_rz to LLVM's
285 // FPToSI, as NaN to int conversion with FPToSI is considered UB and is
286 // eliminated. NVVM conversion intrinsics are translated to PTX cvt
287 // instructions which define the outcome for NaN rather than leaving as UB.
288 // Therefore, translate NVVM intrinsics to sitofp/uitofp, but not to
289 // fptosi/fptoui.
290 case Intrinsic::nvvm_i2d_rn:
291 case Intrinsic::nvvm_i2f_rn:
292 case Intrinsic::nvvm_ll2d_rn:
293 case Intrinsic::nvvm_ll2f_rn:
294 return {Instruction::SIToFP};
295 case Intrinsic::nvvm_ui2d_rn:
296 case Intrinsic::nvvm_ui2f_rn:
297 case Intrinsic::nvvm_ull2d_rn:
298 case Intrinsic::nvvm_ull2f_rn:
299 return {Instruction::UIToFP};
300
301 // NVVM intrinsics that map to LLVM binary ops.
302 case Intrinsic::nvvm_div_rn_d:
303 return {Instruction::FDiv, FTZ_Any};
304
305 // The remainder of cases are NVVM intrinsics that map to LLVM idioms, but
306 // need special handling.
307 //
308 // We seem to be missing intrinsics for rcp.approx.{ftz.}f32, which is just
309 // as well.
310 case Intrinsic::nvvm_rcp_rn_d:
311 return {SPC_Reciprocal, FTZ_Any};
312
313 case Intrinsic::nvvm_fshl_clamp:
314 case Intrinsic::nvvm_fshr_clamp:
315 return {SCP_FunnelShiftClamp, FTZ_Any};
316
317 // We do not currently simplify intrinsics that give an approximate
318 // answer. These include:
319 //
320 // - nvvm_cos_approx_{f,ftz_f}
321 // - nvvm_ex2_approx_{d,f,ftz_f}
322 // - nvvm_lg2_approx_{d,f,ftz_f}
323 // - nvvm_sin_approx_{f,ftz_f}
324 // - nvvm_sqrt_approx_{f,ftz_f}
325 // - nvvm_rsqrt_approx_{d,f,ftz_f}
326 // - nvvm_div_approx_{ftz_d,ftz_f,f}
327 // - nvvm_rcp_approx_ftz_d
328 //
329 // Ideally we'd encode them as e.g. "fast call @llvm.cos", where "fast"
330 // means that fastmath is enabled in the intrinsic. Unfortunately only
331 // binary operators (currently) have a fastmath bit in SelectionDAG, so
332 // this information gets lost and we can't select on it.
333 //
334 // TODO: div and rcp are lowered to a binary op, so in theory we could
335 // lower them to "fast fdiv".
336
337 default:
338 return {};
339 }
340 }();
341
342 // If Action.FtzRequirementTy is not satisfied by the module's ftz state, we
343 // can bail out now. (Notice that in the case that IID is not an NVVM
344 // intrinsic, we don't have to look up any module metadata, as
345 // FtzRequirementTy will be FTZ_Any.)
346 if (Action.FtzRequirement != FTZ_Any) {
347 // FIXME: Broken for f64
348 DenormalMode Mode = II->getFunction()->getDenormalMode(
349 Action.IsHalfTy ? APFloat::IEEEhalf() : APFloat::IEEEsingle());
350 bool FtzEnabled = Mode.Output == DenormalMode::PreserveSign;
351
352 if (FtzEnabled != (Action.FtzRequirement == FTZ_MustBeOn))
353 return nullptr;
354 }
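  // For example (illustrative attribute value): a function carrying
  // "denormal-fp-math-f32"="preserve-sign,preserve-sign" yields
  // Mode.Output == PreserveSign, so FtzEnabled is true and only FTZ_MustBeOn
  // (or FTZ_Any) actions survive this check.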
355
356 // Simplify to target-generic intrinsic.
357 if (Action.IID) {
358 SmallVector<Value *, 4> Args(II->args());
359 // All the target-generic intrinsics currently of interest to us have one
360 // type argument, equal to that of the nvvm intrinsic's argument.
361 Type *Tys[] = {II->getArgOperand(0)->getType()};
362 return CallInst::Create(
363 Intrinsic::getOrInsertDeclaration(II->getModule(), *Action.IID, Tys),
364 Args);
365 }
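  // For example (illustrative, assuming the ftz check above passed):
  //   %r = call float @llvm.nvvm.ceil.f(float %x)
  // is rebuilt here as
  //   %r = call float @llvm.ceil.f32(float %x)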
366
367 // Simplify to target-generic binary op.
368 if (Action.BinaryOp)
369 return BinaryOperator::Create(*Action.BinaryOp, II->getArgOperand(0),
370 II->getArgOperand(1), II->getName());
371
372 // Simplify to target-generic cast op.
373 if (Action.CastOp)
374 return CastInst::Create(*Action.CastOp, II->getArgOperand(0), II->getType(),
375 II->getName());
376
377 // All that's left are the special cases.
378 if (!Action.Special)
379 return nullptr;
380
381 switch (*Action.Special) {
382 case SPC_Reciprocal:
383 // Simplify reciprocal.
384 return BinaryOperator::Create(
385 Instruction::FDiv, ConstantFP::get(II->getArgOperand(0)->getType(), 1),
386 II->getArgOperand(0), II->getName());
387
388 case SCP_FunnelShiftClamp: {
389 // Canonicalize a clamping funnel shift to the generic llvm funnel shift
390 // when possible, as this is easier for llvm to optimize further.
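    // For example (illustrative): a call to @llvm.nvvm.fshl.clamp on i32 with
    // a constant shift of 8 becomes @llvm.fshl.i32, while a constant shift of
    // 32 or more makes the clamped result simply the second operand.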
391 if (const auto *ShiftConst = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
392 const bool IsLeft = II->getIntrinsicID() == Intrinsic::nvvm_fshl_clamp;
393 if (ShiftConst->getZExtValue() >= II->getType()->getIntegerBitWidth())
394 return IC.replaceInstUsesWith(*II, II->getArgOperand(IsLeft ? 1 : 0));
395
396 const unsigned FshIID = IsLeft ? Intrinsic::fshl : Intrinsic::fshr;
397 return CallInst::Create(Intrinsic::getOrInsertDeclaration(
398 II->getModule(), FshIID, II->getType()),
399 SmallVector<Value *, 3>(II->args()));
400 }
401 return nullptr;
402 }
403 }
404 llvm_unreachable("All SpecialCase enumerators should be handled in switch.");
405}
406
407// Returns true/false when we know the answer, nullopt otherwise.
408static std::optional<bool> evaluateIsSpace(Intrinsic::ID IID, unsigned AS) {
409 if (AS == NVPTXAS::ADDRESS_SPACE_GENERIC ||
410 AS == NVPTXAS::ADDRESS_SPACE_PARAM)
411 return std::nullopt; // Got to check at run-time.
412 switch (IID) {
413 case Intrinsic::nvvm_isspacep_global:
414 return AS == NVPTXAS::ADDRESS_SPACE_GLOBAL;
415 case Intrinsic::nvvm_isspacep_local:
416 return AS == NVPTXAS::ADDRESS_SPACE_LOCAL;
417 case Intrinsic::nvvm_isspacep_shared:
418 // If shared cluster this can't be evaluated at compile time.
419 if (AS == NVPTXAS::ADDRESS_SPACE_SHARED_CLUSTER)
420 return std::nullopt;
421 return AS == NVPTXAS::ADDRESS_SPACE_SHARED;
422 case Intrinsic::nvvm_isspacep_shared_cluster:
423 return AS == NVPTXAS::ADDRESS_SPACE_SHARED_CLUSTER ||
424 AS == NVPTXAS::ADDRESS_SPACE_SHARED;
425 case Intrinsic::nvvm_isspacep_const:
426 return AS == NVPTXAS::ADDRESS_SPACE_CONST;
427 default:
428 llvm_unreachable("Unexpected intrinsic");
429 }
430}
431
432// Returns an instruction pointer (may be nullptr if we do not know the answer).
433// Returns nullopt if `II` is not one of the `isspacep` intrinsics.
434//
435// TODO: If InferAddressSpaces were run early enough in the pipeline this could
436// be removed in favor of the constant folding that occurs there through
437// rewriteIntrinsicWithAddressSpace
438static std::optional<Instruction *>
439handleSpaceCheckIntrinsics(InstCombiner &IC, IntrinsicInst &II) {
440
441 switch (auto IID = II.getIntrinsicID()) {
442 case Intrinsic::nvvm_isspacep_global:
443 case Intrinsic::nvvm_isspacep_local:
444 case Intrinsic::nvvm_isspacep_shared:
445 case Intrinsic::nvvm_isspacep_shared_cluster:
446 case Intrinsic::nvvm_isspacep_const: {
447 Value *Op0 = II.getArgOperand(0);
448 unsigned AS = Op0->getType()->getPointerAddressSpace();
449 // Peek through ASC to generic AS.
450 // TODO: we could dig deeper through both ASCs and GEPs.
451 if (AS == NVPTXAS::ADDRESS_SPACE_GENERIC)
452 if (auto *ASCO = dyn_cast<AddrSpaceCastOperator>(Op0))
453 AS = ASCO->getOperand(0)->getType()->getPointerAddressSpace();
454
455 if (std::optional<bool> Answer = evaluateIsSpace(IID, AS))
456 return IC.replaceInstUsesWith(II,
457 ConstantInt::get(II.getType(), *Answer));
458 return nullptr; // Don't know the answer, got to check at run time.
459 }
460 default:
461 return std::nullopt;
462 }
463}
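// For example (illustrative): an @llvm.nvvm.isspacep.global call whose operand
// is an addrspacecast from addrspace(1) (global) to generic folds to true
// here, while one fed by a plain generic pointer is left for the runtime
// isspacep check.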
464
465std::optional<Instruction *>
466NVPTXTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
467 if (std::optional<Instruction *> I = handleSpaceCheckIntrinsics(IC, II))
468 return *I;
469 if (Instruction *I = convertNvvmIntrinsicToLlvm(IC, &II))
470 return I;
471
472 return std::nullopt;
473}
474
475InstructionCost
476NVPTXTTIImpl::getInstructionCost(const User *U,
477 ArrayRef<const Value *> Operands,
478 TTI::TargetCostKind CostKind) const {
479 if (const auto *CI = dyn_cast<CallInst>(U))
480 if (const auto *IA = dyn_cast<InlineAsm>(CI->getCalledOperand())) {
481 // Without this implementation getCallCost() would return the number of
482 // arguments+1 as the cost, because the cost model treats inline asm as a
483 // call (it is classified as a call in the IR). A better cost model would
484 // be to return the number of asm instructions embedded in the asm
485 // string.
486 StringRef AsmStr = IA->getAsmString();
487 const unsigned InstCount =
488 count_if(split(AsmStr, ';'), [](StringRef AsmInst) {
489 // Trim off scopes denoted by '{' and '}' as these can be ignored
490 AsmInst = AsmInst.trim().ltrim("{} \t\n\v\f\r");
491 // This is pretty coarse but does a reasonably good job of
492 // identifying things that look like instructions, possibly with a
493 // predicate ("@").
494 return !AsmInst.empty() &&
495 (AsmInst[0] == '@' || isAlpha(AsmInst[0]) ||
496 AsmInst.find(".pragma") != StringRef::npos);
497 });
498 return InstCount * TargetTransformInfo::TCC_Basic;
499 }
500
501 return BaseT::getInstructionCost(U, Operands, CostKind);
502}
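// For example (an illustrative asm string): "mov.u32 %0, %1; add.u32 %0, %0, 4;"
// splits into two non-empty statements, so the call is costed at
// 2 * TCC_Basic rather than as a generic call.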
503
504InstructionCost NVPTXTTIImpl::getArithmeticInstrCost(
505 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
506 TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
507 ArrayRef<const Value *> Args, const Instruction *CxtI) const {
508 // Legalize the type.
509 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
510
511 int ISD = TLI->InstructionOpcodeToISD(Opcode);
512
513 switch (ISD) {
514 default:
515 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
516 Op2Info);
517 case ISD::ADD:
518 case ISD::MUL:
519 case ISD::XOR:
520 case ISD::OR:
521 case ISD::AND:
522 // The machine code (SASS) simulates an i64 with two i32. Therefore, we
523 // estimate that arithmetic operations on i64 are twice as expensive as
524 // those on types that can fit into one machine register.
525 if (LT.second.SimpleTy == MVT::i64)
526 return 2 * LT.first;
527 // Delegate other cases to the basic TTI.
528 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
529 Op2Info);
530 }
531}
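// For example, an 'add i64' is costed at twice its legalization cost because
// SASS emulates it with two 32-bit operations, while an 'add i32' falls
// through to the base implementation.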
532
533void NVPTXTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE,
534 TTI::UnrollingPreferences &UP,
535 OptimizationRemarkEmitter *ORE) const {
536 BaseT::getUnrollingPreferences(L, SE, UP, ORE);
537
538 // Enable partial unrolling and runtime unrolling, but reduce the
539 // threshold. This partially unrolls small loops which are often
540 // unrolled by the PTX to SASS compiler and unrolling earlier can be
541 // beneficial.
542 UP.Partial = UP.Runtime = true;
543 UP.PartialThreshold = UP.Threshold / 4;
544}
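// For example, with the generic default unroll Threshold (300 at the time of
// writing) this leaves full unrolling at 300 but caps partial/runtime
// unrolling at 75; the base value comes from the generic TTI defaults, not
// from this file.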
545
546void NVPTXTTIImpl::getPeelingPreferences(Loop *L, ScalarEvolution &SE,
547 TTI::PeelingPreferences &PP) const {
548 BaseT::getPeelingPreferences(L, SE, PP);
549}
550
551bool NVPTXTTIImpl::collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
552 Intrinsic::ID IID) const {
553 switch (IID) {
554 case Intrinsic::nvvm_isspacep_const:
555 case Intrinsic::nvvm_isspacep_global:
556 case Intrinsic::nvvm_isspacep_local:
557 case Intrinsic::nvvm_isspacep_shared:
558 case Intrinsic::nvvm_isspacep_shared_cluster:
559 case Intrinsic::nvvm_prefetch_tensormap: {
560 OpIndexes.push_back(0);
561 return true;
562 }
563 }
564 return false;
565}
566
567Value *NVPTXTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
568 Value *OldV,
569 Value *NewV) const {
570 const Intrinsic::ID IID = II->getIntrinsicID();
571 switch (IID) {
572 case Intrinsic::nvvm_isspacep_const:
573 case Intrinsic::nvvm_isspacep_global:
574 case Intrinsic::nvvm_isspacep_local:
575 case Intrinsic::nvvm_isspacep_shared:
576 case Intrinsic::nvvm_isspacep_shared_cluster: {
577 const unsigned NewAS = NewV->getType()->getPointerAddressSpace();
578 if (const auto R = evaluateIsSpace(IID, NewAS))
579 return ConstantInt::get(II->getType(), *R);
580 return nullptr;
581 }
582 case Intrinsic::nvvm_prefetch_tensormap: {
583 IRBuilder<> Builder(II);
584 const unsigned NewAS = NewV->getType()->getPointerAddressSpace();
585 if (NewAS == NVPTXAS::ADDRESS_SPACE_CONST ||
586 NewAS == NVPTXAS::ADDRESS_SPACE_PARAM)
587 return Builder.CreateUnaryIntrinsic(Intrinsic::nvvm_prefetch_tensormap,
588 NewV);
589 return nullptr;
590 }
591 }
592 return nullptr;
593}
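// For example (illustrative): once InferAddressSpaces proves a pointer is in
// addrspace(3) (shared), an isspacep.shared query on it folds to true here,
// and a prefetch.tensormap is re-emitted on the more specific pointer when
// the new space is const or param.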
594
595unsigned NVPTXTTIImpl::getLoadStoreVecRegBitWidth(unsigned AddrSpace) const {
596 // 256 bit loads/stores are currently only supported for global address space
597 if (ST->has256BitVectorLoadStore(AddrSpace))
598 return 256;
599 return 128;
600}
601
602unsigned NVPTXTTIImpl::getAssumedAddrSpace(const Value *V) const {
603 if (isa<AllocaInst>(V))
604 return ADDRESS_SPACE_LOCAL;
605
606 if (const Argument *Arg = dyn_cast<Argument>(V)) {
607 if (isKernelFunction(*Arg->getParent())) {
608 const NVPTXTargetMachine &TM =
609 static_cast<const NVPTXTargetMachine &>(getTLI()->getTargetMachine());
610 if (TM.getDrvInterface() == NVPTX::CUDA && !Arg->hasByValAttr())
611 return ADDRESS_SPACE_GLOBAL;
612 } else {
613 // We assume that all device parameters that are passed byval will be
614 // placed in the local AS. Very simple cases will be updated after ISel to
615 // use the device param space where possible.
616 if (Arg->hasByValAttr())
617 return ADDRESS_SPACE_LOCAL;
618 }
619 }
620
621 return -1;
622}
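// For example, under the CUDA driver interface a kernel's non-byval pointer
// parameter is assumed to address global memory, an alloca is assumed local,
// and a byval argument of a __device__ function is assumed local; -1 means
// no assumption is made.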
623
624void NVPTXTTIImpl::collectKernelLaunchBounds(
625 const Function &F,
626 SmallVectorImpl<std::pair<StringRef, int64_t>> &LB) const {
627 if (const auto Val = getMaxClusterRank(F))
628 LB.push_back({"maxclusterrank", *Val});
629
630 const auto MaxNTID = getMaxNTID(F);
631 if (MaxNTID.size() > 0)
632 LB.push_back({"maxntidx", MaxNTID[0]});
633 if (MaxNTID.size() > 1)
634 LB.push_back({"maxntidy", MaxNTID[1]});
635 if (MaxNTID.size() > 2)
636 LB.push_back({"maxntidz", MaxNTID[2]});
637}
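// For example, a kernel annotated with maxntid x=128, y=1, z=1 contributes
// {"maxntidx", 128}, {"maxntidy", 1} and {"maxntidz", 1} to LB.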