//===-- AMDGPUCodeGenPrepare.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass does misc. AMDGPU optimizations on IR before instruction
/// selection.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "SIModeRegisterDefaults.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/UniformityAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/Utils/IntegerDivision.h"
#include "llvm/Transforms/Utils/Local.h"

#define DEBUG_TYPE "amdgpu-codegenprepare"

using namespace llvm;
using namespace llvm::PatternMatch;

namespace {

48 "amdgpu-codegenprepare-widen-constant-loads",
49 cl::desc("Widen sub-dword constant address space loads in AMDGPUCodeGenPrepare"),
51 cl::init(false));
52
53static cl::opt<bool>
54 BreakLargePHIs("amdgpu-codegenprepare-break-large-phis",
55 cl::desc("Break large PHI nodes for DAGISel"),
57
58static cl::opt<bool>
59 ForceBreakLargePHIs("amdgpu-codegenprepare-force-break-large-phis",
60 cl::desc("For testing purposes, always break large "
61 "PHIs even if it isn't profitable."),
63
64static cl::opt<unsigned> BreakLargePHIsThreshold(
65 "amdgpu-codegenprepare-break-large-phis-threshold",
66 cl::desc("Minimum type size in bits for breaking large PHI nodes"),
68
69static cl::opt<bool> UseMul24Intrin(
70 "amdgpu-codegenprepare-mul24",
71 cl::desc("Introduce mul24 intrinsics in AMDGPUCodeGenPrepare"),
73 cl::init(true));
74
75// Legalize 64-bit division by using the generic IR expansion.
76static cl::opt<bool> ExpandDiv64InIR(
77 "amdgpu-codegenprepare-expand-div64",
78 cl::desc("Expand 64-bit division in AMDGPUCodeGenPrepare"),
80 cl::init(false));
81
82// Leave all division operations as they are. This supersedes ExpandDiv64InIR
83// and is used for testing the legalizer.
84static cl::opt<bool> DisableIDivExpand(
85 "amdgpu-codegenprepare-disable-idiv-expansion",
86 cl::desc("Prevent expanding integer division in AMDGPUCodeGenPrepare"),
88 cl::init(false));
89
90// Disable processing of fdiv so we can better test the backend implementations.
91static cl::opt<bool> DisableFDivExpand(
92 "amdgpu-codegenprepare-disable-fdiv-expansion",
93 cl::desc("Prevent expanding floating point division in AMDGPUCodeGenPrepare"),
95 cl::init(false));
96
class AMDGPUCodeGenPrepareImpl
    : public InstVisitor<AMDGPUCodeGenPrepareImpl, bool> {
public:
  Function &F;
  const GCNSubtarget &ST;
  const AMDGPUTargetMachine &TM;
  const TargetLibraryInfo *TLI;
  const UniformityInfo &UA;
  const DataLayout &DL;
  SimplifyQuery SQ;
  const bool HasFP32DenormalFlush;
  bool FlowChanged = false;
  mutable Function *SqrtF32 = nullptr;
  mutable Function *LdexpF32 = nullptr;
  mutable SmallVector<WeakVH> DeadVals;

  DenseMap<const PHINode *, bool> BreakPhiNodesCache;

  AMDGPUCodeGenPrepareImpl(Function &F, const AMDGPUTargetMachine &TM,
                           const TargetLibraryInfo *TLI, AssumptionCache *AC,
                           const DominatorTree *DT, const UniformityInfo &UA)
      : F(F), ST(TM.getSubtarget<GCNSubtarget>(F)), TM(TM), TLI(TLI), UA(UA),
        DL(F.getDataLayout()), SQ(DL, TLI, DT, AC),
        HasFP32DenormalFlush(SIModeRegisterDefaults(F, ST).FP32Denormals ==
                             DenormalMode::getPreserveSign()) {}

  Function *getSqrtF32() const {
    if (SqrtF32)
      return SqrtF32;

    LLVMContext &Ctx = F.getContext();
    SqrtF32 = Intrinsic::getOrInsertDeclaration(
        F.getParent(), Intrinsic::amdgcn_sqrt, {Type::getFloatTy(Ctx)});
    return SqrtF32;
  }

  Function *getLdexpF32() const {
    if (LdexpF32)
      return LdexpF32;

    LLVMContext &Ctx = F.getContext();
    LdexpF32 = Intrinsic::getOrInsertDeclaration(
        F.getParent(), Intrinsic::ldexp,
        {Type::getFloatTy(Ctx), Type::getInt32Ty(Ctx)});
    return LdexpF32;
  }

  bool canBreakPHINode(const PHINode &I);

  /// Return true if \p T is a legal scalar floating point type.
  bool isLegalFloatingTy(const Type *T) const;

  /// Wrapper to pass all the arguments to computeKnownFPClass.
  KnownFPClass computeKnownFPClass(const Value *V, FPClassTest Interested,
                                   const Instruction *CtxI) const {
    return llvm::computeKnownFPClass(V, Interested,
                                     SQ.getWithInstruction(CtxI));
  }

  bool canIgnoreDenormalInput(const Value *V, const Instruction *CtxI) const {
    return HasFP32DenormalFlush ||
           computeKnownFPClass(V, fcSubnormal, CtxI).isKnownNeverSubnormal();
  }

  /// \returns The minimum number of bits needed to store the value of \p Op as
  /// an unsigned integer. Truncating to this size and then zero-extending to
  /// the original size will not change the value.
  unsigned numBitsUnsigned(Value *Op, const Instruction *CtxI) const;

  /// \returns The minimum number of bits needed to store the value of \p Op as
  /// a signed integer. Truncating to this size and then sign-extending to
  /// the original size will not change the value.
  unsigned numBitsSigned(Value *Op, const Instruction *CtxI) const;

  /// Replace mul instructions with llvm.amdgcn.mul.u24 or llvm.amdgcn.mul.s24.
  /// SelectionDAG has an issue where an and asserting the bits are known zero
  /// can get in the way of this transform, so do it in IR.
  bool replaceMulWithMul24(BinaryOperator &I) const;

  /// Perform the same function as the equivalently named function in
  /// DAGCombiner. Since we expand some divisions here, we need to perform this
  /// before the expansion obscures the select.
  bool foldBinOpIntoSelect(BinaryOperator &I) const;

  bool divHasSpecialOptimization(BinaryOperator &I,
                                 Value *Num, Value *Den) const;
  unsigned getDivNumBits(BinaryOperator &I, Value *Num, Value *Den,
                         unsigned MaxDivBits, bool Signed) const;

  /// Expands 24 bit div or rem.
  Value *expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den,
                        bool IsDiv, bool IsSigned) const;

  Value *expandDivRem24Impl(IRBuilder<> &Builder, BinaryOperator &I,
                            Value *Num, Value *Den, unsigned NumBits,
                            bool IsDiv, bool IsSigned) const;

  /// Expands 32 bit div or rem.
  Value *expandDivRem32(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den) const;

  Value *shrinkDivRem64(IRBuilder<> &Builder, BinaryOperator &I,
                        Value *Num, Value *Den) const;
  void expandDivRem64(BinaryOperator &I) const;

  /// Widen a scalar load.
  ///
  /// \details Widen a uniform, small-type load from constant memory to a full
  /// 32 bits, then truncate the result to the original type, so that a scalar
  /// load can be selected instead of a vector load.
  ///
  /// \returns True.

  bool canWidenScalarExtLoad(LoadInst &I) const;

  Value *matchFractPatImpl(Value &V, const APFloat &C) const;
  Value *matchFractPatNanAvoidant(Value &V);
  Value *applyFractPat(IRBuilder<> &Builder, Value *FractArg);

  bool canOptimizeWithRsq(FastMathFlags DivFMF, FastMathFlags SqrtFMF) const;

  Value *optimizeWithRsq(IRBuilder<> &Builder, Value *Num, Value *Den,
                         FastMathFlags DivFMF, FastMathFlags SqrtFMF,
                         const Instruction *CtxI) const;

  Value *optimizeWithRcp(IRBuilder<> &Builder, Value *Num, Value *Den,
                         FastMathFlags FMF, const Instruction *CtxI) const;
  Value *optimizeWithFDivFast(IRBuilder<> &Builder, Value *Num, Value *Den,
                              float ReqdAccuracy) const;

  Value *visitFDivElement(IRBuilder<> &Builder, Value *Num, Value *Den,
                          FastMathFlags DivFMF, FastMathFlags SqrtFMF,
                          Value *RsqOp, const Instruction *FDiv,
                          float ReqdAccuracy) const;

  std::pair<Value *, Value *> getFrexpResults(IRBuilder<> &Builder,
                                              Value *Src) const;

  Value *emitRcpIEEE1ULP(IRBuilder<> &Builder, Value *Src,
                         bool IsNegative) const;
  Value *emitFrexpDiv(IRBuilder<> &Builder, Value *LHS, Value *RHS,
                      FastMathFlags FMF) const;
  Value *emitSqrtIEEE2ULP(IRBuilder<> &Builder, Value *Src,
                          FastMathFlags FMF) const;
  Value *emitRsqF64(IRBuilder<> &Builder, Value *X, FastMathFlags SqrtFMF,
                    FastMathFlags DivFMF, const Instruction *CtxI,
                    bool IsNegative) const;

  CallInst *createWorkitemIdX(IRBuilder<> &B) const;
  void replaceWithWorkitemIdX(Instruction &I) const;
  void replaceWithMaskedWorkitemIdX(Instruction &I, unsigned WaveSize) const;
  bool tryReplaceWithWorkitemId(Instruction &I, unsigned Wave) const;

  bool tryNarrowMathIfNoOverflow(Instruction *I);

public:
  bool visitFDiv(BinaryOperator &I);

  bool visitInstruction(Instruction &I) { return false; }
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoadInst(LoadInst &I);
  bool visitSelectInst(SelectInst &I);
  bool visitPHINode(PHINode &I);
  bool visitAddrSpaceCastInst(AddrSpaceCastInst &I);

  bool visitIntrinsicInst(IntrinsicInst &I);
  bool visitFMinLike(IntrinsicInst &I);
  bool visitSqrt(IntrinsicInst &I);
  bool visitLog(FPMathOperator &Log, Intrinsic::ID IID);
  bool visitMbcntLo(IntrinsicInst &I) const;
  bool visitMbcntHi(IntrinsicInst &I) const;
  bool visitVectorReduceAdd(IntrinsicInst &I);
  bool visitSaturatingAdd(IntrinsicInst &I);
  bool run();
};

class AMDGPUCodeGenPrepare : public FunctionPass {
public:
  static char ID;
  AMDGPUCodeGenPrepare() : FunctionPass(ID) {}
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<UniformityInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();

    // FIXME: Division expansion needs to preserve the dominator tree.
    if (!ExpandDiv64InIR)
      AU.setPreservesAll();
  }
  bool runOnFunction(Function &F) override;
  StringRef getPassName() const override { return "AMDGPU IR optimizations"; }
};

} // end anonymous namespace

bool AMDGPUCodeGenPrepareImpl::run() {
  BreakPhiNodesCache.clear();
  bool MadeChange = false;

  // Need to use make_early_inc_range because integer division expansion is
  // handled by Transform/Utils, and it can delete instructions such as the
  // terminator of the BB.
  for (BasicBlock &BB : reverse(F)) {
    for (Instruction &I : make_early_inc_range(reverse(BB))) {
      if (!isInstructionTriviallyDead(&I, TLI))
        MadeChange |= visit(I);
    }
  }

  while (!DeadVals.empty()) {
    if (auto *I = dyn_cast_or_null<Instruction>(DeadVals.pop_back_val()))
      RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
  }

  return MadeChange;
}

bool AMDGPUCodeGenPrepareImpl::isLegalFloatingTy(const Type *Ty) const {
  return Ty->isFloatTy() || Ty->isDoubleTy() ||
         (Ty->isHalfTy() && ST.has16BitInsts());
}

bool AMDGPUCodeGenPrepareImpl::canWidenScalarExtLoad(LoadInst &I) const {
  Type *Ty = I.getType();
  int TySize = DL.getTypeSizeInBits(Ty);
  Align Alignment = DL.getValueOrABITypeAlignment(I.getAlign(), Ty);

  return I.isSimple() && TySize < 32 && Alignment >= 4 && UA.isUniformAtDef(&I);
}

unsigned
AMDGPUCodeGenPrepareImpl::numBitsUnsigned(Value *Op,
                                          const Instruction *CtxI) const {
  return computeKnownBits(Op, SQ.getWithInstruction(CtxI)).countMaxActiveBits();
}

unsigned
AMDGPUCodeGenPrepareImpl::numBitsSigned(Value *Op,
                                        const Instruction *CtxI) const {
  return ComputeMaxSignificantBits(Op, SQ.DL, SQ.AC, CtxI, SQ.DT);
}

static void extractValues(IRBuilder<> &Builder,
                          SmallVectorImpl<Value *> &Values, Value *V) {
  auto *VT = dyn_cast<FixedVectorType>(V->getType());
  if (!VT) {
    Values.push_back(V);
    return;
  }

  for (int I = 0, E = VT->getNumElements(); I != E; ++I)
    Values.push_back(Builder.CreateExtractElement(V, I));
}

static Value *insertValues(IRBuilder<> &Builder,
                           Type *Ty,
                           SmallVectorImpl<Value *> &Values) {
  if (!Ty->isVectorTy()) {
    assert(Values.size() == 1);
    return Values[0];
  }

  Value *NewVal = PoisonValue::get(Ty);
  for (int I = 0, E = Values.size(); I != E; ++I)
    NewVal = Builder.CreateInsertElement(NewVal, Values[I], I);

  return NewVal;
}

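// A sketch of the rewrite performed below: given 32-bit operands known to fit
// in 24 bits,
//   %m = mul i32 %a, %b
// becomes
//   %m = call i32 @llvm.amdgcn.mul.u24(i32 %a, i32 %b)
// (mul.i24 for the signed case). Vector multiplies are scalarized first and
// the element results reassembled afterwards.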
bool AMDGPUCodeGenPrepareImpl::replaceMulWithMul24(BinaryOperator &I) const {
  if (I.getOpcode() != Instruction::Mul)
    return false;

  Type *Ty = I.getType();
  unsigned Size = Ty->getScalarSizeInBits();
  if (Size <= 16 && ST.has16BitInsts())
    return false;

  // Prefer scalar if this could be s_mul_i32
  if (UA.isUniformAtDef(&I))
    return false;

  Value *LHS = I.getOperand(0);
  Value *RHS = I.getOperand(1);
  IRBuilder<> Builder(&I);
  Builder.SetCurrentDebugLocation(I.getDebugLoc());

  unsigned LHSBits = 0, RHSBits = 0;
  bool IsSigned = false;

  if (ST.hasMulU24() && (LHSBits = numBitsUnsigned(LHS, &I)) <= 24 &&
      (RHSBits = numBitsUnsigned(RHS, &I)) <= 24) {
    IsSigned = false;

  } else if (ST.hasMulI24() && (LHSBits = numBitsSigned(LHS, &I)) <= 24 &&
             (RHSBits = numBitsSigned(RHS, &I)) <= 24) {
    IsSigned = true;

  } else
    return false;

  SmallVector<Value *, 4> LHSVals;
  SmallVector<Value *, 4> RHSVals;
  SmallVector<Value *, 4> ResultVals;
  extractValues(Builder, LHSVals, LHS);
  extractValues(Builder, RHSVals, RHS);

  IntegerType *I32Ty = Builder.getInt32Ty();
  IntegerType *IntrinTy = Size > 32 ? Builder.getInt64Ty() : I32Ty;
  Type *DstTy = LHSVals[0]->getType();

  for (int I = 0, E = LHSVals.size(); I != E; ++I) {
    Value *LHS = IsSigned ? Builder.CreateSExtOrTrunc(LHSVals[I], I32Ty)
                          : Builder.CreateZExtOrTrunc(LHSVals[I], I32Ty);
    Value *RHS = IsSigned ? Builder.CreateSExtOrTrunc(RHSVals[I], I32Ty)
                          : Builder.CreateZExtOrTrunc(RHSVals[I], I32Ty);
    Intrinsic::ID ID =
        IsSigned ? Intrinsic::amdgcn_mul_i24 : Intrinsic::amdgcn_mul_u24;
    Value *Result = Builder.CreateIntrinsic(ID, {IntrinTy}, {LHS, RHS});
    Result = IsSigned ? Builder.CreateSExtOrTrunc(Result, DstTy)
                      : Builder.CreateZExtOrTrunc(Result, DstTy);
    ResultVals.push_back(Result);
  }

  Value *NewVal = insertValues(Builder, Ty, ResultVals);
  NewVal->takeName(&I);
  I.replaceAllUsesWith(NewVal);
  DeadVals.push_back(&I);

  return true;
}

// Find a select instruction, which may be hidden behind a cast. This is mostly
// to deal with cases where i16 selects were promoted here to i32.
static SelectInst *findSelectThroughCast(Value *V, CastInst *&Cast) {
  Cast = nullptr;
  if (SelectInst *Sel = dyn_cast<SelectInst>(V))
    return Sel;

  if ((Cast = dyn_cast<CastInst>(V))) {
    if (SelectInst *Sel = dyn_cast<SelectInst>(Cast->getOperand(0)))
      return Sel;
  }

  return nullptr;
}

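// A sketch of the fold performed below: a binop between a constant and a
// one-use select of two constants becomes a select of the two folded
// constants, e.g.
//   %s = select i1 %c, i32 8, i32 24
//   %d = udiv i32 %s, 4
// becomes
//   %d = select i1 %c, i32 2, i32 6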
bool AMDGPUCodeGenPrepareImpl::foldBinOpIntoSelect(BinaryOperator &BO) const {
  // Don't do this unless the old select is going away. We want to eliminate the
  // binary operator, not replace a binop with a select.
  int SelOpNo = 0;

  CastInst *CastOp;

  // TODO: Should probably try to handle some cases with multiple
  // users. Duplicating the select may be profitable for division.
  SelectInst *Sel = findSelectThroughCast(BO.getOperand(0), CastOp);
  if (!Sel || !Sel->hasOneUse()) {
    SelOpNo = 1;
    Sel = findSelectThroughCast(BO.getOperand(1), CastOp);
  }

  if (!Sel || !Sel->hasOneUse())
    return false;

  Constant *CT = dyn_cast<Constant>(Sel->getTrueValue());
  Constant *CF = dyn_cast<Constant>(Sel->getFalseValue());
  Constant *CBO = dyn_cast<Constant>(BO.getOperand(SelOpNo ^ 1));
  if (!CBO || !CT || !CF)
    return false;

  if (CastOp) {
    if (!CastOp->hasOneUse())
      return false;
    CT = ConstantFoldCastOperand(CastOp->getOpcode(), CT, BO.getType(), DL);
    CF = ConstantFoldCastOperand(CastOp->getOpcode(), CF, BO.getType(), DL);
  }

  // TODO: Handle special 0/-1 cases DAG combine does, although we only really
  // need to handle divisions here.
  Constant *FoldedT =
      SelOpNo ? ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CT, DL)
              : ConstantFoldBinaryOpOperands(BO.getOpcode(), CT, CBO, DL);
  if (!FoldedT || isa<ConstantExpr>(FoldedT))
    return false;

  Constant *FoldedF =
      SelOpNo ? ConstantFoldBinaryOpOperands(BO.getOpcode(), CBO, CF, DL)
              : ConstantFoldBinaryOpOperands(BO.getOpcode(), CF, CBO, DL);
  if (!FoldedF || isa<ConstantExpr>(FoldedF))
    return false;

  IRBuilder<> Builder(&BO);
  Builder.SetCurrentDebugLocation(BO.getDebugLoc());
  if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(&BO))
    Builder.setFastMathFlags(FPOp->getFastMathFlags());

  Value *NewSelect = Builder.CreateSelect(Sel->getCondition(),
                                          FoldedT, FoldedF);
  NewSelect->takeName(&BO);
  BO.replaceAllUsesWith(NewSelect);
  DeadVals.push_back(&BO);
  if (CastOp)
    DeadVals.push_back(CastOp);
  DeadVals.push_back(Sel);
  return true;
}

std::pair<Value *, Value *>
AMDGPUCodeGenPrepareImpl::getFrexpResults(IRBuilder<> &Builder,
                                          Value *Src) const {
  Type *Ty = Src->getType();
  Value *Frexp = Builder.CreateIntrinsic(Intrinsic::frexp,
                                         {Ty, Builder.getInt32Ty()}, Src);
  Value *FrexpMant = Builder.CreateExtractValue(Frexp, {0});

  // Bypass the bug workaround for the exponent result since it doesn't matter.
  // TODO: Does the bug workaround even really need to consider the exponent
  // result? It's unspecified by the spec.

  Value *FrexpExp =
      ST.hasFractBug()
          ? Builder.CreateIntrinsic(Intrinsic::amdgcn_frexp_exp,
                                    {Builder.getInt32Ty(), Ty}, Src)
          : Builder.CreateExtractValue(Frexp, {1});
  return {FrexpMant, FrexpExp};
}

/// Emit an expansion of 1.0 / Src good for 1ulp that supports denormals.
Value *AMDGPUCodeGenPrepareImpl::emitRcpIEEE1ULP(IRBuilder<> &Builder,
                                                 Value *Src,
                                                 bool IsNegative) const {
  // Same as for 1.0, but expand the sign out of the constant.
  // -1.0 / x -> rcp (fneg x)
  if (IsNegative)
    Src = Builder.CreateFNeg(Src);

  // The rcp instruction doesn't support denormals, so scale the input
  // out of the denormal range and convert at the end.
  //
  // Expand as 2^-n * (1.0 / (x * 2^n))

  // TODO: Skip scaling if input is known never denormal and the input
  // range won't underflow to denormal. The hard part is knowing the
  // result. We need a range check, the result could be denormal for
  // 0x1p+126 < den <= 0x1p+127.
  auto [FrexpMant, FrexpExp] = getFrexpResults(Builder, Src);
  Value *ScaleFactor = Builder.CreateNeg(FrexpExp);
  Value *Rcp = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rcp, FrexpMant);
  return Builder.CreateCall(getLdexpF32(), {Rcp, ScaleFactor});
}

/// Emit a 2ulp expansion for fdiv by using frexp for input scaling.
Value *AMDGPUCodeGenPrepareImpl::emitFrexpDiv(IRBuilder<> &Builder, Value *LHS,
                                              Value *RHS,
                                              FastMathFlags FMF) const {
  // If we have to work around the fract/frexp bug, we're worse off than using
  // the fdiv.fast expansion. The full safe expansion is faster if we have fast
  // FMA.
  if (HasFP32DenormalFlush && ST.hasFractBug() && !ST.hasFastFMAF32() &&
      (!FMF.noNaNs() || !FMF.noInfs()))
    return nullptr;

  // We're scaling the LHS to avoid a denormal input, and scale the denominator
  // to avoid large values underflowing the result.
  auto [FrexpMantRHS, FrexpExpRHS] = getFrexpResults(Builder, RHS);

  Value *Rcp =
      Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rcp, FrexpMantRHS);

  auto [FrexpMantLHS, FrexpExpLHS] = getFrexpResults(Builder, LHS);
  Value *Mul = Builder.CreateFMul(FrexpMantLHS, Rcp);

  // We multiplied by 2^N/2^M, so we need to multiply by 2^(N-M) to scale the
  // result.
  Value *ExpDiff = Builder.CreateSub(FrexpExpLHS, FrexpExpRHS);
  return Builder.CreateCall(getLdexpF32(), {Mul, ExpDiff});
}

/// Emit a sqrt that handles denormals and is accurate to 2ulp.
Value *AMDGPUCodeGenPrepareImpl::emitSqrtIEEE2ULP(IRBuilder<> &Builder,
                                                  Value *Src,
                                                  FastMathFlags FMF) const {
  Type *Ty = Src->getType();
  APFloat SmallestNormal =
      APFloat::getSmallestNormalized(Ty->getFltSemantics());
  Value *NeedScale =
      Builder.CreateFCmpOLT(Src, ConstantFP::get(Ty, SmallestNormal));

  ConstantInt *Zero = Builder.getInt32(0);
  Value *InputScaleFactor =
      Builder.CreateSelect(NeedScale, Builder.getInt32(32), Zero);

  Value *Scaled = Builder.CreateCall(getLdexpF32(), {Src, InputScaleFactor});

  Value *Sqrt = Builder.CreateCall(getSqrtF32(), Scaled);

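  // sqrt(x * 2^32) == 2^16 * sqrt(x), so undoing the input scaling requires
  // scaling the result by 2^-16, hence the -16 exponent below.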
  Value *OutputScaleFactor =
      Builder.CreateSelect(NeedScale, Builder.getInt32(-16), Zero);
  return Builder.CreateCall(getLdexpF32(), {Sqrt, OutputScaleFactor});
}

/// Emit an expansion of 1.0 / sqrt(Src) good for 1ulp that supports denormals.
static Value *emitRsqIEEE1ULP(IRBuilder<> &Builder, Value *Src,
                              bool IsNegative) {
  // bool need_scale = x < 0x1p-126f;
  // float input_scale = need_scale ? 0x1.0p+24f : 1.0f;
  // float output_scale = need_scale ? 0x1.0p+12f : 1.0f;
  // rsq(x * input_scale) * output_scale;
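  //
  // The scale factors work because rsq(x * 2^24) == rsq(x) * 2^-12, so
  // multiplying the result by 2^12 restores the unscaled value.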

  Type *Ty = Src->getType();
  APFloat SmallestNormal =
      APFloat::getSmallestNormalized(Ty->getFltSemantics());
  Value *NeedScale =
      Builder.CreateFCmpOLT(Src, ConstantFP::get(Ty, SmallestNormal));
  Constant *One = ConstantFP::get(Ty, 1.0);
  Constant *InputScale = ConstantFP::get(Ty, 0x1.0p+24);
  Constant *OutputScale =
      ConstantFP::get(Ty, IsNegative ? -0x1.0p+12 : 0x1.0p+12);

  Value *InputScaleFactor = Builder.CreateSelect(NeedScale, InputScale, One);

  Value *ScaledInput = Builder.CreateFMul(Src, InputScaleFactor);
  Value *Rsq = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rsq, ScaledInput);
  Value *OutputScaleFactor = Builder.CreateSelect(
      NeedScale, OutputScale, IsNegative ? ConstantFP::get(Ty, -1.0) : One);

  return Builder.CreateFMul(Rsq, OutputScaleFactor);
}

/// Emit inverse sqrt expansion for f64 with a correction sequence on top of
/// v_rsq_f64. This should give a 1ulp result.
Value *AMDGPUCodeGenPrepareImpl::emitRsqF64(IRBuilder<> &Builder, Value *X,
                                            FastMathFlags SqrtFMF,
                                            FastMathFlags DivFMF,
                                            const Instruction *CtxI,
                                            bool IsNegative) const {
  // rsq(x):
  // double y0 = BUILTIN_AMDGPU_RSQRT_F64(x);
  // double e = MATH_MAD(-y0 * (x == PINF_F64 || x == 0.0 ? y0 : x), y0, 1.0);
  // return MATH_MAD(y0*e, MATH_MAD(e, 0.375, 0.5), y0);
  //
  // -rsq(x):
  // double y0 = BUILTIN_AMDGPU_RSQRT_F64(x);
  // double e = MATH_MAD(-y0 * (x == PINF_F64 || x == 0.0 ? y0 : x), y0, 1.0);
  // return MATH_MAD(-y0*e, MATH_MAD(e, 0.375, 0.5), -y0);
  //
  // The rsq instruction handles the special cases correctly. We need to check
  // for the edge case conditions to ensure the special case propagates through
  // the later instructions.
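  //
  // With e = 1 - x*y0^2 (y0 being the initial estimate), the expression
  // y0 + y0*e*(0.5 + 0.375*e) computed by the two MADs below is the series
  // expansion of y0/sqrt(1 - e) = 1/sqrt(x), truncated after the e^2 term.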

  Value *Y0 = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rsq, X);

  // Try to elide the edge case check.
  //
  // Fast math flags imply:
  // sqrt ninf => !isinf(x)
  // fdiv ninf => x != 0, !isinf(x)
  bool MaybePosInf = !SqrtFMF.noInfs() && !DivFMF.noInfs();
  bool MaybeZero = !DivFMF.noInfs();

  DenormalMode DenormMode;
  FPClassTest Interested = fcNone;
  if (MaybePosInf)
    Interested = fcPosInf;
  if (MaybeZero)
    Interested |= fcZero;

  if (Interested != fcNone) {
    KnownFPClass KnownSrc = computeKnownFPClass(X, Interested, CtxI);
    if (KnownSrc.isKnownNeverPosInfinity())
      MaybePosInf = false;

    DenormMode = F.getDenormalMode(X->getType()->getFltSemantics());
    if (KnownSrc.isKnownNeverLogicalZero(DenormMode))
      MaybeZero = false;
  }

  Value *SpecialOrRsq = X;
  if (MaybeZero || MaybePosInf) {
    Value *Cond;
    if (MaybePosInf && MaybeZero) {
      if (DenormMode.Input != DenormalMode::DenormalModeKind::Dynamic) {
        FPClassTest TestMask = fcPosInf | fcZero;
        if (DenormMode.inputsAreZero())
          TestMask |= fcSubnormal;

        Cond = Builder.createIsFPClass(X, TestMask);
      } else {
        // Avoid using llvm.is.fpclass for dynamic denormal mode, since it
        // doesn't respect the floating-point environment.
        Value *IsZero =
            Builder.CreateFCmpOEQ(X, ConstantFP::getZero(X->getType()));
        Value *IsInf =
            Builder.CreateFCmpOEQ(X, ConstantFP::getInfinity(X->getType()));
        Cond = Builder.CreateOr(IsZero, IsInf);
      }
    } else if (MaybeZero) {
      Cond = Builder.CreateFCmpOEQ(X, ConstantFP::getZero(X->getType()));
    } else {
      Cond = Builder.CreateFCmpOEQ(X, ConstantFP::getInfinity(X->getType()));
    }

    SpecialOrRsq = Builder.CreateSelect(Cond, Y0, X);
  }

  Value *NegY0 = Builder.CreateFNeg(Y0);
  Value *NegXY0 = Builder.CreateFMul(SpecialOrRsq, NegY0);

  // Could be fmuladd, but isFMAFasterThanFMulAndFAdd is always true for f64.
  Value *E = Builder.CreateFMA(NegXY0, Y0, ConstantFP::get(X->getType(), 1.0));

  Value *Y0E = Builder.CreateFMul(E, IsNegative ? NegY0 : Y0);

  Value *EFMA = Builder.CreateFMA(E, ConstantFP::get(X->getType(), 0.375),
                                  ConstantFP::get(X->getType(), 0.5));

  return Builder.CreateFMA(Y0E, EFMA, IsNegative ? NegY0 : Y0);
}

bool AMDGPUCodeGenPrepareImpl::canOptimizeWithRsq(FastMathFlags DivFMF,
                                                  FastMathFlags SqrtFMF) const {
  // The rsqrt contraction increases accuracy from ~2ulp to ~1ulp for f32 and
  // f64.
  return DivFMF.allowContract() && SqrtFMF.allowContract();
}

Value *AMDGPUCodeGenPrepareImpl::optimizeWithRsq(
    IRBuilder<> &Builder, Value *Num, Value *Den, const FastMathFlags DivFMF,
    const FastMathFlags SqrtFMF, const Instruction *CtxI) const {
  // The rsqrt contraction increases accuracy from ~2ulp to ~1ulp.
  assert(DivFMF.allowContract() && SqrtFMF.allowContract());

  // rsq_f16 is accurate to 0.51 ulp.
  // rsq_f32 is accurate for !fpmath >= 1.0ulp and denormals are flushed.
  // rsq_f64 is never accurate.
  const ConstantFP *CLHS = dyn_cast<ConstantFP>(Num);
  if (!CLHS)
    return nullptr;

  bool IsNegative = false;

  // TODO: Handle other numerator values with arcp.
  if (CLHS->isExactlyValue(1.0) || (IsNegative = CLHS->isExactlyValue(-1.0))) {
    // Add in the sqrt flags.
    IRBuilder<>::FastMathFlagGuard Guard(Builder);
    Builder.setFastMathFlags(DivFMF | SqrtFMF);

    if (Den->getType()->isFloatTy()) {
      if ((DivFMF.approxFunc() && SqrtFMF.approxFunc()) ||
          canIgnoreDenormalInput(Den, CtxI)) {
        Value *Result =
            Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rsq, Den);
        // -1.0 / sqrt(x) -> fneg(rsq(x))
        return IsNegative ? Builder.CreateFNeg(Result) : Result;
      }

      return emitRsqIEEE1ULP(Builder, Den, IsNegative);
    }

    if (Den->getType()->isDoubleTy())
      return emitRsqF64(Builder, Den, SqrtFMF, DivFMF, CtxI, IsNegative);
  }

  return nullptr;
}

// Optimize fdiv with rcp:
//
// 1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
//        allowed with afn.
//
// a/b -> a*rcp(b) when arcp is allowed and 1.0 ULP of accuracy is sufficient.
Value *
AMDGPUCodeGenPrepareImpl::optimizeWithRcp(IRBuilder<> &Builder, Value *Num,
                                          Value *Den, FastMathFlags FMF,
                                          const Instruction *CtxI) const {
  // rcp_f16 is accurate to 0.51 ulp.
  // rcp_f32 is accurate for !fpmath >= 1.0ulp and denormals are flushed.
  // rcp_f64 is never accurate.
  assert(Den->getType()->isFloatTy());

  if (const ConstantFP *CLHS = dyn_cast<ConstantFP>(Num)) {
    bool IsNegative = false;
    if (CLHS->isExactlyValue(1.0) ||
        (IsNegative = CLHS->isExactlyValue(-1.0))) {
      Value *Src = Den;

      if (HasFP32DenormalFlush || FMF.approxFunc()) {
        // -1.0 / x -> 1.0 / fneg(x)
        if (IsNegative)
          Src = Builder.CreateFNeg(Src);

        // v_rcp_f32 and v_rsq_f32 do not support denormals, and according to
        // the CI documentation they have a worst case error of 1 ulp.
        // OpenCL requires <= 2.5 ulp for 1.0 / x, so it should always be OK
        // to use them as long as we aren't trying to use denormals.
        //
        // v_rcp_f16 and v_rsq_f16 DO support denormals.

        // NOTE: v_sqrt and v_rcp will be combined to v_rsq later. So we don't
        // insert an rsq intrinsic here.

        // 1.0 / x -> rcp(x)
        return Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rcp, Src);
      }

      // TODO: If the input isn't denormal, and we know the input exponent isn't
      // big enough to introduce a denormal, we can avoid the scaling.
      return emitRcpIEEE1ULP(Builder, Src, IsNegative);
    }
  }

  if (FMF.allowReciprocal()) {
    // x / y -> x * (1.0 / y)

    // TODO: Could avoid denormal scaling and use raw rcp if we knew the output
    // will never underflow.
    if (HasFP32DenormalFlush || FMF.approxFunc()) {
      Value *Recip = Builder.CreateUnaryIntrinsic(Intrinsic::amdgcn_rcp, Den);
      return Builder.CreateFMul(Num, Recip);
    }

    Value *Recip = emitRcpIEEE1ULP(Builder, Den, false);
    return Builder.CreateFMul(Num, Recip);
  }

  return nullptr;
}

// Optimize with fdiv.fast:
//
// a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
//
// 1/x -> fdiv.fast(1,x) when !fpmath >= 2.5ulp.
//
// NOTE: optimizeWithRcp should be tried first because rcp is the preference.
Value *AMDGPUCodeGenPrepareImpl::optimizeWithFDivFast(
    IRBuilder<> &Builder, Value *Num, Value *Den, float ReqdAccuracy) const {
  // fdiv.fast can achieve 2.5 ULP accuracy.
  if (ReqdAccuracy < 2.5f)
    return nullptr;

  // Only have fdiv.fast for f32.
  assert(Den->getType()->isFloatTy());

  bool NumIsOne = false;
  if (const ConstantFP *CNum = dyn_cast<ConstantFP>(Num)) {
    if (CNum->isExactlyValue(+1.0) || CNum->isExactlyValue(-1.0))
      NumIsOne = true;
  }

  // fdiv.fast does not support denormals, but 1.0/x is always fine to expand
  // with it.
  //
  // TODO: This works for any value with a specific known exponent range, don't
  // just limit to constant 1.
  if (!HasFP32DenormalFlush && !NumIsOne)
    return nullptr;

  return Builder.CreateIntrinsic(Intrinsic::amdgcn_fdiv_fast, {Num, Den});
}

Value *AMDGPUCodeGenPrepareImpl::visitFDivElement(
    IRBuilder<> &Builder, Value *Num, Value *Den, FastMathFlags DivFMF,
    FastMathFlags SqrtFMF, Value *RsqOp, const Instruction *FDivInst,
    float ReqdDivAccuracy) const {
  if (RsqOp) {
    Value *Rsq =
        optimizeWithRsq(Builder, Num, RsqOp, DivFMF, SqrtFMF, FDivInst);
    if (Rsq)
      return Rsq;
  }

  if (!Num->getType()->isFloatTy())
    return nullptr;

  Value *Rcp = optimizeWithRcp(Builder, Num, Den, DivFMF, FDivInst);
  if (Rcp)
    return Rcp;

  // In the basic case fdiv_fast has the same instruction count as the frexp div
  // expansion. Slightly prefer fdiv_fast since it ends in an fmul that can
  // potentially be fused into a user. Also, materialization of the constants
  // can be reused for multiple instances.
  Value *FDivFast = optimizeWithFDivFast(Builder, Num, Den, ReqdDivAccuracy);
  if (FDivFast)
    return FDivFast;

  return emitFrexpDiv(Builder, Num, Den, DivFMF);
}

// Optimization is performed based on fpmath, fast math flags, and the denormal
// mode to optimize fdiv with either rcp or fdiv.fast.
//
// With rcp:
// 1/x -> rcp(x) when rcp is sufficiently accurate or inaccurate rcp is
//        allowed with afn.
//
// a/b -> a*rcp(b) when inaccurate rcp is allowed with afn.
//
// With fdiv.fast:
// a/b -> fdiv.fast(a, b) when !fpmath >= 2.5ulp with denormals flushed.
//
// 1/x -> fdiv.fast(1,x) when !fpmath >= 2.5ulp.
//
// NOTE: rcp is the preference in cases that both are legal.
bool AMDGPUCodeGenPrepareImpl::visitFDiv(BinaryOperator &FDiv) {
  if (DisableFDivExpand)
    return false;

  Type *Ty = FDiv.getType()->getScalarType();
  const bool IsFloat = Ty->isFloatTy();
  if (!IsFloat && !Ty->isDoubleTy())
    return false;

  // The f64 rcp/rsq approximations are pretty inaccurate. We can do an
  // expansion around them in codegen. f16 is good enough to always use.

  const FPMathOperator *FPOp = cast<const FPMathOperator>(&FDiv);
  const FastMathFlags DivFMF = FPOp->getFastMathFlags();
  const float ReqdAccuracy = FPOp->getFPAccuracy();

  FastMathFlags SqrtFMF;

  Value *Num = FDiv.getOperand(0);
  Value *Den = FDiv.getOperand(1);

  Value *RsqOp = nullptr;
  auto *DenII = dyn_cast<IntrinsicInst>(Den);
  if (DenII && DenII->getIntrinsicID() == Intrinsic::sqrt &&
      DenII->hasOneUse()) {
    const auto *SqrtOp = cast<FPMathOperator>(DenII);
    SqrtFMF = SqrtOp->getFastMathFlags();
    if (canOptimizeWithRsq(DivFMF, SqrtFMF))
      RsqOp = SqrtOp->getOperand(0);
  }

  // rcp path not yet implemented for f64.
  if (!IsFloat && !RsqOp)
    return false;

  // Inaccurate rcp is allowed with afn.
  //
  // Defer to codegen to handle this.
  //
  // TODO: Decide on an interpretation for interactions between afn + arcp +
  // !fpmath, and make it consistent between here and codegen. For now, defer
  // expansion of afn to codegen. The current interpretation is so aggressive we
  // don't need any pre-consideration here when we have better information. A
  // more conservative interpretation could use handling here.
  const bool AllowInaccurateRcp = DivFMF.approxFunc();
  if (!RsqOp && AllowInaccurateRcp)
    return false;

  // Defer the correct implementations to codegen.
  if (IsFloat && ReqdAccuracy < 1.0f)
    return false;

  IRBuilder<> Builder(FDiv.getParent(), std::next(FDiv.getIterator()));
  Builder.setFastMathFlags(DivFMF);
  Builder.SetCurrentDebugLocation(FDiv.getDebugLoc());

  SmallVector<Value *, 4> NumVals;
  SmallVector<Value *, 4> DenVals;
  SmallVector<Value *, 4> RsqDenVals;
  extractValues(Builder, NumVals, Num);
  extractValues(Builder, DenVals, Den);

  if (RsqOp)
    extractValues(Builder, RsqDenVals, RsqOp);

  SmallVector<Value *, 4> ResultVals(NumVals.size());
  for (int I = 0, E = NumVals.size(); I != E; ++I) {
    Value *NumElt = NumVals[I];
    Value *DenElt = DenVals[I];
    Value *RsqDenElt = RsqOp ? RsqDenVals[I] : nullptr;

    Value *NewElt =
        visitFDivElement(Builder, NumElt, DenElt, DivFMF, SqrtFMF, RsqDenElt,
                         cast<Instruction>(FPOp), ReqdAccuracy);
    if (!NewElt) {
      // Keep the original, but scalarized.

      // This has the unfortunate side effect of sometimes scalarizing when
      // we're not going to do anything.
      NewElt = Builder.CreateFDiv(NumElt, DenElt);
      if (auto *NewEltInst = dyn_cast<Instruction>(NewElt))
        NewEltInst->copyMetadata(FDiv);
    }

    ResultVals[I] = NewElt;
  }

  Value *NewVal = insertValues(Builder, FDiv.getType(), ResultVals);

  if (NewVal) {
    FDiv.replaceAllUsesWith(NewVal);
    NewVal->takeName(&FDiv);
    DeadVals.push_back(&FDiv);
  }

  return true;
}

static std::pair<Value *, Value *> getMul64(IRBuilder<> &Builder,
                                            Value *LHS, Value *RHS) {
  Type *I32Ty = Builder.getInt32Ty();
  Type *I64Ty = Builder.getInt64Ty();

  Value *LHS_EXT64 = Builder.CreateZExt(LHS, I64Ty);
  Value *RHS_EXT64 = Builder.CreateZExt(RHS, I64Ty);
  Value *MUL64 = Builder.CreateMul(LHS_EXT64, RHS_EXT64);
  Value *Lo = Builder.CreateTrunc(MUL64, I32Ty);
  Value *Hi = Builder.CreateLShr(MUL64, Builder.getInt64(32));
  Hi = Builder.CreateTrunc(Hi, I32Ty);
  return std::pair(Lo, Hi);
}

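// Returns only the high 32 bits of the zero-extended 64-bit product, i.e. the
// equivalent of a umulh operation.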
static Value *getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
  return getMul64(Builder, LHS, RHS).second;
}

/// Figure out how many bits are really needed for this division.
/// \p MaxDivBits is an optimization hint to bypass the second
/// ComputeNumSignBits/computeKnownBits call if the first one is
/// insufficient.
unsigned AMDGPUCodeGenPrepareImpl::getDivNumBits(BinaryOperator &I, Value *Num,
                                                 Value *Den,
                                                 unsigned MaxDivBits,
                                                 bool IsSigned) const {
  assert(Num->getType()->getScalarSizeInBits() ==
         Den->getType()->getScalarSizeInBits());
  unsigned SSBits = Num->getType()->getScalarSizeInBits();
  if (IsSigned) {
    unsigned RHSSignBits = ComputeNumSignBits(Den, SQ.DL, SQ.AC, &I, SQ.DT);
    // A sign bit needs to be reserved for shrinking.
    unsigned DivBits = SSBits - RHSSignBits + 1;
    if (DivBits > MaxDivBits)
      return SSBits;

    unsigned LHSSignBits = ComputeNumSignBits(Num, SQ.DL, SQ.AC, &I);

    unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
    DivBits = SSBits - SignBits + 1;
    return DivBits;
  }

  // All bits are used for unsigned division for Num or Den in range
  // (SignedMax, UnsignedMax].
  KnownBits Known = computeKnownBits(Den, SQ.getWithInstruction(&I));
  if (Known.isNegative() || !Known.isNonNegative())
    return SSBits;
  unsigned RHSSignBits = Known.countMinLeadingZeros();
  unsigned DivBits = SSBits - RHSSignBits;
  if (DivBits > MaxDivBits)
    return SSBits;

  Known = computeKnownBits(Num, SQ.getWithInstruction(&I));
  if (Known.isNegative() || !Known.isNonNegative())
    return SSBits;
  unsigned LHSSignBits = Known.countMinLeadingZeros();

  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
  DivBits = SSBits - SignBits;
  return DivBits;
}

// The fractional part of a float is enough to accurately represent up to
// a 24-bit signed integer.
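// (An f32 mantissa provides 24 bits of precision: 23 stored bits plus the
// implicit leading one, so every integer with magnitude below 2^24 is exactly
// representable, which is what makes the float-based expansion below exact.)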
Value *AMDGPUCodeGenPrepareImpl::expandDivRem24(IRBuilder<> &Builder,
                                                BinaryOperator &I, Value *Num,
                                                Value *Den, bool IsDiv,
                                                bool IsSigned) const {
  unsigned DivBits = getDivNumBits(I, Num, Den, 24, IsSigned);
  if (DivBits > 24)
    return nullptr;
  return expandDivRem24Impl(Builder, I, Num, Den, DivBits, IsDiv, IsSigned);
}

Value *AMDGPUCodeGenPrepareImpl::expandDivRem24Impl(
    IRBuilder<> &Builder, BinaryOperator &I, Value *Num, Value *Den,
    unsigned DivBits, bool IsDiv, bool IsSigned) const {
  Type *I32Ty = Builder.getInt32Ty();
  Num = Builder.CreateTrunc(Num, I32Ty);
  Den = Builder.CreateTrunc(Den, I32Ty);

  Type *F32Ty = Builder.getFloatTy();
  ConstantInt *One = Builder.getInt32(1);
  Value *JQ = One;

  if (IsSigned) {
    // char|short jq = ia ^ ib;
    JQ = Builder.CreateXor(Num, Den);

    // jq = jq >> (bitsize - 2)
    JQ = Builder.CreateAShr(JQ, Builder.getInt32(30));

    // jq = jq | 0x1
    JQ = Builder.CreateOr(JQ, One);
  }

  // int ia = (int)LHS;
  Value *IA = Num;

  // int ib = (int)RHS;
  Value *IB = Den;

  // float fa = (float)ia;
  Value *FA = IsSigned ? Builder.CreateSIToFP(IA, F32Ty)
                       : Builder.CreateUIToFP(IA, F32Ty);

  // float fb = (float)ib;
  Value *FB = IsSigned ? Builder.CreateSIToFP(IB, F32Ty)
                       : Builder.CreateUIToFP(IB, F32Ty);

  Value *RCP = Builder.CreateIntrinsic(Intrinsic::amdgcn_rcp,
                                       Builder.getFloatTy(), {FB});
  Value *FQM = Builder.CreateFMul(FA, RCP);

  // fq = trunc(fqm);
  CallInst *FQ = Builder.CreateUnaryIntrinsic(Intrinsic::trunc, FQM);
  FQ->copyFastMathFlags(Builder.getFastMathFlags());

  // float fqneg = -fq;
  Value *FQNeg = Builder.CreateFNeg(FQ);

  // float fr = mad(fqneg, fb, fa);
  auto FMAD = !ST.hasMadMacF32Insts()
                  ? Intrinsic::fma
                  : (Intrinsic::ID)Intrinsic::amdgcn_fmad_ftz;
  Value *FR = Builder.CreateIntrinsic(FMAD,
                                      {FQNeg->getType()}, {FQNeg, FB, FA}, FQ);

  // int iq = (int)fq;
  Value *IQ = IsSigned ? Builder.CreateFPToSI(FQ, I32Ty)
                       : Builder.CreateFPToUI(FQ, I32Ty);

  // fr = fabs(fr);
  FR = Builder.CreateFAbs(FR, FQ);

  // fb = fabs(fb);
  FB = Builder.CreateFAbs(FB, FQ);

  // int cv = fr >= fb;
  Value *CV = Builder.CreateFCmpOGE(FR, FB);

  // jq = (cv ? jq : 0);
  JQ = Builder.CreateSelect(CV, JQ, Builder.getInt32(0));

  // dst = iq + jq;
  Value *Div = Builder.CreateAdd(IQ, JQ);

  Value *Res = Div;
  if (!IsDiv) {
    // Rem needs compensation, it's easier to recompute it.
    Value *Rem = Builder.CreateMul(Div, Den);
    Res = Builder.CreateSub(Num, Rem);
  }

  if (DivBits != 0 && DivBits < 32) {
    // Extend in register from the number of bits this divide really is.
    if (IsSigned) {
      int InRegBits = 32 - DivBits;

      Res = Builder.CreateShl(Res, InRegBits);
      Res = Builder.CreateAShr(Res, InRegBits);
    } else {
      ConstantInt *TruncMask
          = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
      Res = Builder.CreateAnd(Res, TruncMask);
    }
  }

  return Res;
}

// Try to recognize special cases for which the DAG will emit special, better
// expansions than the general expansion we do here.

// TODO: It would be better to just directly handle those optimizations here.
bool AMDGPUCodeGenPrepareImpl::divHasSpecialOptimization(BinaryOperator &I,
                                                         Value *Num,
                                                         Value *Den) const {
  if (Constant *C = dyn_cast<Constant>(Den)) {
    // Arbitrary constants get a better expansion as long as a wider mulhi is
    // legal.
    if (C->getType()->getScalarSizeInBits() <= 32)
      return true;

    // TODO: Sdiv check for not exact for some reason.

    // If there's no wider mulhi, there's only a better expansion for powers of
    // two.
    // TODO: Should really know for each vector element.
    if (isKnownToBeAPowerOfTwo(C, true, SQ.getWithInstruction(&I)))
      return true;

    return false;
  }

  if (BinaryOperator *BinOpDen = dyn_cast<BinaryOperator>(Den)) {
    // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
    if (BinOpDen->getOpcode() == Instruction::Shl &&
        isa<Constant>(BinOpDen->getOperand(0)) &&
        isKnownToBeAPowerOfTwo(BinOpDen->getOperand(0), true,
                               SQ.getWithInstruction(&I))) {
      return true;
    }
  }

  return false;
}

static Value *getSign32(Value *V, IRBuilder<> &Builder, const DataLayout &DL) {
  // Check whether the sign can be determined statically.
  KnownBits Known = computeKnownBits(V, DL);
  if (Known.isNegative())
    return Constant::getAllOnesValue(V->getType());
  if (Known.isNonNegative())
    return Constant::getNullValue(V->getType());
  return Builder.CreateAShr(V, Builder.getInt32(31));
}

Value *AMDGPUCodeGenPrepareImpl::expandDivRem32(IRBuilder<> &Builder,
                                                BinaryOperator &I, Value *X,
                                                Value *Y) const {
  Instruction::BinaryOps Opc = I.getOpcode();
  assert(Opc == Instruction::URem || Opc == Instruction::UDiv ||
         Opc == Instruction::SRem || Opc == Instruction::SDiv);

  FastMathFlags FMF;
  FMF.setFast();
  Builder.setFastMathFlags(FMF);

  if (divHasSpecialOptimization(I, X, Y))
    return nullptr;  // Keep it for later optimization.

  bool IsDiv = Opc == Instruction::UDiv || Opc == Instruction::SDiv;
  bool IsSigned = Opc == Instruction::SRem || Opc == Instruction::SDiv;

  Type *Ty = X->getType();
  Type *I32Ty = Builder.getInt32Ty();
  Type *F32Ty = Builder.getFloatTy();

  if (Ty->getScalarSizeInBits() != 32) {
    if (IsSigned) {
      X = Builder.CreateSExtOrTrunc(X, I32Ty);
      Y = Builder.CreateSExtOrTrunc(Y, I32Ty);
    } else {
      X = Builder.CreateZExtOrTrunc(X, I32Ty);
      Y = Builder.CreateZExtOrTrunc(Y, I32Ty);
    }
  }

  if (Value *Res = expandDivRem24(Builder, I, X, Y, IsDiv, IsSigned)) {
    return IsSigned ? Builder.CreateSExtOrTrunc(Res, Ty) :
                      Builder.CreateZExtOrTrunc(Res, Ty);
  }

  ConstantInt *Zero = Builder.getInt32(0);
  ConstantInt *One = Builder.getInt32(1);

  Value *Sign = nullptr;
  if (IsSigned) {
    Value *SignX = getSign32(X, Builder, DL);
    Value *SignY = getSign32(Y, Builder, DL);
    // Remainder sign is the same as LHS
    Sign = IsDiv ? Builder.CreateXor(SignX, SignY) : SignX;

    X = Builder.CreateAdd(X, SignX);
    Y = Builder.CreateAdd(Y, SignY);

    X = Builder.CreateXor(X, SignX);
    Y = Builder.CreateXor(Y, SignY);
  }

  // The algorithm here is based on ideas from "Software Integer Division", Tom
  // Rodeheffer, August 2008.
  //
  // unsigned udiv(unsigned x, unsigned y) {
  //   // Initial estimate of inv(y). The constant is less than 2^32 to ensure
  //   // that this is a lower bound on inv(y), even if some of the calculations
  //   // round up.
  //   unsigned z = (unsigned)((4294967296.0 - 512.0) * v_rcp_f32((float)y));
  //
  //   // One round of UNR (Unsigned integer Newton-Raphson) to improve z.
  //   // Empirically this is guaranteed to give a "two-y" lower bound on
  //   // inv(y).
  //   z += umulh(z, -y * z);
  //
  //   // Quotient/remainder estimate.
  //   unsigned q = umulh(x, z);
  //   unsigned r = x - q * y;
  //
  //   // Two rounds of quotient/remainder refinement.
  //   if (r >= y) {
  //     ++q;
  //     r -= y;
  //   }
  //   if (r >= y) {
  //     ++q;
  //     r -= y;
  //   }
  //
  //   return q;
  // }

  // Initial estimate of inv(y).
  Value *FloatY = Builder.CreateUIToFP(Y, F32Ty);
  Value *RcpY = Builder.CreateIntrinsic(Intrinsic::amdgcn_rcp, F32Ty, {FloatY});
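  // 0x4F7FFFFE is the bit pattern of 4294966784.0f == 2^32 - 512, the
  // lower-bound scale constant from the algorithm sketch above.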
  Constant *Scale = ConstantFP::get(F32Ty, llvm::bit_cast<float>(0x4F7FFFFE));
  Value *ScaledY = Builder.CreateFMul(RcpY, Scale);
  Value *Z = Builder.CreateFPToUI(ScaledY, I32Ty);

  // One round of UNR.
  Value *NegY = Builder.CreateSub(Zero, Y);
  Value *NegYZ = Builder.CreateMul(NegY, Z);
  Z = Builder.CreateAdd(Z, getMulHu(Builder, Z, NegYZ));

  // Quotient/remainder estimate.
  Value *Q = getMulHu(Builder, X, Z);
  Value *R = Builder.CreateSub(X, Builder.CreateMul(Q, Y));

  // First quotient/remainder refinement.
  Value *Cond = Builder.CreateICmpUGE(R, Y);
  if (IsDiv)
    Q = Builder.CreateSelect(Cond, Builder.CreateAdd(Q, One), Q);
  R = Builder.CreateSelect(Cond, Builder.CreateSub(R, Y), R);

  // Second quotient/remainder refinement.
  Cond = Builder.CreateICmpUGE(R, Y);
  Value *Res;
  if (IsDiv)
    Res = Builder.CreateSelect(Cond, Builder.CreateAdd(Q, One), Q);
  else
    Res = Builder.CreateSelect(Cond, Builder.CreateSub(R, Y), R);

  if (IsSigned) {
    Res = Builder.CreateXor(Res, Sign);
    Res = Builder.CreateSub(Res, Sign);
    Res = Builder.CreateSExtOrTrunc(Res, Ty);
  } else {
    Res = Builder.CreateZExtOrTrunc(Res, Ty);
  }
  return Res;
}

Value *AMDGPUCodeGenPrepareImpl::shrinkDivRem64(IRBuilder<> &Builder,
                                                BinaryOperator &I, Value *Num,
                                                Value *Den) const {
  if (!ExpandDiv64InIR && divHasSpecialOptimization(I, Num, Den))
    return nullptr;  // Keep it for later optimization.

  Instruction::BinaryOps Opc = I.getOpcode();

  bool IsDiv = Opc == Instruction::SDiv || Opc == Instruction::UDiv;
  bool IsSigned = Opc == Instruction::SDiv || Opc == Instruction::SRem;

  unsigned NumDivBits = getDivNumBits(I, Num, Den, 32, IsSigned);
  if (NumDivBits > 32)
    return nullptr;

  Value *Narrowed = nullptr;
  if (NumDivBits <= 24) {
    Narrowed = expandDivRem24Impl(Builder, I, Num, Den, NumDivBits,
                                  IsDiv, IsSigned);
  } else if (NumDivBits <= 32) {
    Narrowed = expandDivRem32(Builder, I, Num, Den);
  }

  if (Narrowed) {
    return IsSigned ? Builder.CreateSExt(Narrowed, Num->getType()) :
                      Builder.CreateZExt(Narrowed, Num->getType());
  }

  return nullptr;
}

void AMDGPUCodeGenPrepareImpl::expandDivRem64(BinaryOperator &I) const {
  Instruction::BinaryOps Opc = I.getOpcode();
  // Do the general expansion.
  if (Opc == Instruction::UDiv || Opc == Instruction::SDiv) {
    expandDivisionUpTo64Bits(&I);
    return;
  }

  if (Opc == Instruction::URem || Opc == Instruction::SRem) {
    expandRemainderUpTo64Bits(&I);
    return;
  }

  llvm_unreachable("not a division");
}

/*
This can cause an inconsistency for non-byte loads, for example:
```
  %load = load i1, ptr addrspace(4) %arg, align 4
  %zext = zext i1 %load to i64
  %add = add i64 %zext, ...
```
Instead of creating `s_and_b32 s0, s0, 1`,
it will create `s_and_b32 s0, s0, 0xff`.
We accept this change since the non-byte load assumes the upper bits
within the byte are all 0.
*/
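// A minimal sketch of the narrowing performed below, assuming the operands
// are known to fit in 16 bits:
//   %r = mul i64 %a, %b
// becomes
//   %ta = trunc i64 %a to i32
//   %tb = trunc i64 %b to i32
//   %tr = mul i32 %ta, %tb
//   %r  = zext i32 %tr to i64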
bool AMDGPUCodeGenPrepareImpl::tryNarrowMathIfNoOverflow(Instruction *I) {
  unsigned Opc = I->getOpcode();
  Type *OldType = I->getType();

  if (Opc != Instruction::Add && Opc != Instruction::Mul)
    return false;

  unsigned OrigBit = OldType->getScalarSizeInBits();

  unsigned MaxBitsNeeded = computeKnownBits(I, DL).countMaxActiveBits();

  MaxBitsNeeded = std::max<unsigned>(bit_ceil(MaxBitsNeeded), 8);
  Type *NewType = DL.getSmallestLegalIntType(I->getContext(), MaxBitsNeeded);
  if (!NewType)
    return false;
  unsigned NewBit = NewType->getIntegerBitWidth();
  if (NewBit >= OrigBit)
    return false;
  NewType = I->getType()->getWithNewBitWidth(NewBit);

  // Old cost
  const TargetTransformInfo &TTI = TM.getTargetTransformInfo(F);
  InstructionCost OldCost = TTI.getArithmeticInstrCost(
      Opc, OldType, TargetTransformInfo::TCK_RecipThroughput);
  // New cost of new op
  InstructionCost NewCost = TTI.getArithmeticInstrCost(
      Opc, NewType, TargetTransformInfo::TCK_RecipThroughput);
  // New cost of narrowing 2 operands (use trunc)
  int NumOfNonConstOps = 2;
  if (isa<Constant>(I->getOperand(0)) || isa<Constant>(I->getOperand(1))) {
    // Cannot be both constant, should be propagated
    NumOfNonConstOps = 1;
  }
  NewCost += NumOfNonConstOps *
             TTI.getCastInstrCost(Instruction::Trunc, NewType, OldType,
                                  TargetTransformInfo::getCastContextHint(I),
                                  TargetTransformInfo::TCK_RecipThroughput);
  // New cost of zext narrowed result to original type
  NewCost +=
      TTI.getCastInstrCost(Instruction::ZExt, OldType, NewType,
                           TargetTransformInfo::getCastContextHint(I),
                           TargetTransformInfo::TCK_RecipThroughput);
  if (NewCost >= OldCost)
    return false;

  IRBuilder<> Builder(I);
  Value *Trunc0 = Builder.CreateTrunc(I->getOperand(0), NewType);
  Value *Trunc1 = Builder.CreateTrunc(I->getOperand(1), NewType);
  Value *Arith =
      Builder.CreateBinOp((Instruction::BinaryOps)Opc, Trunc0, Trunc1);

  Value *Zext = Builder.CreateZExt(Arith, OldType);
  I->replaceAllUsesWith(Zext);
  DeadVals.push_back(I);
  return true;
}

bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
  if (foldBinOpIntoSelect(I))
    return true;

  if (UseMul24Intrin && replaceMulWithMul24(I))
    return true;
  if (tryNarrowMathIfNoOverflow(&I))
    return true;

  bool Changed = false;
  Instruction::BinaryOps Opc = I.getOpcode();
  Type *Ty = I.getType();
  Value *NewDiv = nullptr;
  unsigned ScalarSize = Ty->getScalarSizeInBits();

  SmallVector<BinaryOperator *, 8> Div64ToExpand;

  if ((Opc == Instruction::URem || Opc == Instruction::UDiv ||
       Opc == Instruction::SRem || Opc == Instruction::SDiv) &&
      ScalarSize <= 64 &&
      !DisableIDivExpand) {
    Value *Num = I.getOperand(0);
    Value *Den = I.getOperand(1);
    IRBuilder<> Builder(&I);
    Builder.SetCurrentDebugLocation(I.getDebugLoc());

    if (auto *VT = dyn_cast<FixedVectorType>(Ty)) {
      NewDiv = PoisonValue::get(VT);

      for (unsigned N = 0, E = VT->getNumElements(); N != E; ++N) {
        Value *NumEltN = Builder.CreateExtractElement(Num, N);
        Value *DenEltN = Builder.CreateExtractElement(Den, N);

        Value *NewElt;
        if (ScalarSize <= 32) {
          NewElt = expandDivRem32(Builder, I, NumEltN, DenEltN);
          if (!NewElt)
            NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
        } else {
          // See if this 64-bit division can be shrunk to 32/24-bits before
          // producing the general expansion.
          NewElt = shrinkDivRem64(Builder, I, NumEltN, DenEltN);
          if (!NewElt) {
            // The general 64-bit expansion introduces control flow and doesn't
            // return the new value. Just insert a scalar copy and defer
            // expanding it.
            NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
            // CreateBinOp does constant folding. If the operands are constant,
            // it will return a Constant instead of a BinaryOperator.
            if (auto *NewEltBO = dyn_cast<BinaryOperator>(NewElt))
              Div64ToExpand.push_back(NewEltBO);
          }
        }

        if (auto *NewEltI = dyn_cast<Instruction>(NewElt))
          NewEltI->copyIRFlags(&I);

        NewDiv = Builder.CreateInsertElement(NewDiv, NewElt, N);
      }
    } else {
      if (ScalarSize <= 32)
        NewDiv = expandDivRem32(Builder, I, Num, Den);
      else {
        NewDiv = shrinkDivRem64(Builder, I, Num, Den);
        if (!NewDiv)
          Div64ToExpand.push_back(&I);
      }
    }

    if (NewDiv) {
      I.replaceAllUsesWith(NewDiv);
      DeadVals.push_back(&I);
      Changed = true;
    }
  }

  if (ExpandDiv64InIR) {
    // TODO: We get much worse code in specially handled constant cases.
    for (BinaryOperator *Div : Div64ToExpand) {
      expandDivRem64(*Div);
      FlowChanged = true;
      Changed = true;
    }
  }

  return Changed;
}

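// A sketch of the widening performed below: a uniform, sufficiently aligned
// sub-dword load from constant memory such as
//   %v = load i8, ptr addrspace(4) %p, align 4
// becomes
//   %w = load i32, ptr addrspace(4) %p, align 4
//   %v = trunc i32 %w to i8
// so that instruction selection can use a scalar load instead of a wider
// buffer/vector load.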
bool AMDGPUCodeGenPrepareImpl::visitLoadInst(LoadInst &I) {
  if (!WidenLoads)
    return false;

  if ((I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
       I.getPointerAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
      canWidenScalarExtLoad(I)) {
    IRBuilder<> Builder(&I);
    Builder.SetCurrentDebugLocation(I.getDebugLoc());

    Type *I32Ty = Builder.getInt32Ty();
    LoadInst *WidenLoad = Builder.CreateLoad(I32Ty, I.getPointerOperand());
    WidenLoad->copyMetadata(I);

    // If we have range metadata, we need to convert the type, and not make
    // assumptions about the high bits.
    if (auto *Range = WidenLoad->getMetadata(LLVMContext::MD_range)) {
      ConstantInt *Lower =
          mdconst::extract<ConstantInt>(Range->getOperand(0));

      if (Lower->isNullValue()) {
        WidenLoad->setMetadata(LLVMContext::MD_range, nullptr);
      } else {
        Metadata *LowAndHigh[] = {
            ConstantAsMetadata::get(
                ConstantInt::get(I32Ty, Lower->getValue().zext(32))),
            // Don't make assumptions about the high bits.
            ConstantAsMetadata::get(ConstantInt::get(I32Ty, 0))};

        WidenLoad->setMetadata(LLVMContext::MD_range,
                               MDNode::get(F.getContext(), LowAndHigh));
      }
    }

    int TySize = DL.getTypeSizeInBits(I.getType());
    Type *IntNTy = Builder.getIntNTy(TySize);
    Value *ValTrunc = Builder.CreateTrunc(WidenLoad, IntNTy);
    Value *ValOrig = Builder.CreateBitCast(ValTrunc, I.getType());
    I.replaceAllUsesWith(ValOrig);
    DeadVals.push_back(&I);
    return true;
  }

  return false;
}

bool AMDGPUCodeGenPrepareImpl::visitSelectInst(SelectInst &I) {
  FPMathOperator *FPOp = dyn_cast<FPMathOperator>(&I);
  if (!FPOp)
    return false;

  Value *X;
  Value *Fract = nullptr;

  // Match:
  // (x - floor(x)) >= MIN_CONSTANT ? MIN_CONSTANT : (x - floor(x))
  //
  // This is the preferred way to implement fract.
  // TODO: Could also match with compare against 1.0
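  //
  // When this form is recognized, applyFractPat rewrites it in terms of the
  // hardware fract operation, which already clamps its result below 1.0.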
1609 const APFloat *C;
1611 Value *FractSrc = matchFractPatImpl(*X, *C);
1612 if (!FractSrc)
1613 return false;
1614 IRBuilder<> Builder(&I);
1615 Builder.setFastMathFlags(FPOp->getFastMathFlags());
1616 Fract = applyFractPat(Builder, FractSrc);
1617 } else {
1618 // Match patterns which may appear in legacy implementations of the fract()
1619 // function, built around the nan-avoidant minnum intrinsic. These are the
1620 // core pattern plus additional clamping of inf and nan values on the
1621 // result.
1622 Value *Cond = I.getCondition();
1623 Value *TrueVal = I.getTrueValue();
1624 Value *FalseVal = I.getFalseValue();
1625 Value *CmpVal;
1626 CmpPredicate IsNanPred;
1627
1628 // Match fract pattern with nan check.
1629 if (!match(Cond, m_FCmp(IsNanPred, m_Value(CmpVal), m_NonNaN())))
1630 return false;
1631
1632 IRBuilder<> Builder(&I);
1633 Builder.setFastMathFlags(FPOp->getFastMathFlags());
1634
1635 if (IsNanPred == FCmpInst::FCMP_UNO && TrueVal == CmpVal &&
1636 CmpVal == matchFractPatNanAvoidant(*FalseVal)) {
1637 // isnan(x) ? x : fract(x)
1638 Fract = applyFractPat(Builder, CmpVal);
1639 } else if (IsNanPred == FCmpInst::FCMP_ORD && FalseVal == CmpVal) {
1640 if (CmpVal == matchFractPatNanAvoidant(*TrueVal)) {
1641 // !isnan(x) ? fract(x) : x
1642 Fract = applyFractPat(Builder, CmpVal);
1643 } else {
1644 // Match an intermediate clamp infinity to 0 pattern. i.e.
1645 // !isnan(x) ? (!isinf(x) ? fract(x) : 0.0) : x
1646 CmpPredicate PredInf;
1647 Value *IfNotInf;
1648
1649 if (!match(TrueVal, m_Select(m_FCmp(PredInf, m_FAbs(m_Specific(CmpVal)),
1650 m_PosInf()),
1651 m_Value(IfNotInf), m_PosZeroFP())) ||
1652 PredInf != FCmpInst::FCMP_UNE ||
1653 CmpVal != matchFractPatNanAvoidant(*IfNotInf))
1654 return false;
1655
1656 SelectInst *ClampInfSelect = cast<SelectInst>(TrueVal);
1657
1658 // Insert before the fabs
1659 Value *InsertPt =
1660 cast<Instruction>(ClampInfSelect->getCondition())->getOperand(0);
1661
1662 Builder.SetInsertPoint(cast<Instruction>(InsertPt));
1663 Value *NewFract = applyFractPat(Builder, CmpVal);
1664 NewFract->takeName(TrueVal);
1665
1666 // Thread the new fract into the inf clamping sequence.
1667 DeadVals.push_back(ClampInfSelect->getOperand(1));
1668 ClampInfSelect->setOperand(1, NewFract);
1669
1670 // The outer select nan handling is also absorbed into the fract.
1671 Fract = ClampInfSelect;
1672 }
1673 } else
1674 return false;
1675 }
1676
1677 Fract->takeName(&I);
1678 I.replaceAllUsesWith(Fract);
1679 DeadVals.push_back(&I);
1680 return true;
1681}
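
As a hedged f32 sketch (value names invented), the select form recognized here and the intrinsic it becomes:

    ; Before: x - floor(x), clamped to nextafter(1.0, -1.0).
    %floor = call float @llvm.floor.f32(float %x)
    %sub = fsub float %x, %floor
    %cmp = fcmp oge float %sub, 0x3FEFFFFFE0000000
    %fract = select i1 %cmp, float 0x3FEFFFFFE0000000, float %sub

    ; After:
    %fract = call float @llvm.amdgcn.fract.f32(float %x)
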
1682
1683static bool areInSameBB(const Value *A, const Value *B) {
1684 const auto *IA = dyn_cast<Instruction>(A);
1685 const auto *IB = dyn_cast<Instruction>(B);
1686 return IA && IB && IA->getParent() == IB->getParent();
1687}
1688
1689// Helper for breaking large PHIs that returns true when an extractelement on V
1690// is likely to be folded away by the DAG combiner.
1691static bool isInterestingPHIIncomingValue(const Value *V) {
 1692 const auto *FVT = dyn_cast<FixedVectorType>(V->getType());
1693 if (!FVT)
1694 return false;
1695
1696 const Value *CurVal = V;
1697
1698 // Check for insertelements, keeping track of the elements covered.
1699 BitVector EltsCovered(FVT->getNumElements());
1700 while (const auto *IE = dyn_cast<InsertElementInst>(CurVal)) {
1701 const auto *Idx = dyn_cast<ConstantInt>(IE->getOperand(2));
1702
 1703 // A non-constant or out-of-bounds index makes folding unlikely.
1704 // The latter is more of a sanity check because canonical IR should just
1705 // have replaced those with poison.
1706 if (!Idx || Idx->getZExtValue() >= FVT->getNumElements())
1707 return false;
1708
1709 const auto *VecSrc = IE->getOperand(0);
1710
1711 // If the vector source is another instruction, it must be in the same basic
1712 // block. Otherwise, the DAGCombiner won't see the whole thing and is
1713 // unlikely to be able to do anything interesting here.
1714 if (isa<Instruction>(VecSrc) && !areInSameBB(VecSrc, IE))
1715 return false;
1716
1717 CurVal = VecSrc;
1718 EltsCovered.set(Idx->getZExtValue());
1719
1720 // All elements covered.
1721 if (EltsCovered.all())
1722 return true;
1723 }
1724
1725 // We either didn't find a single insertelement, or the insertelement chain
1726 // ended before all elements were covered. Check for other interesting values.
1727
1728 // Constants are always interesting because we can just constant fold the
1729 // extractelements.
1730 if (isa<Constant>(CurVal))
1731 return true;
1732
1733 // shufflevector is likely to be profitable if either operand is a constant,
1734 // or if either source is in the same block.
1735 // This is because shufflevector is most often lowered as a series of
1736 // insert/extract elements anyway.
1737 if (const auto *SV = dyn_cast<ShuffleVectorInst>(CurVal)) {
1738 return isa<Constant>(SV->getOperand(1)) ||
1739 areInSameBB(SV, SV->getOperand(0)) ||
1740 areInSameBB(SV, SV->getOperand(1));
1741 }
1742
1743 return false;
1744}
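
A small sketch of an "interesting" incoming value (hypothetical names): an insertelement chain covering every lane lets the DAG combiner fold the extracts back to the scalars:

    %v0 = insertelement <2 x i16> poison, i16 %a, i64 0
    %v1 = insertelement <2 x i16> %v0, i16 %b, i64 1
    ; extractelement <2 x i16> %v1, i64 0 folds to %a
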
1745
1746static void collectPHINodes(const PHINode &I,
 1747 SmallPtrSet<const PHINode *, 8> &SeenPHIs) {
 1748 const auto [It, Inserted] = SeenPHIs.insert(&I);
1749 if (!Inserted)
1750 return;
1751
1752 for (const Value *Inc : I.incoming_values()) {
1753 if (const auto *PhiInc = dyn_cast<PHINode>(Inc))
1754 collectPHINodes(*PhiInc, SeenPHIs);
1755 }
1756
1757 for (const User *U : I.users()) {
1758 if (const auto *PhiU = dyn_cast<PHINode>(U))
1759 collectPHINodes(*PhiU, SeenPHIs);
1760 }
1761}
1762
1763bool AMDGPUCodeGenPrepareImpl::canBreakPHINode(const PHINode &I) {
1764 // Check in the cache first.
1765 if (const auto It = BreakPhiNodesCache.find(&I);
1766 It != BreakPhiNodesCache.end())
1767 return It->second;
1768
1769 // We consider PHI nodes as part of "chains", so given a PHI node I, we
1770 // recursively consider all its users and incoming values that are also PHI
1771 // nodes. We then make a decision about all of those PHIs at once. Either they
1772 // all get broken up, or none of them do. That way, we avoid cases where a
1773 // single PHI is/is not broken and we end up reforming/exploding a vector
1774 // multiple times, or even worse, doing it in a loop.
1775 SmallPtrSet<const PHINode *, 8> WorkList;
1776 collectPHINodes(I, WorkList);
1777
1778#ifndef NDEBUG
1779 // Check that none of the PHI nodes in the worklist are in the map. If some of
1780 // them are, it means we're not good enough at collecting related PHIs.
1781 for (const PHINode *WLP : WorkList) {
1782 assert(BreakPhiNodesCache.count(WLP) == 0);
1783 }
1784#endif
1785
1786 // To consider a PHI profitable to break, we need to see some interesting
1787 // incoming values. At least 2/3rd (rounded up) of all PHIs in the worklist
1788 // must have one to consider all PHIs breakable.
1789 //
1790 // This threshold has been determined through performance testing.
1791 //
1792 // Note that the computation below is equivalent to
1793 //
1794 // (unsigned)ceil((K / 3.0) * 2)
1795 //
1796 // It's simply written this way to avoid mixing integral/FP arithmetic.
1797 const auto Threshold = (alignTo(WorkList.size() * 2, 3) / 3);
1798 unsigned NumBreakablePHIs = 0;
1799 bool CanBreak = false;
1800 for (const PHINode *Cur : WorkList) {
1801 // Don't break PHIs that have no interesting incoming values. That is, where
1802 // there is no clear opportunity to fold the "extractelement" instructions
1803 // we would add.
1804 //
 1805 // Note: InstCombine does not run after this pass, so we're only interested
 1806 // in the foldings that the DAG combiner can do.
1807 if (any_of(Cur->incoming_values(), isInterestingPHIIncomingValue)) {
1808 if (++NumBreakablePHIs >= Threshold) {
1809 CanBreak = true;
1810 break;
1811 }
1812 }
1813 }
1814
1815 for (const PHINode *Cur : WorkList)
1816 BreakPhiNodesCache[Cur] = CanBreak;
1817
1818 return CanBreak;
1819}
1820
1821/// Helper class for "break large PHIs" (visitPHINode).
1822///
1823/// This represents a slice of a PHI's incoming value, which is made up of:
1824/// - The type of the slice (Ty)
1825/// - The index in the incoming value's vector where the slice starts (Idx)
1826/// - The number of elements in the slice (NumElts).
1827/// It also keeps track of the NewPHI node inserted for this particular slice.
1828///
1829/// Slice examples:
1830/// <4 x i64> -> Split into four i64 slices.
1831/// -> [i64, 0, 1], [i64, 1, 1], [i64, 2, 1], [i64, 3, 1]
1832/// <5 x i16> -> Split into 2 <2 x i16> slices + a i16 tail.
1833/// -> [<2 x i16>, 0, 2], [<2 x i16>, 2, 2], [i16, 4, 1]
1835public:
1836 VectorSlice(Type *Ty, unsigned Idx, unsigned NumElts)
1837 : Ty(Ty), Idx(Idx), NumElts(NumElts) {}
1838
1839 Type *Ty = nullptr;
1840 unsigned Idx = 0;
1841 unsigned NumElts = 0;
1842 PHINode *NewPHI = nullptr;
1843
1844 /// Slice \p Inc according to the information contained within this slice.
1845 /// This is cached, so if called multiple times for the same \p BB & \p Inc
1846 /// pair, it returns the same Sliced value as well.
1847 ///
1848 /// Note this *intentionally* does not return the same value for, say,
1849 /// [%bb.0, %0] & [%bb.1, %0] as:
 1850 /// - It could cause issues with dominance (e.g. if bb.1 is seen first, the
 1851 /// value created in bb.1 may not be reachable from bb.0 when bb.0 is a
 1852 /// predecessor of bb.1).
1853 /// - We also want to make our extract instructions as local as possible so
1854 /// the DAG has better chances of folding them out. Duplicating them like
1855 /// that is beneficial in that regard.
1856 ///
 1857 /// This is both a minor optimization to avoid creating duplicate
 1858 /// instructions and a requirement for correctness. It is not forbidden
1859 /// for a PHI node to have the same [BB, Val] pair multiple times. If we
1860 /// returned a new value each time, those previously identical pairs would all
1861 /// have different incoming values (from the same block) and it'd cause a "PHI
1862 /// node has multiple entries for the same basic block with different incoming
1863 /// values!" verifier error.
1864 Value *getSlicedVal(BasicBlock *BB, Value *Inc, StringRef NewValName) {
1865 Value *&Res = SlicedVals[{BB, Inc}];
1866 if (Res)
1867 return Res;
 1868
 1869 IRBuilder<> B(BB->getTerminator());
 1870 if (Instruction *IncInst = dyn_cast<Instruction>(Inc))
1871 B.SetCurrentDebugLocation(IncInst->getDebugLoc());
1872
 1873 if (NumElts > 1) {
 1874 SmallVector<int, 4> Mask;
 1875 for (unsigned K = Idx; K < (Idx + NumElts); ++K)
1876 Mask.push_back(K);
1877 Res = B.CreateShuffleVector(Inc, Mask, NewValName);
1878 } else
1879 Res = B.CreateExtractElement(Inc, Idx, NewValName);
1880
1881 return Res;
1882 }
1883
 1884private:
 1885 SmallDenseMap<std::pair<BasicBlock *, Value *>, Value *> SlicedVals;
1886};
1887
1888bool AMDGPUCodeGenPrepareImpl::visitPHINode(PHINode &I) {
1889 // Break-up fixed-vector PHIs into smaller pieces.
1890 // Default threshold is 32, so it breaks up any vector that's >32 bits into
1891 // its elements, or into 32-bit pieces (for 8/16 bit elts).
1892 //
1893 // This is only helpful for DAGISel because it doesn't handle large PHIs as
1894 // well as GlobalISel. DAGISel lowers PHIs by using CopyToReg/CopyFromReg.
1895 // With large, odd-sized PHIs we may end up needing many `build_vector`
1896 // operations with most elements being "undef". This inhibits a lot of
1897 // optimization opportunities and can result in unreasonably high register
1898 // pressure and the inevitable stack spilling.
1899 if (!BreakLargePHIs || getCGPassBuilderOption().EnableGlobalISelOption)
1900 return false;
1901
1902 FixedVectorType *FVT = dyn_cast<FixedVectorType>(I.getType());
1903 if (!FVT || FVT->getNumElements() == 1 ||
1904 DL.getTypeSizeInBits(FVT) <= BreakLargePHIsThreshold)
1905 return false;
1906
1907 if (!ForceBreakLargePHIs && !canBreakPHINode(I))
1908 return false;
1909
1910 std::vector<VectorSlice> Slices;
1911
1912 Type *EltTy = FVT->getElementType();
1913 {
1914 unsigned Idx = 0;
 1915 // For 8/16-bit element types, don't scalarize fully; break the vector into
 1916 // as many 32-bit slices as we can, and scalarize the tail.
1917 const unsigned EltSize = DL.getTypeSizeInBits(EltTy);
1918 const unsigned NumElts = FVT->getNumElements();
1919 if (EltSize == 8 || EltSize == 16) {
1920 const unsigned SubVecSize = (32 / EltSize);
1921 Type *SubVecTy = FixedVectorType::get(EltTy, SubVecSize);
1922 for (unsigned End = alignDown(NumElts, SubVecSize); Idx < End;
1923 Idx += SubVecSize)
1924 Slices.emplace_back(SubVecTy, Idx, SubVecSize);
1925 }
1926
1927 // Scalarize all remaining elements.
1928 for (; Idx < NumElts; ++Idx)
1929 Slices.emplace_back(EltTy, Idx, 1);
1930 }
1931
1932 assert(Slices.size() > 1);
1933
1934 // Create one PHI per vector piece. The "VectorSlice" class takes care of
1935 // creating the necessary instruction to extract the relevant slices of each
1936 // incoming value.
1937 IRBuilder<> B(I.getParent());
1938 B.SetCurrentDebugLocation(I.getDebugLoc());
1939
1940 unsigned IncNameSuffix = 0;
1941 for (VectorSlice &S : Slices) {
 1942 // We need to reset the builder on each iteration, because getSlicedVal may
 1943 // have inserted something into I's BB.
1944 B.SetInsertPoint(I.getParent()->getFirstNonPHIIt());
1945 S.NewPHI = B.CreatePHI(S.Ty, I.getNumIncomingValues());
1946
1947 for (const auto &[Idx, BB] : enumerate(I.blocks())) {
1948 S.NewPHI->addIncoming(S.getSlicedVal(BB, I.getIncomingValue(Idx),
1949 "largephi.extractslice" +
1950 std::to_string(IncNameSuffix++)),
1951 BB);
1952 }
1953 }
1954
1955 // And replace this PHI with a vector of all the previous PHI values.
1956 Value *Vec = PoisonValue::get(FVT);
1957 unsigned NameSuffix = 0;
1958 for (VectorSlice &S : Slices) {
1959 const auto ValName = "largephi.insertslice" + std::to_string(NameSuffix++);
1960 if (S.NumElts > 1)
1961 Vec = B.CreateInsertVector(FVT, Vec, S.NewPHI, S.Idx, ValName);
1962 else
1963 Vec = B.CreateInsertElement(Vec, S.NewPHI, S.Idx, ValName);
1964 }
1965
1966 I.replaceAllUsesWith(Vec);
1967 DeadVals.push_back(&I);
1968 return true;
1969}
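
A hedged sketch of the break-up for a <4 x i16> PHI (64 bits, above the default 32-bit threshold; names invented). Each slice gets its own PHI, and the original vector is rebuilt once:

    ; Before:
    %p = phi <4 x i16> [ %a, %bb0 ], [ %b, %bb1 ]

    ; After: two <2 x i16> slice PHIs fed by per-predecessor shuffles
    ; (%a.lo, %a.hi, ...), reassembled with llvm.vector.insert.
    %s0 = phi <2 x i16> [ %a.lo, %bb0 ], [ %b.lo, %bb1 ]
    %s1 = phi <2 x i16> [ %a.hi, %bb0 ], [ %b.hi, %bb1 ]
    %v0 = call <4 x i16> @llvm.vector.insert.v4i16.v2i16(<4 x i16> poison, <2 x i16> %s0, i64 0)
    %v = call <4 x i16> @llvm.vector.insert.v4i16.v2i16(<4 x i16> %v0, <2 x i16> %s1, i64 2)
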
1970
1971/// \param V Value to check
1972/// \param DL DataLayout
1973/// \param TM TargetMachine (TODO: remove once DL contains nullptr values)
1974/// \param AS Target Address Space
1975/// \return true if \p V cannot be the null value of \p AS, false otherwise.
1976static bool isPtrKnownNeverNull(const Value *V, const DataLayout &DL,
1977 const AMDGPUTargetMachine &TM, unsigned AS) {
1978 // Pointer cannot be null if it's a block address, GV or alloca.
1979 // NOTE: We don't support extern_weak, but if we did, we'd need to check for
 1980 // it as the symbol could be null in such cases.
 1981 if (isa<BlockAddress, GlobalValue, AllocaInst>(V))
 1982 return true;
1983
1984 // Check nonnull arguments.
1985 if (const auto *Arg = dyn_cast<Argument>(V); Arg && Arg->hasNonNullAttr())
1986 return true;
1987
1988 // Check nonnull loads.
1989 if (const auto *Load = dyn_cast<LoadInst>(V);
1990 Load && Load->hasMetadata(LLVMContext::MD_nonnull))
1991 return true;
1992
 1993 // getUnderlyingObject may have looked through another addrspacecast, although
 1994 // such optimizable situations have most likely been folded out by now.
1995 if (AS != cast<PointerType>(V->getType())->getAddressSpace())
1996 return false;
1997
1998 // TODO: Calls that return nonnull?
1999
2000 // For all other things, use KnownBits.
2001 // We either use 0 or all bits set to indicate null, so check whether the
2002 // value can be zero or all ones.
2003 //
2004 // TODO: Use ValueTracking's isKnownNeverNull if it becomes aware that some
2005 // address spaces have non-zero null values.
2006 auto SrcPtrKB = computeKnownBits(V, DL);
2007 const auto NullVal = AMDGPU::getNullPointerValue(AS);
2008
2009 assert(SrcPtrKB.getBitWidth() == DL.getPointerSizeInBits(AS));
2010 assert((NullVal == 0 || NullVal == -1) &&
2011 "don't know how to check for this null value!");
2012 return NullVal ? !SrcPtrKB.getMaxValue().isAllOnes() : SrcPtrKB.isNonZero();
2013}
2014
2015bool AMDGPUCodeGenPrepareImpl::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
 2016 // The intrinsic doesn't support vectors. It also seems difficult to prove
 2017 // that a vector cannot contain any null pointers, so it's unclear whether
 2018 // supporting them would be worthwhile.
2019 if (I.getType()->isVectorTy())
2020 return false;
2021
 2022 // Check if this can be lowered to an amdgcn.addrspacecast.nonnull.
 2023 // This is only worthwhile for casts between flat and private/local.
2024 const unsigned SrcAS = I.getSrcAddressSpace();
2025 const unsigned DstAS = I.getDestAddressSpace();
2026
2027 bool CanLower = false;
2028 if (SrcAS == AMDGPUAS::FLAT_ADDRESS)
2029 CanLower = (DstAS == AMDGPUAS::LOCAL_ADDRESS ||
2030 DstAS == AMDGPUAS::PRIVATE_ADDRESS);
2031 else if (DstAS == AMDGPUAS::FLAT_ADDRESS)
2032 CanLower = (SrcAS == AMDGPUAS::LOCAL_ADDRESS ||
2033 SrcAS == AMDGPUAS::PRIVATE_ADDRESS);
2034 if (!CanLower)
2035 return false;
2036
2038 getUnderlyingObjects(I.getOperand(0), WorkList);
2039 if (!all_of(WorkList, [&](const Value *V) {
2040 return isPtrKnownNeverNull(V, DL, TM, SrcAS);
2041 }))
2042 return false;
2043
2044 IRBuilder<> B(&I);
2045 auto *Intrin = B.CreateIntrinsic(
2046 I.getType(), Intrinsic::amdgcn_addrspacecast_nonnull, {I.getOperand(0)});
2047 I.replaceAllUsesWith(Intrin);
2048 DeadVals.push_back(&I);
2049 return true;
2050}
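
A minimal sketch (hypothetical LDS pointer known non-null; overload suffix assumed from the usual pointer-type mangling) of the rewrite:

    ; Before:
    %flat = addrspacecast ptr addrspace(3) %lds to ptr

    ; After: the non-null knowledge is encoded so codegen can skip the null check.
    %flat = call ptr @llvm.amdgcn.addrspacecast.nonnull.p0.p3(ptr addrspace(3) %lds)
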
2051
2052bool AMDGPUCodeGenPrepareImpl::visitIntrinsicInst(IntrinsicInst &I) {
2053 Intrinsic::ID IID = I.getIntrinsicID();
2054 switch (IID) {
2055 case Intrinsic::minnum:
2056 case Intrinsic::minimumnum:
2057 case Intrinsic::minimum:
2058 return visitFMinLike(I);
2059 case Intrinsic::sqrt:
2060 return visitSqrt(I);
2061 case Intrinsic::log:
2062 case Intrinsic::log10:
2063 return visitLog(cast<FPMathOperator>(I), IID);
2064 case Intrinsic::log2:
2065 // No reason to handle log2.
2066 return false;
2067 case Intrinsic::amdgcn_mbcnt_lo:
2068 return visitMbcntLo(I);
2069 case Intrinsic::amdgcn_mbcnt_hi:
2070 return visitMbcntHi(I);
2071 case Intrinsic::vector_reduce_add:
2072 return visitVectorReduceAdd(I);
2073 case Intrinsic::uadd_sat:
2074 case Intrinsic::sadd_sat:
2075 return visitSaturatingAdd(I);
2076 default:
2077 return false;
2078 }
2079}
2080
2081/// Match the core sequence of the fract pattern, x - floor(x), which does not
2082/// need to consider edge-case handling.
2083Value *AMDGPUCodeGenPrepareImpl::matchFractPatImpl(Value &FractSrc,
2084 const APFloat &C) const {
2085 if (ST.hasFractBug())
2086 return nullptr;
2087
2088 Type *Ty = FractSrc.getType();
2089 if (!isLegalFloatingTy(Ty->getScalarType()))
2090 return nullptr;
2091
2092 APFloat OneNextDown = APFloat::getOne(C.getSemantics());
2093 OneNextDown.next(true);
2094
2095 // Match nextafter(1.0, -1)
2096 if (OneNextDown != C)
2097 return nullptr;
2098
2099 Value *FloorSrc;
2100 if (match(&FractSrc, m_FSub(m_Value(FloorSrc), m_Intrinsic<Intrinsic::floor>(
2101 m_Deferred(FloorSrc)))))
2102 return FloorSrc;
2103 return nullptr;
2104}
2105
2106/// Match the nan-avoidant fract pattern, where MIN_CONSTANT is
2107/// nextafter(1.0, -1.0):
2108///   minnum(fsub(x, floor(x)), MIN_CONSTANT)
2109///   minimumnum(fsub(x, floor(x)), MIN_CONSTANT)
2110///   minimum(fsub(x, floor(x)), MIN_CONSTANT)
2111///   x_sub_floor >= MIN_CONSTANT ? MIN_CONSTANT : x_sub_floor
2112///
2113/// Only matches if fract is a useful instruction for the subtarget. Does not
2114/// account for nan handling; the hardware instruction has a nan check on the
2115/// input value.
2116Value *AMDGPUCodeGenPrepareImpl::matchFractPatNanAvoidant(Value &V) {
2117 Value *Arg0;
2118 const APFloat *C;
2119
2120 // The value is only used in contexts where we know the input isn't a nan, so
2121 // any of the fmin variants are fine.
 2122 if (!match(&V,
 2123 m_CombineOr(m_FMinNum_or_FMinimumNum(m_Value(Arg0), m_APFloatAllowPoison(C)),
 2124 m_FMinimum(m_Value(Arg0), m_APFloatAllowPoison(C)),
 2125 m_UnordFMin(m_Value(Arg0), m_APFloatAllowPoison(C)))))
 2126 return nullptr;
2127
2128 return matchFractPatImpl(*Arg0, *C);
2129}
2130
2131Value *AMDGPUCodeGenPrepareImpl::applyFractPat(IRBuilder<> &Builder,
2132 Value *FractArg) {
2133 SmallVector<Value *, 4> FractVals;
2134 extractValues(Builder, FractVals, FractArg);
2135
2136 SmallVector<Value *, 4> ResultVals(FractVals.size());
2137
2138 Type *Ty = FractArg->getType()->getScalarType();
2139 for (unsigned I = 0, E = FractVals.size(); I != E; ++I) {
2140 ResultVals[I] =
2141 Builder.CreateIntrinsic(Intrinsic::amdgcn_fract, {Ty}, {FractVals[I]});
2142 }
2143
2144 return insertValues(Builder, FractArg->getType(), ResultVals);
2145}
2146
2147bool AMDGPUCodeGenPrepareImpl::visitFMinLike(IntrinsicInst &I) {
2148 const APFloat *C;
2149 Value *FractArg;
2150
2151 // minimum(x - floor(x), MIN_CONSTANT)
2152 Value *X;
 2153 if (!ST.hasFractBug() &&
 2154 match(&I, m_FMinimum(m_Value(X), m_APFloatAllowPoison(C)))) {
 2155 FractArg = matchFractPatImpl(*X, *C);
2156 if (!FractArg)
2157 return false;
2158 } else {
2159 // minnum(x - floor(x), MIN_CONSTANT)
2160 FractArg = matchFractPatNanAvoidant(I);
2161 if (!FractArg)
2162 return false;
2163
2164 // Match pattern for fract intrinsic in contexts where the nan check has
2165 // been optimized out (and hope the knowledge the source can't be nan wasn't
2166 // lost).
2167 if (!I.hasNoNaNs() && !isKnownNeverNaN(FractArg, SQ.getWithInstruction(&I)))
2168 return false;
2169 }
2170
2171 IRBuilder<> Builder(&I);
2172 FastMathFlags FMF = I.getFastMathFlags();
2173 FMF.setNoNaNs();
2174 Builder.setFastMathFlags(FMF);
2175
2176 Value *Fract = applyFractPat(Builder, FractArg);
2177 Fract->takeName(&I);
2178 I.replaceAllUsesWith(Fract);
2179 DeadVals.push_back(&I);
2180 return true;
2181}
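
As a hedged f32 sketch (invented names; non-nan-ness established via the nnan flag), the minnum form and its replacement:

    ; Before:
    %floor = call float @llvm.floor.f32(float %x)
    %sub = fsub nnan float %x, %floor
    %min = call nnan float @llvm.minnum.f32(float %sub, float 0x3FEFFFFFE0000000)

    ; After:
    %min = call nnan float @llvm.amdgcn.fract.f32(float %x)
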
2182
2183// Expand llvm.sqrt.f32 calls with !fpmath metadata in a semi-fast way.
2184bool AMDGPUCodeGenPrepareImpl::visitSqrt(IntrinsicInst &Sqrt) {
2185 Type *Ty = Sqrt.getType()->getScalarType();
2186 if (!Ty->isFloatTy() && (!Ty->isHalfTy() || ST.has16BitInsts()))
2187 return false;
2188
2189 const FPMathOperator *FPOp = cast<const FPMathOperator>(&Sqrt);
2190 FastMathFlags SqrtFMF = FPOp->getFastMathFlags();
2191
2192 // We're trying to handle the fast-but-not-that-fast case only. The lowering
2193 // of fast llvm.sqrt will give the raw instruction anyway.
2194 if (SqrtFMF.approxFunc())
2195 return false;
2196
2197 const float ReqdAccuracy = FPOp->getFPAccuracy();
2198
2199 // Defer correctly rounded expansion to codegen.
2200 if (ReqdAccuracy < 1.0f)
2201 return false;
2202
2203 Value *SrcVal = Sqrt.getOperand(0);
2204 bool CanTreatAsDAZ = canIgnoreDenormalInput(SrcVal, &Sqrt);
2205
2206 // The raw instruction is 1 ulp, but the correction for denormal handling
2207 // brings it to 2.
2208 if (!CanTreatAsDAZ && ReqdAccuracy < 2.0f)
2209 return false;
2210
2211 IRBuilder<> Builder(&Sqrt);
2212 SmallVector<Value *, 4> SrcVals;
2213 extractValues(Builder, SrcVals, SrcVal);
2214
2215 SmallVector<Value *, 4> ResultVals(SrcVals.size());
2216 for (int I = 0, E = SrcVals.size(); I != E; ++I) {
2217 if (CanTreatAsDAZ)
2218 ResultVals[I] = Builder.CreateCall(getSqrtF32(), SrcVals[I]);
2219 else
2220 ResultVals[I] = emitSqrtIEEE2ULP(Builder, SrcVals[I], SqrtFMF);
2221 }
2222
2223 Value *NewSqrt = insertValues(Builder, Sqrt.getType(), ResultVals);
2224 NewSqrt->takeName(&Sqrt);
2225 Sqrt.replaceAllUsesWith(NewSqrt);
2226 DeadVals.push_back(&Sqrt);
2227 return true;
2228}
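
A minimal sketch of a candidate (hypothetical accuracy value; the !fpmath demand must be at least 2.0 ulp unless the input can be treated as denormal-flushed):

    %r = call float @llvm.sqrt.f32(float %x), !fpmath !0
    !0 = !{float 2.500000e+00}

Depending on canIgnoreDenormalInput, this becomes either a direct call to the subtarget's f32 sqrt or the 2 ulp IEEE expansion.
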
2229
2230/// Replace log and log10 intrinsic calls based on fpmath metadata.
2231bool AMDGPUCodeGenPrepareImpl::visitLog(FPMathOperator &Log,
2232 Intrinsic::ID IID) {
2233 Type *Ty = Log.getType();
2234 if (!Ty->getScalarType()->isHalfTy() || !ST.has16BitInsts())
2235 return false;
2236
2237 FastMathFlags FMF = Log.getFastMathFlags();
2238
2239 // Defer fast math cases to codegen.
2240 if (FMF.approxFunc())
2241 return false;
2242
 2243 // Limit experimentally determined from the OpenCL conformance test (1.79 ulp).
2244 if (Log.getFPAccuracy() < 1.80f)
2245 return false;
2246
2247 IRBuilder<> Builder(&cast<CallInst>(Log));
2248
2249 // Use the generic intrinsic for convenience in the vector case. Codegen will
2250 // recognize the denormal handling is not necessary from the fpext.
2251 // TODO: Move to generic code
2252 Value *Log2 =
2253 Builder.CreateUnaryIntrinsic(Intrinsic::log2, Log.getOperand(0), FMF);
2254
2255 double Log2BaseInverted =
2256 IID == Intrinsic::log10 ? numbers::ln2 / numbers::ln10 : numbers::ln2;
2257 Value *Mul =
2258 Builder.CreateFMulFMF(Log2, ConstantFP::get(Ty, Log2BaseInverted), FMF);
2259
2260 Mul->takeName(&Log);
2261
2262 Log.replaceAllUsesWith(Mul);
2263 DeadVals.push_back(&Log);
2264 return true;
2265}
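
A hedged f16 sketch (names invented; the half constant is ln2/ln10, i.e. log10(2), rounded to half precision):

    ; Before:
    %r = call half @llvm.log10.f16(half %x)

    ; After:
    %l2 = call half @llvm.log2.f16(half %x)
    %r = fmul half %l2, 0xH34D1
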
2266
2267bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
2268 if (skipFunction(F))
2269 return false;
2270
2271 auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
2272 if (!TPC)
2273 return false;
2274
2275 const AMDGPUTargetMachine &TM = TPC->getTM<AMDGPUTargetMachine>();
2276 const TargetLibraryInfo *TLI =
2277 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
2278 AssumptionCache *AC =
2279 &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2280 auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
2281 const DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
2282 const UniformityInfo &UA =
2283 getAnalysis<UniformityInfoWrapperPass>().getUniformityInfo();
2284 return AMDGPUCodeGenPrepareImpl(F, TM, TLI, AC, DT, UA).run();
2285}
 2286
2287PreservedAnalyses AMDGPUCodeGenPreparePass::run(Function &F,
 2288 FunctionAnalysisManager &FAM) {
 2289 const AMDGPUTargetMachine &ATM = static_cast<const AMDGPUTargetMachine &>(TM);
2290 const TargetLibraryInfo *TLI = &FAM.getResult<TargetLibraryAnalysis>(F);
2291 AssumptionCache *AC = &FAM.getResult<AssumptionAnalysis>(F);
2292 const DominatorTree *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
2293 const UniformityInfo &UA = FAM.getResult<UniformityInfoAnalysis>(F);
2294 AMDGPUCodeGenPrepareImpl Impl(F, ATM, TLI, AC, DT, UA);
2295 if (!Impl.run())
2296 return PreservedAnalyses::all();
 2297 PreservedAnalyses PA = PreservedAnalyses::none();
 2298 if (!Impl.FlowChanged)
 2299 PA.preserveSet<CFGAnalyses>();
 2300 return PA;
2301}
2302
2303INITIALIZE_PASS_BEGIN(AMDGPUCodeGenPrepare, DEBUG_TYPE,
2304 "AMDGPU IR optimizations", false, false)
2305INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
2306INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2307INITIALIZE_PASS_DEPENDENCY(UniformityInfoWrapperPass)
2308INITIALIZE_PASS_END(AMDGPUCodeGenPrepare, DEBUG_TYPE, "AMDGPU IR optimizations",
2309 false, false)
2310
2311/// Create a workitem.id.x intrinsic call with range metadata.
2312CallInst *AMDGPUCodeGenPrepareImpl::createWorkitemIdX(IRBuilder<> &B) const {
2313 CallInst *Tid = B.CreateIntrinsic(Intrinsic::amdgcn_workitem_id_x, {});
2314 ST.makeLIDRangeMetadata(Tid);
2315 return Tid;
2316}
2317
2318/// Replace the instruction with a direct workitem.id.x call.
2319void AMDGPUCodeGenPrepareImpl::replaceWithWorkitemIdX(Instruction &I) const {
2320 IRBuilder<> B(&I);
 2321 CallInst *Tid = createWorkitemIdX(B);
 2322 BasicBlock::iterator BI(&I);
 2323 ReplaceInstWithValue(BI, Tid);
2324}
2325
2326/// Replace the instruction with (workitem.id.x & mask).
2327void AMDGPUCodeGenPrepareImpl::replaceWithMaskedWorkitemIdX(
2328 Instruction &I, unsigned WaveSize) const {
2329 IRBuilder<> B(&I);
2330 CallInst *Tid = createWorkitemIdX(B);
2331 Constant *Mask = ConstantInt::get(Tid->getType(), WaveSize - 1);
 2332 Value *AndInst = B.CreateAnd(Tid, Mask);
 2333 BasicBlock::iterator BI(&I);
 2334 ReplaceInstWithValue(BI, AndInst);
2335}
2336
2337/// Try to optimize mbcnt instruction by replacing with workitem.id.x when
2338/// work group size allows direct computation of lane ID.
2339/// Returns true if optimization was applied, false otherwise.
2340bool AMDGPUCodeGenPrepareImpl::tryReplaceWithWorkitemId(Instruction &I,
2341 unsigned Wave) const {
2342 std::optional<unsigned> MaybeX = ST.getReqdWorkGroupSize(F, 0);
2343 if (!MaybeX)
2344 return false;
2345
2346 // When work group size == wave_size, each work group contains exactly one
2347 // wave, so the instruction can be replaced with workitem.id.x directly.
2348 if (*MaybeX == Wave) {
2349 replaceWithWorkitemIdX(I);
2350 return true;
2351 }
2352
2353 // When work group evenly splits into waves, compute lane ID within wave
2354 // using bit masking: lane_id = workitem.id.x & (wave_size - 1).
2355 if (ST.hasWavefrontsEvenlySplittingXDim(F, /*RequiresUniformYZ=*/true)) {
2356 replaceWithMaskedWorkitemIdX(I, Wave);
2357 return true;
2358 }
2359
2360 return false;
2361}
2362
2363/// Optimize mbcnt.lo calls on wave32 architectures for lane ID computation.
2364bool AMDGPUCodeGenPrepareImpl::visitMbcntLo(IntrinsicInst &I) const {
2365 // This optimization only applies to wave32 targets where mbcnt.lo operates on
2366 // the full execution mask.
2367 if (!ST.isWave32())
2368 return false;
2369
2370 // Only optimize the pattern mbcnt.lo(~0, 0) which counts active lanes with
2371 // lower IDs.
 2372 if (!match(&I,
 2373 m_Intrinsic<Intrinsic::amdgcn_mbcnt_lo>(m_AllOnes(), m_Zero())))
 2374 return false;
2375
2376 return tryReplaceWithWorkitemId(I, ST.getWavefrontSize());
2377}
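
A minimal wave32 sketch (assuming a required work group size of exactly one wave):

    ; Before:
    %lane = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)

    ; After (range metadata added by makeLIDRangeMetadata):
    %lane = call i32 @llvm.amdgcn.workitem.id.x()
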
2378
2379/// Optimize mbcnt.hi calls for lane ID computation.
2380bool AMDGPUCodeGenPrepareImpl::visitMbcntHi(IntrinsicInst &I) const {
2381 // Abort if wave size is not known at compile time.
2382 if (!ST.isWaveSizeKnown())
2383 return false;
2384
2385 unsigned Wave = ST.getWavefrontSize();
2386
 2387 // On wave32, the upper 32 bits of the execution mask are always 0, so
 2388 // mbcnt.hi(mask, val) always returns val unchanged.
2389 if (ST.isWave32()) {
2390 if (auto MaybeX = ST.getReqdWorkGroupSize(F, 0)) {
2391 // Replace mbcnt.hi(mask, val) with val only when work group size matches
2392 // wave size (single wave per work group).
 2393 if (*MaybeX == Wave) {
 2394 BasicBlock::iterator BI(&I);
 2395 ReplaceInstWithValue(BI, I.getArgOperand(1));
2396 return true;
2397 }
2398 }
2399 }
2400
2401 // Optimize the complete lane ID computation pattern:
2402 // mbcnt.hi(~0, mbcnt.lo(~0, 0)) which counts all active lanes with lower IDs
2403 // across the full execution mask.
2404 using namespace PatternMatch;
2405
 2406 // Check for pattern: mbcnt.hi(~0, mbcnt.lo(~0, 0))
 2407 if (!match(&I, m_Intrinsic<Intrinsic::amdgcn_mbcnt_hi>(
 2408 m_AllOnes(), m_Intrinsic<Intrinsic::amdgcn_mbcnt_lo>(
 2409 m_AllOnes(), m_Zero()))))
2410 return false;
2411
2412 return tryReplaceWithWorkitemId(I, Wave);
2413}
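
And the wave64 lane-id idiom this targets, as a hedged sketch:

    ; Before:
    %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
    %lane = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)

    ; After, when the work group is exactly one wave:
    %lane = call i32 @llvm.amdgcn.workitem.id.x()
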
2414
2415/// Check if type is <4 x i8>.
2416static bool isV4I8(Type *Ty) {
 2417 auto *VTy = dyn_cast<FixedVectorType>(Ty);
 2418 return VTy && VTy->getNumElements() == 4 &&
2419 VTy->getElementType()->isIntegerTy(8);
2420}
2421
2422/// Helper to match the dot4 pattern: mul(zext/sext <4 x i8>, zext/sext <4 x i8>).
2423/// Returns true if the pattern matches and the signedness matches IsSigned.
2424/// Sets A, B to the <4 x i8> sources.
2425static bool matchDot4Pattern(Value *MulOp, Value *&A, Value *&B,
2426 bool IsSigned) {
2427 Value *Src0, *Src1;
2428 if (!match(MulOp, m_Mul(m_Value(Src0), m_Value(Src1))))
2429 return false;
2430
 2431 // Check that the result type is <4 x i32>.
 2432 auto *MulTy = dyn_cast<FixedVectorType>(MulOp->getType());
 2433 if (!MulTy || MulTy->getNumElements() != 4 ||
2434 !MulTy->getElementType()->isIntegerTy(32))
2435 return false;
2436
2437 // Match zext or sext based on IsSigned
2438 Value *ExtSrc0, *ExtSrc1;
2439 if (IsSigned) {
2440 if (!match(Src0, m_SExt(m_Value(ExtSrc0))) || !isV4I8(ExtSrc0->getType()))
2441 return false;
2442 if (!match(Src1, m_SExt(m_Value(ExtSrc1))) || !isV4I8(ExtSrc1->getType()))
2443 return false;
2444 } else {
2445 if (!match(Src0, m_ZExt(m_Value(ExtSrc0))) || !isV4I8(ExtSrc0->getType()))
2446 return false;
2447 if (!match(Src1, m_ZExt(m_Value(ExtSrc1))) || !isV4I8(ExtSrc1->getType()))
2448 return false;
2449 }
2450
2451 A = ExtSrc0;
2452 B = ExtSrc1;
2453 return true;
2454}
2455
2456/// Try to convert vector.reduce.add(mul(zext/sext <4 x i8>, zext/sext <4 x
2457/// i8>)) to a dot4 intrinsic call (non-saturating case only).
2458bool AMDGPUCodeGenPrepareImpl::visitVectorReduceAdd(IntrinsicInst &I) {
2459 // Check if we have dot4 instructions available
2460 if (!ST.hasDot7Insts() || (!ST.hasDot1Insts() && !ST.hasDot8Insts()))
2461 return false;
2462
2463 Value *A = nullptr, *B = nullptr;
2464
2465 // Try unsigned first, then signed
2466 bool IsSigned = false;
2467 if (!matchDot4Pattern(I.getArgOperand(0), A, B, /*IsSigned=*/false)) {
2468 if (!matchDot4Pattern(I.getArgOperand(0), A, B, /*IsSigned=*/true))
2469 return false;
2470 IsSigned = true;
2471 }
2472
2473 LLVMContext &Ctx = I.getContext();
2474 Type *I32Ty = Type::getInt32Ty(Ctx);
2475 IRBuilder<> Builder(&I);
2476
2477 // Bitcast <4 x i8> to i32
2478 Value *ASrc = Builder.CreateBitCast(A, I32Ty);
2479 Value *BSrc = Builder.CreateBitCast(B, I32Ty);
2480
2481 // Non-saturating case: accumulator is 0, clamp is false
2482 Value *Acc = ConstantInt::get(I32Ty, 0);
2483 Value *Clamp = ConstantInt::getFalse(Ctx);
2484
2485 Intrinsic::ID DotIID =
2486 IsSigned ? Intrinsic::amdgcn_sdot4 : Intrinsic::amdgcn_udot4;
2487
2488 Value *Dot = Builder.CreateIntrinsic(DotIID, {}, {ASrc, BSrc, Acc, Clamp});
2489 Dot->takeName(&I);
2490
2491 I.replaceAllUsesWith(Dot);
2492 DeadVals.push_back(&I);
2493
2494 return true;
2495}
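
A minimal unsigned sketch (invented names) of the reduction this folds:

    ; Before:
    %ax = zext <4 x i8> %a to <4 x i32>
    %bx = zext <4 x i8> %b to <4 x i32>
    %mul = mul <4 x i32> %ax, %bx
    %sum = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %mul)

    ; After: zero accumulator, clamp disabled.
    %ai = bitcast <4 x i8> %a to i32
    %bi = bitcast <4 x i8> %b to i32
    %sum = call i32 @llvm.amdgcn.udot4(i32 %ai, i32 %bi, i32 0, i1 false)
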
2496
2497/// Try to convert uadd.sat/sadd.sat(vector.reduce.add(mul(...)), c) to a
2498/// saturating dot4 intrinsic. This combine starts at the root (saturating add)
2499/// and looks at its operands.
2500bool AMDGPUCodeGenPrepareImpl::visitSaturatingAdd(IntrinsicInst &I) {
2501 // Check if we have dot4 instructions available
2502 if (!ST.hasDot7Insts() || (!ST.hasDot1Insts() && !ST.hasDot8Insts()))
2503 return false;
2504
2505 Intrinsic::ID IID = I.getIntrinsicID();
2506 bool IsSigned = (IID == Intrinsic::sadd_sat);
2507
2508 // Look for vector.reduce.add as one of the operands (commutative match)
2509 Value *Op0 = I.getArgOperand(0);
2510 Value *Op1 = I.getArgOperand(1);
2511 Value *MulOp = nullptr;
2512 Value *Accum = nullptr;
2513 IntrinsicInst *ReduceInst = nullptr;
2514
2516 ReduceInst = cast<IntrinsicInst>(Op0);
2517 Accum = Op1;
2518 } else if (match(Op1,
2520 ReduceInst = cast<IntrinsicInst>(Op1);
2521 Accum = Op0;
2522 } else {
2523 return false;
2524 }
2525
2526 Value *A = nullptr, *B = nullptr;
2527
2528 if (!matchDot4Pattern(MulOp, A, B, IsSigned))
2529 return false;
2530
2531 LLVMContext &Ctx = I.getContext();
2532 Type *I32Ty = Type::getInt32Ty(Ctx);
2533 IRBuilder<> Builder(&I);
2534
2535 // Bitcast <4 x i8> to i32
2536 Value *ASrc = Builder.CreateBitCast(A, I32Ty);
2537 Value *BSrc = Builder.CreateBitCast(B, I32Ty);
2538
2539 // Saturating case: use the accumulator and set clamp to true
2540 Value *Clamp = ConstantInt::getTrue(Ctx);
2541
2542 Intrinsic::ID DotIID =
2543 IsSigned ? Intrinsic::amdgcn_sdot4 : Intrinsic::amdgcn_udot4;
2544
2545 Value *Dot = Builder.CreateIntrinsic(DotIID, {}, {ASrc, BSrc, Accum, Clamp});
2546 Dot->takeName(&I);
2547
2548 I.replaceAllUsesWith(Dot);
2549 DeadVals.push_back(&I);
2550 // The reduce.add will be dead after this and cleaned up later
2551 if (ReduceInst->use_empty())
2552 DeadVals.push_back(ReduceInst);
2553
2554 return true;
2555}
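
Continuing the previous sketch, the saturating form routes the accumulator into the dot and enables the clamp:

    ; Before:
    %sat = call i32 @llvm.uadd.sat.i32(i32 %sum, i32 %acc)

    ; After:
    %sat = call i32 @llvm.amdgcn.udot4(i32 %ai, i32 %bi, i32 %acc, i1 true)
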
2556
2557char AMDGPUCodeGenPrepare::ID = 0;
 2558
2559FunctionPass *llvm::createAMDGPUCodeGenPreparePass() {
 2560 return new AMDGPUCodeGenPrepare();
2561}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static Value * insertValues(IRBuilder<> &Builder, Type *Ty, SmallVectorImpl< Value * > &Values)
static void extractValues(IRBuilder<> &Builder, SmallVectorImpl< Value * > &Values, Value *V)
static Value * getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS)
static bool isInterestingPHIIncomingValue(const Value *V)
static SelectInst * findSelectThroughCast(Value *V, CastInst *&Cast)
static bool matchDot4Pattern(Value *MulOp, Value *&A, Value *&B, bool IsSigned)
Helper to match the dot4 pattern: mul(zext/sext <4 x i8>, zext/sext <4 x i8>) Returns true if pattern...
static bool isV4I8(Type *Ty)
Check if type is <4 x i8>.
static std::pair< Value *, Value * > getMul64(IRBuilder<> &Builder, Value *LHS, Value *RHS)
static Value * emitRsqIEEE1ULP(IRBuilder<> &Builder, Value *Src, bool IsNegative)
Emit an expansion of 1.0 / sqrt(Src) good for 1ulp that supports denormals.
static Value * getSign32(Value *V, IRBuilder<> &Builder, const DataLayout DL)
static void collectPHINodes(const PHINode &I, SmallPtrSet< const PHINode *, 8 > &SeenPHIs)
static bool isPtrKnownNeverNull(const Value *V, const DataLayout &DL, const AMDGPUTargetMachine &TM, unsigned AS)
static bool areInSameBB(const Value *A, const Value *B)
static cl::opt< bool > WidenLoads("amdgpu-late-codegenprepare-widen-constant-loads", cl::desc("Widen sub-dword constant address space loads in " "AMDGPULateCodeGenPrepare"), cl::ReallyHidden, cl::init(true))
The AMDGPU TargetMachine interface definition for hw codegen targets.
@ Scaled
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define X(NUM, ENUM, NAME)
Definition ELF.h:853
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
dxil translate DXIL Translate Metadata
static bool runOnFunction(Function &F, bool PostInlining)
#define DEBUG_TYPE
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define T
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
FunctionAnalysisManager FAM
#define INITIALIZE_PASS_DEPENDENCY(depName)
Definition PassSupport.h:42
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
Definition PassSupport.h:44
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
Definition PassSupport.h:39
const SmallVectorImpl< MachineOperand > & Cond
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
This file implements a set that has insertion order iteration characteristics.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static cl::opt< cl::boolOrDefault > EnableGlobalISelOption("global-isel", cl::Hidden, cl::desc("Enable the \"global\" instruction selector"))
Target-Independent Code Generator Pass Configuration Options pass.
This pass exposes codegen information to IR-level passes.
LLVM IR instance of the generic uniformity analysis.
Value * RHS
Value * LHS
BinaryOperator * Mul
VectorSlice(Type *Ty, unsigned Idx, unsigned NumElts)
Value * getSlicedVal(BasicBlock *BB, Value *Inc, StringRef NewValName)
Slice Inc according to the information contained within this slice.
PreservedAnalyses run(Function &, FunctionAnalysisManager &)
std::optional< unsigned > getReqdWorkGroupSize(const Function &F, unsigned Dim) const
bool hasWavefrontsEvenlySplittingXDim(const Function &F, bool REquiresUniformYZ=false) const
unsigned getWavefrontSize() const
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
Definition APFloat.h:1147
static APFloat getSmallestNormalized(const fltSemantics &Sem, bool Negative=false)
Returns the smallest (by magnitude) normalized finite number in the given semantics.
Definition APFloat.h:1217
opStatus next(bool nextDown)
Definition APFloat.h:1313
This class represents a conversion between pointers from one address space to another.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
void setPreservesAll()
Set by analyses that do not transform their input at all.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
InstListType::iterator iterator
Instruction iterators...
Definition BasicBlock.h:170
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction; assumes that the block is well-formed.
Definition BasicBlock.h:237
BinaryOps getOpcode() const
Definition InstrTypes.h:374
BitVector & set()
Set all bits in the bitvector.
Definition BitVector.h:366
bool all() const
Returns true if all bits are set.
Definition BitVector.h:194
Represents analyses that only rely on functions' control flow.
Definition Analysis.h:73
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
Definition InstrTypes.h:448
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition InstrTypes.h:610
TargetTransformInfo getTargetTransformInfo(const Function &F) const override
Get a TargetTransformInfo implementation for the target.
static ConstantAsMetadata * get(Constant *C)
Definition Metadata.h:537
static LLVM_ABI Constant * getInfinity(Type *Ty, bool Negative=false)
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
LLVM_ABI bool isExactlyValue(const APFloat &V) const
We don't rely on operator== working on double values, as it returns true for things that are clearly ...
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
Analysis pass which computes a DominatorTree.
Definition Dominators.h:278
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition Operator.h:200
FastMathFlags getFastMathFlags() const
Convenience function for getting all the fast-math flags.
Definition Operator.h:333
LLVM_ABI float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:23
void setFast(bool B=true)
Definition FMF.h:99
bool noInfs() const
Definition FMF.h:69
bool allowReciprocal() const
Definition FMF.h:71
bool approxFunc() const
Definition FMF.h:73
void setNoNaNs(bool B=true)
Definition FMF.h:81
bool noNaNs() const
Definition FMF.h:68
bool allowContract() const
Definition FMF.h:72
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static LLVM_ABI FixedVectorType * get(Type *ElementType, unsigned NumElts)
Definition Type.cpp:873
FunctionPass class - This class is used to implement most global optimizations.
Definition Pass.h:314
bool isWave32() const
bool isWaveSizeKnown() const
Returns if the wavesize of this subtarget is known reliable.
bool hasFractBug() const
bool isUniformAtDef(ConstValueRefT V) const
Whether V is uniform/non-divergent at its definition.
CallInst * CreateFAbs(Value *V, FMFSource FMFSource={}, const Twine &Name="")
Create call to the fabs intrinsic.
Definition IRBuilder.h:1048
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Definition IRBuilder.h:2627
Value * CreateFDiv(Value *L, Value *R, const Twine &Name="", MDNode *FPMD=nullptr)
Definition IRBuilder.h:1715
Value * CreateSIToFP(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2193
Value * CreateExtractElement(Value *Vec, Value *Idx, const Twine &Name="")
Definition IRBuilder.h:2615
IntegerType * getIntNTy(unsigned N)
Fetch the type representing an N-bit integer.
Definition IRBuilder.h:599
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
Definition IRBuilder.h:2138
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition IRBuilder.h:2674
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > OverloadTypes, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="", ArrayRef< OperandBundleDef > OpBundles={})
Create a call to intrinsic ID with Args, mangled using OverloadTypes.
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Value * CreateFPToUI(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2166
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2132
void SetCurrentDebugLocation(const DebugLoc &L)
Set location information used by debugging information.
Definition IRBuilder.h:247
IntegerType * getInt32Ty()
Fetch the type representing a 32-bit integer.
Definition IRBuilder.h:586
Value * CreateUIToFP(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition IRBuilder.h:2180
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Definition IRBuilder.h:352
Value * CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2429
Value * CreateNeg(Value *V, const Twine &Name="", bool HasNSW=false)
Definition IRBuilder.h:1842
LLVM_ABI Value * createIsFPClass(Value *FPNum, unsigned Test)
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition IRBuilder.h:529
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1461
Value * CreateFMA(Value *Factor1, Value *Factor2, Value *Summand, FMFSource FMFSource={}, const Twine &Name="")
Create call to the fma intrinsic.
Definition IRBuilder.h:1115
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2242
LLVM_ABI CallInst * CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 1 operand which is mangled on its type.
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Definition IRBuilder.h:1918
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1533
FastMathFlags getFastMathFlags() const
Get the flags to be applied to created floating point ops.
Definition IRBuilder.h:341
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Definition IRBuilder.h:2120
Value * CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2414
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1592
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1444
Type * getFloatTy()
Fetch the type representing a 32-bit floating point value.
Definition IRBuilder.h:614
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2553
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition IRBuilder.h:2106
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:1753
Value * CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:2386
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Definition IRBuilder.h:207
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition IRBuilder.h:1573
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1644
Value * CreateFMul(Value *L, Value *R, const Twine &Name="", MDNode *FPMD=nullptr)
Definition IRBuilder.h:1696
Value * CreateFNeg(Value *V, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:1851
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="", bool IsDisjoint=false)
Definition IRBuilder.h:1614
Value * CreateSExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a SExt or Trunc from the integer value V to DestTy.
Definition IRBuilder.h:2153
Value * CreateFMulFMF(Value *L, Value *R, FMFSource FMFSource, const Twine &Name="", MDNode *FPMD=nullptr)
Definition IRBuilder.h:1701
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1478
Value * CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2424
Value * CreateFPToSI(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2173
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2858
Base class for instruction visitors.
Definition InstVisitor.h:78
LLVM_ABI void copyFastMathFlags(FastMathFlags FMF)
Convenience function for transferring all fast-math flag values to this instruction,...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1572
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserveSet()
Mark an analysis set as preserved.
Definition Analysis.h:151
This class represents the LLVM 'select' instruction.
const Value * getFalseValue() const
const Value * getCondition() const
const Value * getTrueValue() const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Represent a constant reference to a string, i.e.
Definition StringRef.h:56
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
const STC & getSubtarget(const Function &F) const
This method returns a pointer to the specified type of TargetSubtargetInfo.
static LLVM_ABI CastContextHint getCastContextHint(const Instruction *I)
Calculates a CastContextHint from I.
LLVM_ABI InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, TTI::CastContextHint CCH, TTI::TargetCostKind CostKind=TTI::TCK_SizeAndLatency, const Instruction *I=nullptr) const
@ TCK_RecipThroughput
Reciprocal throughput.
LLVM_ABI InstructionCost getArithmeticInstrCost(unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind=TTI::TCK_RecipThroughput, TTI::OperandValueInfo Opd1Info={TTI::OK_AnyValue, TTI::OP_None}, TTI::OperandValueInfo Opd2Info={TTI::OK_AnyValue, TTI::OP_None}, ArrayRef< const Value * > Args={}, const Instruction *CxtI=nullptr, const TargetLibraryInfo *TLibInfo=nullptr) const
This is an approximation of reciprocal throughput of a math/logic op.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
static LLVM_ABI IntegerType * getInt64Ty(LLVMContext &C)
Definition Type.cpp:314
LLVM_ABI unsigned getIntegerBitWidth() const
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:313
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
Definition Type.h:155
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:370
LLVM_ABI Type * getWithNewBitWidth(unsigned NewBitWidth) const
Given an integer or vector type, change the lane bitwidth to NewBitwidth, whilst keeping the old numb...
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
Definition Type.h:144
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
Definition Type.h:158
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition Type.h:257
LLVM_ABI const fltSemantics & getFltSemantics() const
Definition Type.cpp:110
Analysis pass which computes UniformityInfo.
Legacy analysis pass which computes a CycleInfo.
void setOperand(unsigned i, Value *Val)
Definition User.h:212
Value * getOperand(unsigned i) const
Definition User.h:207
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:255
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:549
bool use_empty() const
Definition Value.h:346
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:399
Type * getElementType() const
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ CONSTANT_ADDRESS_32BIT
Address space for 32-bit constant memory.
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ FLAT_ADDRESS
Address space for flat memory.
@ PRIVATE_ADDRESS
Address space for private memory.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr int64_t getNullPointerValue(unsigned AS)
Get the null pointer value for the given address space.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ FMAD
FMAD - Perform a * b + c, while getting the same result as the separately rounded operations.
Definition ISDOpcodes.h:522
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > OverloadTys={})
Look up the Function declaration of the intrinsic id in the Module M.
match_combine_or< Ty... > m_CombineOr(const Ty &...Ps)
Combine pattern matchers matching any of Ps patterns.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
MaxMin_match< FCmpInst, LHS, RHS, ufmin_pred_ty > m_UnordFMin(const LHS &L, const RHS &R)
Match an 'unordered' floating point minimum function.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
match_combine_or< typename m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty, typename m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty > m_FMinNum_or_FMinimumNum(const Opnd0 &Op0, const Opnd1 &Op1)
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
match_deferred< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
ap_match< APFloat > m_APFloatAllowPoison(const APFloat *&Res)
Match APFloat while allowing poison in splat vector constants.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
m_Intrinsic_Ty< Opnd0, Opnd1 >::Ty m_FMinimum(const Opnd0 &Op0, const Opnd1 &Op1)
auto m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
cstfp_pred_ty< is_nonnan > m_NonNaN()
Match a non-NaN FP constant.
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
cstfp_pred_ty< is_signed_inf< false > > m_PosInf()
Match a positive infinity FP constant.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
Definition Metadata.h:668
constexpr double ln2
constexpr double ln10
This is an optimization pass for GlobalISel generic memory operations.
GenericUniformityInfo< SSAContext > UniformityInfo
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, const SimplifyQuery &SQ, unsigned Depth=0)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1738
LLVM_ABI bool RecursivelyDeleteTriviallyDeadInstructions(Value *V, const TargetLibraryInfo *TLI=nullptr, MemorySSAUpdater *MSSAU=nullptr, std::function< void(Value *)> AboutToDeleteCallback=std::function< void(Value *)>())
If the specified value is a trivially dead instruction, delete it.
Definition Local.cpp:535
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
Definition STLExtras.h:2553
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
LLVM_ABI bool expandRemainderUpTo64Bits(BinaryOperator *Rem)
Generate code to calculate the remainder of two integers, replacing Rem with the generated code.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:633
constexpr T alignDown(U Value, V Align, W Skew=0)
Returns the largest unsigned integer less than or equal to Value and is Skew mod Align.
Definition MathExtras.h:546
LLVM_ABI void ReplaceInstWithValue(BasicBlock::iterator &BI, Value *V)
Replace all uses of an instruction (specified by BI) with a value, then remove and delete the origina...
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
Definition bit.h:362
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1745
LLVM_ABI bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition Local.cpp:403
auto reverse(ContainerTy &&C)
Definition STLExtras.h:407
LLVM_ABI bool expandDivisionUpTo64Bits(BinaryOperator *Div)
Generate code to divide two integers, replacing Div with the generated code.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
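A sketch; V is a hypothetical i32 Value* and DL the module's DataLayout:

  KnownBits Known(32);
  computeKnownBits(V, Known, DL);
  if (Known.countMinLeadingZeros() >= 8)
    ; // the top 8 bits of V are provably zero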
constexpr uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
FunctionPass * createAMDGPUCodeGenPreparePass()
To bit_cast(const From &from) noexcept
Definition bit.h:90
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
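One way this feeds a narrowing decision (a sketch, not this pass's exact code): an i32 value with S sign bits occupies only 33 - S significant signed bits, so

  bool FitsSigned24 = ComputeNumSignBits(Op, DL) >= 9; // 33 - 9 == 24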
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
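The three casting operators side by side, on a hypothetical Value *V:

  if (isa<LoadInst>(V)) {          // test only, no cast performed
    auto *LI = cast<LoadInst>(V);  // would assert if the type were wrong
    (void)LI;
  }
  if (auto *SI = dyn_cast<StoreInst>(V)) // nullptr on mismatch, so testable
    (void)SI;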
LLVM_ABI bool isKnownNeverNaN(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the floating-point scalar value is not a NaN or if the floating-point vector value has no NaN elements.
LLVM_ABI unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Get the upper bound on bit size for this Value Op as a signed integer.
unsigned Log2(Align A)
Returns the log2 of the alignment.
Definition Alignment.h:197
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
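A sketch of the classic guard this enables (X and Y are hypothetical unsigned values):

  if (isKnownToBeAPowerOfTwo(Y, DL, /*OrZero=*/false))
    ; // Y has exactly one bit set, so X u/ Y can become X >> log2(Y)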
AnalysisManager< Function > FunctionAnalysisManager
Convenience typedef for the Function analysis manager.
LLVM_ABI void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=MaxLookupSearchDepth)
This method is similar to getUnderlyingObject except that it can look through phi and select instructions and return multiple objects.
LLVM_ABI CGPassBuilderOption getCGPassBuilderOption()
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environment.
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
static constexpr DenormalMode getPreserveSign()
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition KnownBits.h:106
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:262
bool isNegative() const
Returns true if this value is known to be negative.
Definition KnownBits.h:103
bool isKnownNeverSubnormal() const
Return true if it's known this can never be a subnormal.
LLVM_ABI bool isKnownNeverLogicalZero(DenormalMode Mode) const
Return true if it's known this can never be interpreted as a zero.
bool isKnownNeverPosInfinity() const
Return true if it's known this can never be +infinity.
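These KnownFPClass predicates typically gate a transform together; a sketch with Src and SQ assumed as before:

  KnownFPClass K = computeKnownFPClass(Src, APInt(1, 1), fcAllFlags, SQ);
  bool SafeDenominator =
      K.isKnownNeverLogicalZero(DenormalMode::getPreserveSign()) &&
      K.isKnownNeverPosInfinity();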
const DataLayout & DL
const DominatorTree * DT
SimplifyQuery getWithInstruction(const Instruction *I) const
AssumptionCache * AC
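These four members form the SimplifyQuery bundle threaded through the ValueTracking entry points above; getWithInstruction pins a context point so dominating assumes and branch conditions can sharpen the answer. A sketch (CtxI is a hypothetical context instruction):

  if (isKnownNeverNaN(V, SQ.getWithInstruction(CtxI)))
    ; // V is NaN-free at CtxI, enabling no-NaNs style folds there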