Bug Summary

File: build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
Warning: line 747, column 24
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name InstCombineMulDivRem.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Transforms/InstCombine -I /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Transforms/InstCombine -I include -I /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include 
-internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-09-04-125545-48738-1 -x c++ /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
1//===- InstCombineMulDivRem.cpp -------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the visit functions for mul, fmul, sdiv, udiv, fdiv,
10// srem, urem, frem.
11//
12//===----------------------------------------------------------------------===//
13
14#include "InstCombineInternal.h"
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/SmallVector.h"
17#include "llvm/Analysis/InstructionSimplify.h"
18#include "llvm/Analysis/ValueTracking.h"
19#include "llvm/IR/BasicBlock.h"
20#include "llvm/IR/Constant.h"
21#include "llvm/IR/Constants.h"
22#include "llvm/IR/InstrTypes.h"
23#include "llvm/IR/Instruction.h"
24#include "llvm/IR/Instructions.h"
25#include "llvm/IR/IntrinsicInst.h"
26#include "llvm/IR/Intrinsics.h"
27#include "llvm/IR/Operator.h"
28#include "llvm/IR/PatternMatch.h"
29#include "llvm/IR/Type.h"
30#include "llvm/IR/Value.h"
31#include "llvm/Support/Casting.h"
32#include "llvm/Support/ErrorHandling.h"
33#include "llvm/Transforms/InstCombine/InstCombiner.h"
34#include "llvm/Transforms/Utils/BuildLibCalls.h"
35#include <cassert>
36
37#define DEBUG_TYPE "instcombine"
38#include "llvm/Transforms/Utils/InstructionWorklist.h"
39
40using namespace llvm;
41using namespace PatternMatch;
42
43/// The specific integer value is used in a context where it is known to be
44/// non-zero. If this allows us to simplify the computation, do so and return
45/// the new operand, otherwise return null.
46static Value *simplifyValueKnownNonZero(Value *V, InstCombinerImpl &IC,
47 Instruction &CxtI) {
48 // If V has multiple uses, then we would have to do more analysis to determine
49 // if this is safe. For example, the use could be in dynamically unreached
50 // code.
51 if (!V->hasOneUse()) return nullptr;
52
53 bool MadeChange = false;
54
55 // ((1 << A) >>u B) --> (1 << (A-B))
56 // Because V cannot be zero, we know that B is less than A.
57 Value *A = nullptr, *B = nullptr, *One = nullptr;
58 if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(One), m_Value(A))), m_Value(B))) &&
59 match(One, m_One())) {
60 A = IC.Builder.CreateSub(A, B);
61 return IC.Builder.CreateShl(One, A);
62 }
63
64 // (PowerOfTwo >>u B) --> isExact since shifting out the result would make it
65 // inexact. Similarly for <<.
66 BinaryOperator *I = dyn_cast<BinaryOperator>(V);
67 if (I && I->isLogicalShift() &&
68 IC.isKnownToBeAPowerOfTwo(I->getOperand(0), false, 0, &CxtI)) {
69 // We know that this is an exact/nuw shift and that the input is a
70 // non-zero context as well.
71 if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {
72 IC.replaceOperand(*I, 0, V2);
73 MadeChange = true;
74 }
75
76 if (I->getOpcode() == Instruction::LShr && !I->isExact()) {
77 I->setIsExact();
78 MadeChange = true;
79 }
80
81 if (I->getOpcode() == Instruction::Shl && !I->hasNoUnsignedWrap()) {
82 I->setHasNoUnsignedWrap();
83 MadeChange = true;
84 }
85 }
86
87 // TODO: Lots more we could do here:
88 // If V is a phi node, we can call this on each of its operands.
89 // "select cond, X, 0" can simplify to "X".
90
91 return MadeChange ? V : nullptr;
92}
93
94// TODO: This is a specific form of a much more general pattern.
95// We could detect a select with any binop identity constant, or we
96// could use SimplifyBinOp to see if either arm of the select reduces.
97// But that needs to be done carefully and/or while removing potential
98// reverse canonicalizations as in InstCombiner::foldSelectIntoOp().
99static Value *foldMulSelectToNegate(BinaryOperator &I,
100 InstCombiner::BuilderTy &Builder) {
101 Value *Cond, *OtherOp;
102
103 // mul (select Cond, 1, -1), OtherOp --> select Cond, OtherOp, -OtherOp
104 // mul OtherOp, (select Cond, 1, -1) --> select Cond, OtherOp, -OtherOp
105 if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_One(), m_AllOnes())),
106 m_Value(OtherOp)))) {
107 bool HasAnyNoWrap = I.hasNoSignedWrap() || I.hasNoUnsignedWrap();
108 Value *Neg = Builder.CreateNeg(OtherOp, "", false, HasAnyNoWrap);
109 return Builder.CreateSelect(Cond, OtherOp, Neg);
110 }
111 // mul (select Cond, -1, 1), OtherOp --> select Cond, -OtherOp, OtherOp
112 // mul OtherOp, (select Cond, -1, 1) --> select Cond, -OtherOp, OtherOp
113 if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_AllOnes(), m_One())),
114 m_Value(OtherOp)))) {
115 bool HasAnyNoWrap = I.hasNoSignedWrap() || I.hasNoUnsignedWrap();
116 Value *Neg = Builder.CreateNeg(OtherOp, "", false, HasAnyNoWrap);
117 return Builder.CreateSelect(Cond, Neg, OtherOp);
118 }
119
120 // fmul (select Cond, 1.0, -1.0), OtherOp --> select Cond, OtherOp, -OtherOp
121 // fmul OtherOp, (select Cond, 1.0, -1.0) --> select Cond, OtherOp, -OtherOp
122 if (match(&I, m_c_FMul(m_OneUse(m_Select(m_Value(Cond), m_SpecificFP(1.0),
123 m_SpecificFP(-1.0))),
124 m_Value(OtherOp)))) {
125 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
126 Builder.setFastMathFlags(I.getFastMathFlags());
127 return Builder.CreateSelect(Cond, OtherOp, Builder.CreateFNeg(OtherOp));
128 }
129
130 // fmul (select Cond, -1.0, 1.0), OtherOp --> select Cond, -OtherOp, OtherOp
131 // fmul OtherOp, (select Cond, -1.0, 1.0) --> select Cond, -OtherOp, OtherOp
132 if (match(&I, m_c_FMul(m_OneUse(m_Select(m_Value(Cond), m_SpecificFP(-1.0),
133 m_SpecificFP(1.0))),
134 m_Value(OtherOp)))) {
135 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
136 Builder.setFastMathFlags(I.getFastMathFlags());
137 return Builder.CreateSelect(Cond, Builder.CreateFNeg(OtherOp), OtherOp);
138 }
139
140 return nullptr;
141}
142
143Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
144 if (Value *V = simplifyMulInst(I.getOperand(0), I.getOperand(1),
145 SQ.getWithInstruction(&I)))
146 return replaceInstUsesWith(I, V);
147
148 if (SimplifyAssociativeOrCommutative(I))
149 return &I;
150
151 if (Instruction *X = foldVectorBinop(I))
152 return X;
153
154 if (Instruction *Phi = foldBinopWithPhiOperands(I))
155 return Phi;
156
157 if (Value *V = SimplifyUsingDistributiveLaws(I))
158 return replaceInstUsesWith(I, V);
159
160 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
161 unsigned BitWidth = I.getType()->getScalarSizeInBits();
162
163 // X * -1 == 0 - X
164 if (match(Op1, m_AllOnes())) {
165 BinaryOperator *BO = BinaryOperator::CreateNeg(Op0, I.getName());
166 if (I.hasNoSignedWrap())
167 BO->setHasNoSignedWrap();
168 return BO;
169 }
170
171 // Also allow combining multiply instructions on vectors.
172 {
173 Value *NewOp;
174 Constant *C1, *C2;
175 const APInt *IVal;
176 if (match(&I, m_Mul(m_Shl(m_Value(NewOp), m_Constant(C2)),
177 m_Constant(C1))) &&
178 match(C1, m_APInt(IVal))) {
179 // ((X << C2)*C1) == (X * (C1 << C2))
180 Constant *Shl = ConstantExpr::getShl(C1, C2);
181 BinaryOperator *Mul = cast<BinaryOperator>(I.getOperand(0));
182 BinaryOperator *BO = BinaryOperator::CreateMul(NewOp, Shl);
183 if (I.hasNoUnsignedWrap() && Mul->hasNoUnsignedWrap())
184 BO->setHasNoUnsignedWrap();
185 if (I.hasNoSignedWrap() && Mul->hasNoSignedWrap() &&
186 Shl->isNotMinSignedValue())
187 BO->setHasNoSignedWrap();
188 return BO;
189 }
190
191 if (match(&I, m_Mul(m_Value(NewOp), m_Constant(C1)))) {
192 // Replace X*(2^C) with X << C, where C is either a scalar or a vector.
193 if (Constant *NewCst = ConstantExpr::getExactLogBase2(C1)) {
194 BinaryOperator *Shl = BinaryOperator::CreateShl(NewOp, NewCst);
195
196 if (I.hasNoUnsignedWrap())
197 Shl->setHasNoUnsignedWrap();
198 if (I.hasNoSignedWrap()) {
199 const APInt *V;
200 if (match(NewCst, m_APInt(V)) && *V != V->getBitWidth() - 1)
201 Shl->setHasNoSignedWrap();
202 }
203
204 return Shl;
205 }
206 }
207 }
208
209 if (Op0->hasOneUse() && match(Op1, m_NegatedPower2())) {
210 // Interpret X * (-1<<C) as (-X) * (1<<C) and try to sink the negation.
211 // The "* (1<<C)" thus becomes a potential shifting opportunity.
212 if (Value *NegOp0 = Negator::Negate(/*IsNegation*/ true, Op0, *this))
213 return BinaryOperator::CreateMul(
214 NegOp0, ConstantExpr::getNeg(cast<Constant>(Op1)), I.getName());
215 }
216
217 if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
218 return FoldedMul;
219
220 if (Value *FoldedMul = foldMulSelectToNegate(I, Builder))
221 return replaceInstUsesWith(I, FoldedMul);
222
223 // Simplify mul instructions with a constant RHS.
224 Constant *MulC;
225 if (match(Op1, m_ImmConstant(MulC))) {
226 // Canonicalize (X+C1)*MulC -> X*MulC+C1*MulC.
227 // Canonicalize (X|C1)*MulC -> X*MulC+C1*MulC.
228 Value *X;
229 Constant *C1;
230 if ((match(Op0, m_OneUse(m_Add(m_Value(X), m_ImmConstant(C1))))) ||
231 (match(Op0, m_OneUse(m_Or(m_Value(X), m_ImmConstant(C1)))) &&
232 haveNoCommonBitsSet(X, C1, DL, &AC, &I, &DT))) {
233 // C1*MulC simplifies to a tidier constant.
234 Value *NewC = Builder.CreateMul(C1, MulC);
235 auto *BOp0 = cast<BinaryOperator>(Op0);
236 bool Op0NUW =
237 (BOp0->getOpcode() == Instruction::Or || BOp0->hasNoUnsignedWrap());
238 Value *NewMul = Builder.CreateMul(X, MulC);
239 auto *BO = BinaryOperator::CreateAdd(NewMul, NewC);
240 if (I.hasNoUnsignedWrap() && Op0NUW) {
241 // If NewMulBO is constant we also can set BO to nuw.
242 if (auto *NewMulBO = dyn_cast<BinaryOperator>(NewMul))
243 NewMulBO->setHasNoUnsignedWrap();
244 BO->setHasNoUnsignedWrap();
245 }
246 return BO;
247 }
248 }
249
250 // abs(X) * abs(X) -> X * X
251 // nabs(X) * nabs(X) -> X * X
252 if (Op0 == Op1) {
253 Value *X, *Y;
254 SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
255 if (SPF == SPF_ABS || SPF == SPF_NABS)
256 return BinaryOperator::CreateMul(X, X);
257
258 if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
259 return BinaryOperator::CreateMul(X, X);
260 }
261
262 // -X * C --> X * -C
263 Value *X, *Y;
264 Constant *Op1C;
265 if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Constant(Op1C)))
266 return BinaryOperator::CreateMul(X, ConstantExpr::getNeg(Op1C));
267
268 // -X * -Y --> X * Y
269 if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Neg(m_Value(Y)))) {
270 auto *NewMul = BinaryOperator::CreateMul(X, Y);
271 if (I.hasNoSignedWrap() &&
272 cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap() &&
273 cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap())
274 NewMul->setHasNoSignedWrap();
275 return NewMul;
276 }
277
278 // -X * Y --> -(X * Y)
279 // X * -Y --> -(X * Y)
280 if (match(&I, m_c_Mul(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))
281 return BinaryOperator::CreateNeg(Builder.CreateMul(X, Y));
282
283 // (X / Y) * Y = X - (X % Y)
284 // (X / Y) * -Y = (X % Y) - X
285 {
286 Value *Y = Op1;
287 BinaryOperator *Div = dyn_cast<BinaryOperator>(Op0);
288 if (!Div || (Div->getOpcode() != Instruction::UDiv &&
289 Div->getOpcode() != Instruction::SDiv)) {
290 Y = Op0;
291 Div = dyn_cast<BinaryOperator>(Op1);
292 }
293 Value *Neg = dyn_castNegVal(Y);
294 if (Div && Div->hasOneUse() &&
295 (Div->getOperand(1) == Y || Div->getOperand(1) == Neg) &&
296 (Div->getOpcode() == Instruction::UDiv ||
297 Div->getOpcode() == Instruction::SDiv)) {
298 Value *X = Div->getOperand(0), *DivOp1 = Div->getOperand(1);
299
300 // If the division is exact, X % Y is zero, so we end up with X or -X.
301 if (Div->isExact()) {
302 if (DivOp1 == Y)
303 return replaceInstUsesWith(I, X);
304 return BinaryOperator::CreateNeg(X);
305 }
306
307 auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
308 : Instruction::SRem;
309 // X must be frozen because we are increasing its number of uses.
310 Value *XFreeze = Builder.CreateFreeze(X, X->getName() + ".fr");
311 Value *Rem = Builder.CreateBinOp(RemOpc, XFreeze, DivOp1);
312 if (DivOp1 == Y)
313 return BinaryOperator::CreateSub(XFreeze, Rem);
314 return BinaryOperator::CreateSub(Rem, XFreeze);
315 }
316 }
317
318 // Fold the following two scenarios:
319 // 1) i1 mul -> i1 and.
320 // 2) X * Y --> X & Y, iff X, Y can be only {0,1}.
321 // Note: We could use known bits to generalize this and related patterns with
322 // shifts/truncs
323 Type *Ty = I.getType();
324 if (Ty->isIntOrIntVectorTy(1) ||
325 (match(Op0, m_And(m_Value(), m_One())) &&
326 match(Op1, m_And(m_Value(), m_One()))))
327 return BinaryOperator::CreateAnd(Op0, Op1);
328
329 // X*(1 << Y) --> X << Y
330 // (1 << Y)*X --> X << Y
331 {
332 Value *Y;
333 BinaryOperator *BO = nullptr;
334 bool ShlNSW = false;
335 if (match(Op0, m_Shl(m_One(), m_Value(Y)))) {
336 BO = BinaryOperator::CreateShl(Op1, Y);
337 ShlNSW = cast<ShlOperator>(Op0)->hasNoSignedWrap();
338 } else if (match(Op1, m_Shl(m_One(), m_Value(Y)))) {
339 BO = BinaryOperator::CreateShl(Op0, Y);
340 ShlNSW = cast<ShlOperator>(Op1)->hasNoSignedWrap();
341 }
342 if (BO) {
343 if (I.hasNoUnsignedWrap())
344 BO->setHasNoUnsignedWrap();
345 if (I.hasNoSignedWrap() && ShlNSW)
346 BO->setHasNoSignedWrap();
347 return BO;
348 }
349 }
350
351 // (zext bool X) * (zext bool Y) --> zext (and X, Y)
352 // (sext bool X) * (sext bool Y) --> zext (and X, Y)
353 // Note: -1 * -1 == 1 * 1 == 1 (if the extends match, the result is the same)
354 if (((match(Op0, m_ZExt(m_Value(X))) && match(Op1, m_ZExt(m_Value(Y)))) ||
355 (match(Op0, m_SExt(m_Value(X))) && match(Op1, m_SExt(m_Value(Y))))) &&
356 X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
357 (Op0->hasOneUse() || Op1->hasOneUse() || X == Y)) {
358 Value *And = Builder.CreateAnd(X, Y, "mulbool");
359 return CastInst::Create(Instruction::ZExt, And, Ty);
360 }
361 // (sext bool X) * (zext bool Y) --> sext (and X, Y)
362 // (zext bool X) * (sext bool Y) --> sext (and X, Y)
363 // Note: -1 * 1 == 1 * -1 == -1
364 if (((match(Op0, m_SExt(m_Value(X))) && match(Op1, m_ZExt(m_Value(Y)))) ||
365 (match(Op0, m_ZExt(m_Value(X))) && match(Op1, m_SExt(m_Value(Y))))) &&
366 X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
367 (Op0->hasOneUse() || Op1->hasOneUse())) {
368 Value *And = Builder.CreateAnd(X, Y, "mulbool");
369 return CastInst::Create(Instruction::SExt, And, Ty);
370 }
371
372 // (zext bool X) * Y --> X ? Y : 0
373 // Y * (zext bool X) --> X ? Y : 0
374 if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
375 return SelectInst::Create(X, Op1, ConstantInt::getNullValue(Ty));
376 if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
377 return SelectInst::Create(X, Op0, ConstantInt::getNullValue(Ty));
378
379 Constant *ImmC;
380 if (match(Op1, m_ImmConstant(ImmC))) {
381 // (sext bool X) * C --> X ? -C : 0
382 if (match(Op0, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
383 Constant *NegC = ConstantExpr::getNeg(ImmC);
384 return SelectInst::Create(X, NegC, ConstantInt::getNullValue(Ty));
385 }
386
387 // (ashr i32 X, 31) * C --> (X < 0) ? -C : 0
388 const APInt *C;
389 if (match(Op0, m_OneUse(m_AShr(m_Value(X), m_APInt(C)))) &&
390 *C == C->getBitWidth() - 1) {
391 Constant *NegC = ConstantExpr::getNeg(ImmC);
392 Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
393 return SelectInst::Create(IsNeg, NegC, ConstantInt::getNullValue(Ty));
394 }
395 }
396
397 // (lshr X, 31) * Y --> (X < 0) ? Y : 0
398 // TODO: We are not checking one-use because the elimination of the multiply
399 // is better for analysis?
400 const APInt *C;
401 if (match(&I, m_c_BinOp(m_LShr(m_Value(X), m_APInt(C)), m_Value(Y))) &&
402 *C == C->getBitWidth() - 1) {
403 Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
404 return SelectInst::Create(IsNeg, Y, ConstantInt::getNullValue(Ty));
405 }
406
407 // (and X, 1) * Y --> (trunc X) ? Y : 0
408 if (match(&I, m_c_BinOp(m_OneUse(m_And(m_Value(X), m_One())), m_Value(Y)))) {
409 Value *Tr = Builder.CreateTrunc(X, CmpInst::makeCmpResultType(Ty));
410 return SelectInst::Create(Tr, Y, ConstantInt::getNullValue(Ty));
411 }
412
413 // ((ashr X, 31) | 1) * X --> abs(X)
414 // X * ((ashr X, 31) | 1) --> abs(X)
415 if (match(&I, m_c_BinOp(m_Or(m_AShr(m_Value(X),
416 m_SpecificIntAllowUndef(BitWidth - 1)),
417 m_One()),
418 m_Deferred(X)))) {
419 Value *Abs = Builder.CreateBinaryIntrinsic(
420 Intrinsic::abs, X,
421 ConstantInt::getBool(I.getContext(), I.hasNoSignedWrap()));
422 Abs->takeName(&I);
423 return replaceInstUsesWith(I, Abs);
424 }
425
426 if (Instruction *Ext = narrowMathIfNoOverflow(I))
427 return Ext;
428
429 bool Changed = false;
430 if (!I.hasNoSignedWrap() && willNotOverflowSignedMul(Op0, Op1, I)) {
431 Changed = true;
432 I.setHasNoSignedWrap(true);
433 }
434
435 if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedMul(Op0, Op1, I)) {
436 Changed = true;
437 I.setHasNoUnsignedWrap(true);
438 }
439
440 return Changed ? &I : nullptr;
441}
442
443Instruction *InstCombinerImpl::foldFPSignBitOps(BinaryOperator &I) {
444 BinaryOperator::BinaryOps Opcode = I.getOpcode();
445 assert((Opcode == Instruction::FMul || Opcode == Instruction::FDiv) &&(static_cast <bool> ((Opcode == Instruction::FMul || Opcode
== Instruction::FDiv) && "Expected fmul or fdiv") ? void
(0) : __assert_fail ("(Opcode == Instruction::FMul || Opcode == Instruction::FDiv) && \"Expected fmul or fdiv\""
, "llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp",
446, __extension__ __PRETTY_FUNCTION__))
446 "Expected fmul or fdiv")(static_cast <bool> ((Opcode == Instruction::FMul || Opcode
== Instruction::FDiv) && "Expected fmul or fdiv") ? void
(0) : __assert_fail ("(Opcode == Instruction::FMul || Opcode == Instruction::FDiv) && \"Expected fmul or fdiv\""
, "llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp",
446, __extension__ __PRETTY_FUNCTION__))
;
447
448 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
449 Value *X, *Y;
450
451 // -X * -Y --> X * Y
452 // -X / -Y --> X / Y
453 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
454 return BinaryOperator::CreateWithCopiedFlags(Opcode, X, Y, &I);
455
456 // fabs(X) * fabs(X) -> X * X
457 // fabs(X) / fabs(X) -> X / X
458 if (Op0 == Op1 && match(Op0, m_FAbs(m_Value(X))))
459 return BinaryOperator::CreateWithCopiedFlags(Opcode, X, X, &I);
460
461 // fabs(X) * fabs(Y) --> fabs(X * Y)
462 // fabs(X) / fabs(Y) --> fabs(X / Y)
463 if (match(Op0, m_FAbs(m_Value(X))) && match(Op1, m_FAbs(m_Value(Y))) &&
464 (Op0->hasOneUse() || Op1->hasOneUse())) {
465 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
466 Builder.setFastMathFlags(I.getFastMathFlags());
467 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
468 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, XY);
469 Fabs->takeName(&I);
470 return replaceInstUsesWith(I, Fabs);
471 }
472
473 return nullptr;
474}
475
476Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
477 if (Value *V = simplifyFMulInst(I.getOperand(0), I.getOperand(1),
478 I.getFastMathFlags(),
479 SQ.getWithInstruction(&I)))
480 return replaceInstUsesWith(I, V);
481
482 if (SimplifyAssociativeOrCommutative(I))
483 return &I;
484
485 if (Instruction *X = foldVectorBinop(I))
486 return X;
487
488 if (Instruction *Phi = foldBinopWithPhiOperands(I))
489 return Phi;
490
491 if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
492 return FoldedMul;
493
494 if (Value *FoldedMul = foldMulSelectToNegate(I, Builder))
495 return replaceInstUsesWith(I, FoldedMul);
496
497 if (Instruction *R = foldFPSignBitOps(I))
498 return R;
499
500 // X * -1.0 --> -X
501 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
502 if (match(Op1, m_SpecificFP(-1.0)))
503 return UnaryOperator::CreateFNegFMF(Op0, &I);
504
505 // -X * C --> X * -C
506 Value *X, *Y;
507 Constant *C;
508 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_Constant(C)))
509 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
510 return BinaryOperator::CreateFMulFMF(X, NegC, &I);
511
512 // (select A, B, C) * (select A, D, E) --> select A, (B*D), (C*E)
513 if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
514 return replaceInstUsesWith(I, V);
515
516 if (I.hasAllowReassoc()) {
517 // Reassociate constant RHS with another constant to form constant
518 // expression.
519 if (match(Op1, m_Constant(C)) && C->isFiniteNonZeroFP()) {
520 Constant *C1;
521 if (match(Op0, m_OneUse(m_FDiv(m_Constant(C1), m_Value(X))))) {
522 // (C1 / X) * C --> (C * C1) / X
523 Constant *CC1 =
524 ConstantFoldBinaryOpOperands(Instruction::FMul, C, C1, DL);
525 if (CC1 && CC1->isNormalFP())
526 return BinaryOperator::CreateFDivFMF(CC1, X, &I);
527 }
528 if (match(Op0, m_FDiv(m_Value(X), m_Constant(C1)))) {
529 // (X / C1) * C --> X * (C / C1)
530 Constant *CDivC1 =
531 ConstantFoldBinaryOpOperands(Instruction::FDiv, C, C1, DL);
532 if (CDivC1 && CDivC1->isNormalFP())
533 return BinaryOperator::CreateFMulFMF(X, CDivC1, &I);
534
535 // If the constant was a denormal, try reassociating differently.
536 // (X / C1) * C --> X / (C1 / C)
537 Constant *C1DivC =
538 ConstantFoldBinaryOpOperands(Instruction::FDiv, C1, C, DL);
539 if (C1DivC && Op0->hasOneUse() && C1DivC->isNormalFP())
540 return BinaryOperator::CreateFDivFMF(X, C1DivC, &I);
541 }
542
543 // We do not need to match 'fadd C, X' and 'fsub X, C' because they are
544 // canonicalized to 'fadd X, C'. Distributing the multiply may allow
545 // further folds and (X * C) + C2 is 'fma'.
546 if (match(Op0, m_OneUse(m_FAdd(m_Value(X), m_Constant(C1))))) {
547 // (X + C1) * C --> (X * C) + (C * C1)
548 if (Constant *CC1 = ConstantFoldBinaryOpOperands(
549 Instruction::FMul, C, C1, DL)) {
550 Value *XC = Builder.CreateFMulFMF(X, C, &I);
551 return BinaryOperator::CreateFAddFMF(XC, CC1, &I);
552 }
553 }
554 if (match(Op0, m_OneUse(m_FSub(m_Constant(C1), m_Value(X))))) {
555 // (C1 - X) * C --> (C * C1) - (X * C)
556 if (Constant *CC1 = ConstantFoldBinaryOpOperands(
557 Instruction::FMul, C, C1, DL)) {
558 Value *XC = Builder.CreateFMulFMF(X, C, &I);
559 return BinaryOperator::CreateFSubFMF(CC1, XC, &I);
560 }
561 }
562 }
563
564 Value *Z;
565 if (match(&I, m_c_FMul(m_OneUse(m_FDiv(m_Value(X), m_Value(Y))),
566 m_Value(Z)))) {
567 // Sink division: (X / Y) * Z --> (X * Z) / Y
568 Value *NewFMul = Builder.CreateFMulFMF(X, Z, &I);
569 return BinaryOperator::CreateFDivFMF(NewFMul, Y, &I);
570 }
571
572 // sqrt(X) * sqrt(Y) -> sqrt(X * Y)
573 // nnan disallows the possibility of returning a number if both operands are
574 // negative (in that case, we should return NaN).
575 if (I.hasNoNaNs() && match(Op0, m_OneUse(m_Sqrt(m_Value(X)))) &&
576 match(Op1, m_OneUse(m_Sqrt(m_Value(Y))))) {
577 Value *XY = Builder.CreateFMulFMF(X, Y, &I);
578 Value *Sqrt = Builder.CreateUnaryIntrinsic(Intrinsic::sqrt, XY, &I);
579 return replaceInstUsesWith(I, Sqrt);
580 }
581
582 // The following transforms are done irrespective of the number of uses
583 // for the expression "1.0/sqrt(X)".
584 // 1) 1.0/sqrt(X) * X -> X/sqrt(X)
585 // 2) X * 1.0/sqrt(X) -> X/sqrt(X)
586 // We always expect the backend to reduce X/sqrt(X) to sqrt(X), if it
587 // has the necessary (reassoc) fast-math-flags.
588 if (I.hasNoSignedZeros() &&
589 match(Op0, (m_FDiv(m_SpecificFP(1.0), m_Value(Y)))) &&
590 match(Y, m_Sqrt(m_Value(X))) && Op1 == X)
591 return BinaryOperator::CreateFDivFMF(X, Y, &I);
592 if (I.hasNoSignedZeros() &&
593 match(Op1, (m_FDiv(m_SpecificFP(1.0), m_Value(Y)))) &&
594 match(Y, m_Sqrt(m_Value(X))) && Op0 == X)
595 return BinaryOperator::CreateFDivFMF(X, Y, &I);
596
597 // Like the similar transform in instsimplify, this requires 'nsz' because
598 // sqrt(-0.0) = -0.0, and -0.0 * -0.0 does not simplify to -0.0.
599 if (I.hasNoNaNs() && I.hasNoSignedZeros() && Op0 == Op1 &&
600 Op0->hasNUses(2)) {
601 // Peek through fdiv to find squaring of square root:
602 // (X / sqrt(Y)) * (X / sqrt(Y)) --> (X * X) / Y
603 if (match(Op0, m_FDiv(m_Value(X), m_Sqrt(m_Value(Y))))) {
604 Value *XX = Builder.CreateFMulFMF(X, X, &I);
605 return BinaryOperator::CreateFDivFMF(XX, Y, &I);
606 }
607 // (sqrt(Y) / X) * (sqrt(Y) / X) --> Y / (X * X)
608 if (match(Op0, m_FDiv(m_Sqrt(m_Value(Y)), m_Value(X)))) {
609 Value *XX = Builder.CreateFMulFMF(X, X, &I);
610 return BinaryOperator::CreateFDivFMF(Y, XX, &I);
611 }
612 }
613
614 if (I.isOnlyUserOfAnyOperand()) {
615 // pow(x, y) * pow(x, z) -> pow(x, y + z)
616 if (match(Op0, m_Intrinsic<Intrinsic::pow>(m_Value(X), m_Value(Y))) &&
617 match(Op1, m_Intrinsic<Intrinsic::pow>(m_Specific(X), m_Value(Z)))) {
618 auto *YZ = Builder.CreateFAddFMF(Y, Z, &I);
619 auto *NewPow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, X, YZ, &I);
620 return replaceInstUsesWith(I, NewPow);
621 }
622
623 // powi(x, y) * powi(x, z) -> powi(x, y + z)
624 if (match(Op0, m_Intrinsic<Intrinsic::powi>(m_Value(X), m_Value(Y))) &&
625 match(Op1, m_Intrinsic<Intrinsic::powi>(m_Specific(X), m_Value(Z))) &&
626 Y->getType() == Z->getType()) {
627 auto *YZ = Builder.CreateAdd(Y, Z);
628 auto *NewPow = Builder.CreateIntrinsic(
629 Intrinsic::powi, {X->getType(), YZ->getType()}, {X, YZ}, &I);
630 return replaceInstUsesWith(I, NewPow);
631 }
632
633 // exp(X) * exp(Y) -> exp(X + Y)
634 if (match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))) &&
635 match(Op1, m_Intrinsic<Intrinsic::exp>(m_Value(Y)))) {
636 Value *XY = Builder.CreateFAddFMF(X, Y, &I);
637 Value *Exp = Builder.CreateUnaryIntrinsic(Intrinsic::exp, XY, &I);
638 return replaceInstUsesWith(I, Exp);
639 }
640
641 // exp2(X) * exp2(Y) -> exp2(X + Y)
642 if (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) &&
643 match(Op1, m_Intrinsic<Intrinsic::exp2>(m_Value(Y)))) {
644 Value *XY = Builder.CreateFAddFMF(X, Y, &I);
645 Value *Exp2 = Builder.CreateUnaryIntrinsic(Intrinsic::exp2, XY, &I);
646 return replaceInstUsesWith(I, Exp2);
647 }
648 }
649
650 // (X*Y) * X => (X*X) * Y where Y != X
651 // The purpose is two-fold:
652 // 1) to form a power expression (of X).
653 // 2) potentially shorten the critical path: After transformation, the
654 // latency of the instruction Y is amortized by the expression of X*X,
655 // and therefore Y is in a "less critical" position compared to what it
656 // was before the transformation.
657 if (match(Op0, m_OneUse(m_c_FMul(m_Specific(Op1), m_Value(Y)))) &&
658 Op1 != Y) {
659 Value *XX = Builder.CreateFMulFMF(Op1, Op1, &I);
660 return BinaryOperator::CreateFMulFMF(XX, Y, &I);
661 }
662 if (match(Op1, m_OneUse(m_c_FMul(m_Specific(Op0), m_Value(Y)))) &&
663 Op0 != Y) {
664 Value *XX = Builder.CreateFMulFMF(Op0, Op0, &I);
665 return BinaryOperator::CreateFMulFMF(XX, Y, &I);
666 }
667 }
668
669 // log2(X * 0.5) * Y = log2(X) * Y - Y
670 if (I.isFast()) {
671 IntrinsicInst *Log2 = nullptr;
672 if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::log2>(
673 m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
674 Log2 = cast<IntrinsicInst>(Op0);
675 Y = Op1;
676 }
677 if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::log2>(
678 m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
679 Log2 = cast<IntrinsicInst>(Op1);
680 Y = Op0;
681 }
682 if (Log2) {
683 Value *Log2 = Builder.CreateUnaryIntrinsic(Intrinsic::log2, X, &I);
684 Value *LogXTimesY = Builder.CreateFMulFMF(Log2, Y, &I);
685 return BinaryOperator::CreateFSubFMF(LogXTimesY, Y, &I);
686 }
687 }
688
689 // Simplify FMUL recurrences starting with 0.0 to 0.0 if nnan and nsz are set.
690 // Given a phi node with entry value as 0 and it used in fmul operation,
691 // we can replace fmul with 0 safely and eleminate loop operation.
692 PHINode *PN = nullptr;
693 Value *Start = nullptr, *Step = nullptr;
694 if (matchSimpleRecurrence(&I, PN, Start, Step) && I.hasNoNaNs() &&
695 I.hasNoSignedZeros() && match(Start, m_Zero()))
696 return replaceInstUsesWith(I, Start);
697
698 return nullptr;
699}
700
701/// Fold a divide or remainder with a select instruction divisor when one of the
702/// select operands is zero. In that case, we can use the other select operand
703/// because div/rem by zero is undefined.
704bool InstCombinerImpl::simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I) {
705 SelectInst *SI = dyn_cast<SelectInst>(I.getOperand(1));
706 if (!SI)
11
Assuming 'SI' is non-null
12
Taking false branch
707 return false;
708
709 int NonNullOperand;
710 if (match(SI->getTrueValue(), m_Zero()))
13
Assuming the condition is true
14
Taking true branch
711 // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
712 NonNullOperand = 2;
713 else if (match(SI->getFalseValue(), m_Zero()))
714 // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
715 NonNullOperand = 1;
716 else
717 return false;
718
719 // Change the div/rem to use 'Y' instead of the select.
720 replaceOperand(I, 1, SI->getOperand(NonNullOperand));
721
722 // Okay, we know we replace the operand of the div/rem with 'Y' with no
723 // problem. However, the select, or the condition of the select may have
724 // multiple uses. Based on our knowledge that the operand must be non-zero,
725 // propagate the known value for the select into other uses of it, and
726 // propagate a known value of the condition into its other users.
727
728 // If the select and condition only have a single use, don't bother with this,
729 // early exit.
730 Value *SelectCond = SI->getCondition();
731 if (SI->use_empty() && SelectCond->hasOneUse())
732 return true;
733
734 // Scan the current block backward, looking for other uses of SI.
735 BasicBlock::iterator BBI = I.getIterator(), BBFront = I.getParent()->begin();
736 Type *CondTy = SelectCond->getType();
737 while (BBI != BBFront) {
15
Loop condition is true. Entering loop body
23
Loop condition is true. Entering loop body
738 --BBI;
739 // If we found an instruction that we can't assume will return, so
740 // information from below it cannot be propagated above it.
741 if (!isGuaranteedToTransferExecutionToSuccessor(&*BBI))
16
Assuming the condition is false
17
Taking false branch
24
Assuming the condition is false
25
Taking false branch
742 break;
743
744 // Replace uses of the select or its condition with the known values.
745 for (Use &Op : BBI->operands()) {
18
Assuming '__begin2' is equal to '__end2'
26
Assuming '__begin2' is not equal to '__end2'
746 if (Op == SI) {
27
Assuming the condition is true
28
Taking true branch
747 replaceUse(Op, SI->getOperand(NonNullOperand));
29
Called C++ object pointer is null
748 Worklist.push(&*BBI);
749 } else if (Op == SelectCond) {
750 replaceUse(Op, NonNullOperand == 1 ? ConstantInt::getTrue(CondTy)
751 : ConstantInt::getFalse(CondTy));
752 Worklist.push(&*BBI);
753 }
754 }
755
756 // If we past the instruction, quit looking for it.
757 if (&*BBI == SI)
19
Assuming the condition is true
20
Taking true branch
758 SI = nullptr;
21
Null pointer value stored to 'SI'
759 if (&*BBI == SelectCond)
22
Assuming the condition is false
760 SelectCond = nullptr;
761
762 // If we ran out of things to eliminate, break out of the loop.
763 if (!SelectCond
22.1
'SelectCond' is non-null
&& !SI)
764 break;
765
766 }
767 return true;
768}
769
770/// True if the multiply can not be expressed in an int this size.
771static bool multiplyOverflows(const APInt &C1, const APInt &C2, APInt &Product,
772 bool IsSigned) {
773 bool Overflow;
774 Product = IsSigned ? C1.smul_ov(C2, Overflow) : C1.umul_ov(C2, Overflow);
775 return Overflow;
776}
777
778/// True if C1 is a multiple of C2. Quotient contains C1/C2.
779static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient,
780 bool IsSigned) {
781 assert(C1.getBitWidth() == C2.getBitWidth() && "Constant widths not equal")(static_cast <bool> (C1.getBitWidth() == C2.getBitWidth
() && "Constant widths not equal") ? void (0) : __assert_fail
("C1.getBitWidth() == C2.getBitWidth() && \"Constant widths not equal\""
, "llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp",
781, __extension__ __PRETTY_FUNCTION__))
;
782
783 // Bail if we will divide by zero.
784 if (C2.isZero())
785 return false;
786
787 // Bail if we would divide INT_MIN by -1.
788 if (IsSigned && C1.isMinSignedValue() && C2.isAllOnes())
789 return false;
790
791 APInt Remainder(C1.getBitWidth(), /*val=*/0ULL, IsSigned);
792 if (IsSigned)
793 APInt::sdivrem(C1, C2, Quotient, Remainder);
794 else
795 APInt::udivrem(C1, C2, Quotient, Remainder);
796
797 return Remainder.isMinValue();
798}
799
/// This function implements the transforms common to both integer division
/// instructions (udiv and sdiv). It is called by the visitors to those integer
/// division instructions.
/// Common integer divide transforms
///
/// Returns a replacement instruction (not yet inserted), &I if I was modified
/// in place, or nullptr if no transform applied. The order of the folds below
/// matters: earlier folds can enable or preempt later ones.
Instruction *InstCombinerImpl::commonIDivTransforms(BinaryOperator &I) {
  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  bool IsSigned = I.getOpcode() == Instruction::SDiv;
  Type *Ty = I.getType();

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
    return replaceOperand(I, 1, V);

  // Handle cases involving: [su]div X, (select Cond, Y, Z)
  // This does not apply for fdiv.
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  // If the divisor is a select-of-constants, try to constant fold all div ops:
  // C / (select Cond, TrueC, FalseC) --> select Cond, (C / TrueC), (C / FalseC)
  // TODO: Adapt simplifyDivRemOfSelectWithZeroOp to allow this and other folds.
  if (match(Op0, m_ImmConstant()) &&
      match(Op1, m_Select(m_Value(), m_ImmConstant(), m_ImmConstant()))) {
    if (Instruction *R = FoldOpIntoSelect(I, cast<SelectInst>(Op1),
                                          /*FoldWithMultiUse*/ true))
      return R;
  }

  const APInt *C2;
  if (match(Op1, m_APInt(C2))) {
    Value *X;
    const APInt *C1;

    // (X / C1) / C2 -> X / (C1*C2)
    if ((IsSigned && match(Op0, m_SDiv(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_APInt(C1))))) {
      APInt Product(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
      // Only valid when C1*C2 is representable; otherwise the combined
      // divisor would be wrong.
      if (!multiplyOverflows(*C1, *C2, Product, IsSigned))
        return BinaryOperator::Create(I.getOpcode(), X,
                                      ConstantInt::get(Ty, Product));
    }

    if ((IsSigned && match(Op0, m_NSWMul(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_NUWMul(m_Value(X), m_APInt(C1))))) {
      APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);

      // (X * C1) / C2 -> X / (C2 / C1) if C2 is a multiple of C1.
      if (isMultiple(*C2, *C1, Quotient, IsSigned)) {
        auto *NewDiv = BinaryOperator::Create(I.getOpcode(), X,
                                              ConstantInt::get(Ty, Quotient));
        NewDiv->setIsExact(I.isExact());
        return NewDiv;
      }

      // (X * C1) / C2 -> X * (C1 / C2) if C1 is a multiple of C2.
      if (isMultiple(*C1, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        // Wrap flags transfer from the original no-wrap multiply.
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    // Shift amount bound: for sdiv the shift must also not touch the sign
    // bit, hence the tighter (BitWidth - 1) limit.
    if ((IsSigned && match(Op0, m_NSWShl(m_Value(X), m_APInt(C1))) &&
         C1->ult(C1->getBitWidth() - 1)) ||
        (!IsSigned && match(Op0, m_NUWShl(m_Value(X), m_APInt(C1))) &&
         C1->ult(C1->getBitWidth()))) {
      APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
      APInt C1Shifted = APInt::getOneBitSet(
          C1->getBitWidth(), static_cast<unsigned>(C1->getZExtValue()));

      // (X << C1) / C2 -> X / (C2 >> C1) if C2 is a multiple of 1 << C1.
      if (isMultiple(*C2, C1Shifted, Quotient, IsSigned)) {
        auto *BO = BinaryOperator::Create(I.getOpcode(), X,
                                          ConstantInt::get(Ty, Quotient));
        BO->setIsExact(I.isExact());
        return BO;
      }

      // (X << C1) / C2 -> X * ((1 << C1) / C2) if 1 << C1 is a multiple of C2.
      if (isMultiple(C1Shifted, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    if (!C2->isZero()) // avoid X udiv 0
      if (Instruction *FoldedDiv = foldBinOpIntoSelectOrPhi(I))
        return FoldedDiv;
  }

  if (match(Op0, m_One())) {
    assert(!Ty->isIntOrIntVectorTy(1) && "i1 divide not removed?");
    if (IsSigned) {
      // 1 / 0 --> undef ; 1 / 1 --> 1 ; 1 / -1 --> -1 ; 1 / anything else --> 0
      // (Op1 + 1) u< 3 ? Op1 : 0
      // Op1 must be frozen because we are increasing its number of uses.
      Value *F1 = Builder.CreateFreeze(Op1, Op1->getName() + ".fr");
      Value *Inc = Builder.CreateAdd(F1, Op0);
      Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(Ty, 3));
      return SelectInst::Create(Cmp, F1, ConstantInt::get(Ty, 0));
    } else {
      // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
      // result is one, otherwise it's zero.
      return new ZExtInst(Builder.CreateICmpEQ(Op1, Op0), Ty);
    }
  }

  // See if we can fold away this div instruction.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  // (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y
  Value *X, *Z;
  if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) // (X - Z) / Y; Y = Op1
    if ((IsSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) ||
        (!IsSigned && match(Z, m_URem(m_Specific(X), m_Specific(Op1)))))
      return BinaryOperator::Create(I.getOpcode(), X, Op1);

  // (X << Y) / X -> 1 << Y
  Value *Y;
  if (IsSigned && match(Op0, m_NSWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty, 1), Y);
  if (!IsSigned && match(Op0, m_NUWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty, 1), Y);

  // X / (X * Y) -> 1 / Y if the multiplication does not overflow.
  if (match(Op1, m_c_Mul(m_Specific(Op0), m_Value(Y)))) {
    bool HasNSW = cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap();
    bool HasNUW = cast<OverflowingBinaryOperator>(Op1)->hasNoUnsignedWrap();
    if ((IsSigned && HasNSW) || (!IsSigned && HasNUW)) {
      replaceOperand(I, 0, ConstantInt::get(Ty, 1));
      replaceOperand(I, 1, Y);
      return &I;
    }
  }

  return nullptr;
}
948
// Recursion limit for takeLog2 below.
static const unsigned MaxDepth = 6;

// Take the exact integer log2 of the value. If DoFold is true, create the
// actual instructions, otherwise return a non-null dummy value. Return nullptr
// on failure.
static Value *takeLog2(IRBuilderBase &Builder, Value *Op, unsigned Depth,
                       bool DoFold) {
  // Query/fold dual mode: when DoFold is false, report success with a dummy
  // non-null pointer so no IR is created speculatively. Callers must first
  // query (DoFold=false), then re-run with DoFold=true to build the IR.
  auto IfFold = [DoFold](function_ref<Value *()> Fn) {
    if (!DoFold)
      return reinterpret_cast<Value *>(-1);
    return Fn();
  };

  // FIXME: assert that Op1 isn't/doesn't contain undef.

  // log2(2^C) -> C
  if (match(Op, m_Power2()))
    return IfFold([&]() {
      Constant *C = ConstantExpr::getExactLogBase2(cast<Constant>(Op));
      if (!C)
        llvm_unreachable("Failed to constant fold udiv -> logbase2");
      return C;
    });

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return nullptr;

  // log2(zext X) -> zext log2(X)
  // FIXME: Require one use?
  Value *X, *Y;
  if (match(Op, m_ZExt(m_Value(X))))
    if (Value *LogX = takeLog2(Builder, X, Depth, DoFold))
      return IfFold([&]() { return Builder.CreateZExt(LogX, Op->getType()); });

  // log2(X << Y) -> log2(X) + Y
  // FIXME: Require one use unless X is 1?
  if (match(Op, m_Shl(m_Value(X), m_Value(Y))))
    if (Value *LogX = takeLog2(Builder, X, Depth, DoFold))
      return IfFold([&]() { return Builder.CreateAdd(LogX, Y); });

  // log2(Cond ? X : Y) -> Cond ? log2(X) : log2(Y)
  // FIXME: missed optimization: if one of the hands of select is/contains
  // undef, just directly pick the other one.
  // FIXME: can both hands contain undef?
  // FIXME: Require one use?
  if (SelectInst *SI = dyn_cast<SelectInst>(Op))
    if (Value *LogX = takeLog2(Builder, SI->getOperand(1), Depth, DoFold))
      if (Value *LogY = takeLog2(Builder, SI->getOperand(2), Depth, DoFold))
        return IfFold([&]() {
          return Builder.CreateSelect(SI->getOperand(0), LogX, LogY);
        });

  // log2(umin(X, Y)) -> umin(log2(X), log2(Y))
  // log2(umax(X, Y)) -> umax(log2(X), log2(Y))
  // Valid because log2 is monotone over unsigned values.
  auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op);
  if (MinMax && MinMax->hasOneUse() && !MinMax->isSigned())
    if (Value *LogX = takeLog2(Builder, MinMax->getLHS(), Depth, DoFold))
      if (Value *LogY = takeLog2(Builder, MinMax->getRHS(), Depth, DoFold))
        return IfFold([&]() {
          return Builder.CreateBinaryIntrinsic(
              MinMax->getIntrinsicID(), LogX, LogY);
        });

  return nullptr;
}
1015
1016/// If we have zero-extended operands of an unsigned div or rem, we may be able
1017/// to narrow the operation (sink the zext below the math).
1018static Instruction *narrowUDivURem(BinaryOperator &I,
1019 InstCombiner::BuilderTy &Builder) {
1020 Instruction::BinaryOps Opcode = I.getOpcode();
1021 Value *N = I.getOperand(0);
1022 Value *D = I.getOperand(1);
1023 Type *Ty = I.getType();
1024 Value *X, *Y;
1025 if (match(N, m_ZExt(m_Value(X))) && match(D, m_ZExt(m_Value(Y))) &&
1026 X->getType() == Y->getType() && (N->hasOneUse() || D->hasOneUse())) {
1027 // udiv (zext X), (zext Y) --> zext (udiv X, Y)
1028 // urem (zext X), (zext Y) --> zext (urem X, Y)
1029 Value *NarrowOp = Builder.CreateBinOp(Opcode, X, Y);
1030 return new ZExtInst(NarrowOp, Ty);
1031 }
1032
1033 Constant *C;
1034 if (isa<Instruction>(N) && match(N, m_OneUse(m_ZExt(m_Value(X)))) &&
1035 match(D, m_Constant(C))) {
1036 // If the constant is the same in the smaller type, use the narrow version.
1037 Constant *TruncC = ConstantExpr::getTrunc(C, X->getType());
1038 if (ConstantExpr::getZExt(TruncC, Ty) != C)
1039 return nullptr;
1040
1041 // udiv (zext X), C --> zext (udiv X, C')
1042 // urem (zext X), C --> zext (urem X, C')
1043 return new ZExtInst(Builder.CreateBinOp(Opcode, X, TruncC), Ty);
1044 }
1045 if (isa<Instruction>(D) && match(D, m_OneUse(m_ZExt(m_Value(X)))) &&
1046 match(N, m_Constant(C))) {
1047 // If the constant is the same in the smaller type, use the narrow version.
1048 Constant *TruncC = ConstantExpr::getTrunc(C, X->getType());
1049 if (ConstantExpr::getZExt(TruncC, Ty) != C)
1050 return nullptr;
1051
1052 // udiv C, (zext X) --> zext (udiv C', X)
1053 // urem C, (zext X) --> zext (urem C', X)
1054 return new ZExtInst(Builder.CreateBinOp(Opcode, TruncC, X), Ty);
1055 }
1056
1057 return nullptr;
1058}
1059
// Visit an unsigned integer division. Returns a replacement instruction,
// &I if I was mutated, or nullptr if nothing applied.
Instruction *InstCombinerImpl::visitUDiv(BinaryOperator &I) {
  if (Value *V = simplifyUDivInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X;
  const APInt *C1, *C2;
  if (match(Op0, m_LShr(m_Value(X), m_APInt(C1))) && match(Op1, m_APInt(C2))) {
    // (X lshr C1) udiv C2 --> X udiv (C2 << C1)
    // Only valid if C2 << C1 does not overflow the bit width.
    bool Overflow;
    APInt C2ShlC1 = C2->ushl_ov(*C1, Overflow);
    if (!Overflow) {
      // Exactness is preserved only if both the lshr and the udiv were exact.
      bool IsExact = I.isExact() && match(Op0, m_Exact(m_Value()));
      BinaryOperator *BO = BinaryOperator::CreateUDiv(
          X, ConstantInt::get(X->getType(), C2ShlC1));
      if (IsExact)
        BO->setIsExact();
      return BO;
    }
  }

  // Op0 / C where C is large (negative) --> zext (Op0 >= C)
  // TODO: Could use isKnownNegative() to handle non-constant values.
  Type *Ty = I.getType();
  if (match(Op1, m_Negative())) {
    Value *Cmp = Builder.CreateICmpUGE(Op0, Op1);
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }
  // Op0 / (sext i1 X) --> zext (Op0 == -1) (if X is 0, the div is undefined)
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
    Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }

  if (Instruction *NarrowDiv = narrowUDivURem(I, Builder))
    return NarrowDiv;

  // If the udiv operands are non-overflowing multiplies with a common operand,
  // then eliminate the common factor:
  // (A * B) / (A * X) --> B / X (and commuted variants)
  // TODO: The code would be reduced if we had m_c_NUWMul pattern matching.
  // TODO: If -reassociation handled this generally, we could remove this.
  Value *A, *B;
  if (match(Op0, m_NUWMul(m_Value(A), m_Value(B)))) {
    if (match(Op1, m_NUWMul(m_Specific(A), m_Value(X))) ||
        match(Op1, m_NUWMul(m_Value(X), m_Specific(A))))
      return BinaryOperator::CreateUDiv(B, X);
    if (match(Op1, m_NUWMul(m_Specific(B), m_Value(X))) ||
        match(Op1, m_NUWMul(m_Value(X), m_Specific(B))))
      return BinaryOperator::CreateUDiv(A, X);
  }

  // Op1 udiv Op2 -> Op1 lshr log2(Op2), if log2() folds away.
  // First query without building IR; only on success re-run to create it.
  if (takeLog2(Builder, Op1, /*Depth*/0, /*DoFold*/false)) {
    Value *Res = takeLog2(Builder, Op1, /*Depth*/0, /*DoFold*/true);
    return replaceInstUsesWith(
        I, Builder.CreateLShr(Op0, Res, I.getName(), I.isExact()));
  }

  return nullptr;
}
1129
// Visit a signed integer division. Returns a replacement instruction,
// &I if I was mutated, or nullptr if nothing applied.
Instruction *InstCombinerImpl::visitSDiv(BinaryOperator &I) {
  if (Value *V = simplifySDivInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  Value *X;
  // sdiv Op0, -1 --> -Op0
  // sdiv Op0, (sext i1 X) --> -Op0 (because if X is 0, the op is undefined)
  if (match(Op1, m_AllOnes()) ||
      (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return BinaryOperator::CreateNeg(Op0);

  // X / INT_MIN --> X == INT_MIN
  if (match(Op1, m_SignMask()))
    return new ZExtInst(Builder.CreateICmpEQ(Op0, Op1), Ty);

  // sdiv exact X, 1<<C --> ashr exact X, C iff 1<<C is non-negative
  // sdiv exact X, -1<<C --> -(ashr exact X, C)
  if (I.isExact() && ((match(Op1, m_Power2()) && match(Op1, m_NonNegative())) ||
                      match(Op1, m_NegatedPower2()))) {
    bool DivisorWasNegative = match(Op1, m_NegatedPower2());
    // Normalize a negative power-of-2 divisor to its positive counterpart
    // so the exact-log can be taken; negate the result afterwards.
    if (DivisorWasNegative)
      Op1 = ConstantExpr::getNeg(cast<Constant>(Op1));
    auto *AShr = BinaryOperator::CreateExactAShr(
        Op0, ConstantExpr::getExactLogBase2(cast<Constant>(Op1)), I.getName());
    if (!DivisorWasNegative)
      return AShr;
    Builder.Insert(AShr);
    AShr->setName(I.getName() + ".neg");
    return BinaryOperator::CreateNeg(AShr, I.getName());
  }

  const APInt *Op1C;
  if (match(Op1, m_APInt(Op1C))) {
    // If the dividend is sign-extended and the constant divisor is small enough
    // to fit in the source type, shrink the division to the narrower type:
    // (sext X) sdiv C --> sext (X sdiv C)
    Value *Op0Src;
    if (match(Op0, m_OneUse(m_SExt(m_Value(Op0Src)))) &&
        Op0Src->getType()->getScalarSizeInBits() >= Op1C->getMinSignedBits()) {

      // In the general case, we need to make sure that the dividend is not the
      // minimum signed value because dividing that by -1 is UB. But here, we
      // know that the -1 divisor case is already handled above.

      Constant *NarrowDivisor =
          ConstantExpr::getTrunc(cast<Constant>(Op1), Op0Src->getType());
      Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor);
      return new SExtInst(NarrowOp, Ty);
    }

    // -X / C --> X / -C (if the negation doesn't overflow).
    // TODO: This could be enhanced to handle arbitrary vector constants by
    // checking if all elements are not the min-signed-val.
    if (!Op1C->isMinSignedValue() &&
        match(Op0, m_NSWSub(m_Zero(), m_Value(X)))) {
      Constant *NegC = ConstantInt::get(Ty, -(*Op1C));
      Instruction *BO = BinaryOperator::CreateSDiv(X, NegC);
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  // -X / Y --> -(X / Y)
  Value *Y;
  if (match(&I, m_SDiv(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNSWNeg(
        Builder.CreateSDiv(X, Y, I.getName(), I.isExact()));

  // abs(X) / X --> X > -1 ? 1 : -1
  // X / abs(X) --> X > -1 ? 1 : -1
  if (match(&I, m_c_BinOp(
                    m_OneUse(m_Intrinsic<Intrinsic::abs>(m_Value(X), m_One())),
                    m_Deferred(X)))) {
    Value *Cond = Builder.CreateIsNotNeg(X);
    return SelectInst::Create(Cond, ConstantInt::get(Ty, 1),
                              ConstantInt::getAllOnesValue(Ty));
  }

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a udiv.
  APInt Mask(APInt::getSignMask(Ty->getScalarSizeInBits()));
  if (MaskedValueIsZero(Op0, Mask, 0, &I)) {
    if (MaskedValueIsZero(Op1, Mask, 0, &I)) {
      // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }

    if (match(Op1, m_NegatedPower2())) {
      // X sdiv (-(1 << C)) -> -(X sdiv (1 << C)) ->
      // -> -(X udiv (1 << C)) -> -(X u>> C)
      Constant *CNegLog2 = ConstantExpr::getExactLogBase2(
          ConstantExpr::getNeg(cast<Constant>(Op1)));
      Value *Shr = Builder.CreateLShr(Op0, CNegLog2, I.getName(), I.isExact());
      return BinaryOperator::CreateNeg(Shr);
    }

    if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
      // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
      // Safe because the only negative value (1 << Y) can take on is
      // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
      // the sign bit set.
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  return nullptr;
}
1251
1252/// Remove negation and try to convert division into multiplication.
1253static Instruction *foldFDivConstantDivisor(BinaryOperator &I) {
1254 Constant *C;
1255 if (!match(I.getOperand(1), m_Constant(C)))
1256 return nullptr;
1257
1258 // -X / C --> X / -C
1259 Value *X;
1260 const DataLayout &DL = I.getModule()->getDataLayout();
1261 if (match(I.getOperand(0), m_FNeg(m_Value(X))))
1262 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
1263 return BinaryOperator::CreateFDivFMF(X, NegC, &I);
1264
1265 // If the constant divisor has an exact inverse, this is always safe. If not,
1266 // then we can still create a reciprocal if fast-math-flags allow it and the
1267 // constant is a regular number (not zero, infinite, or denormal).
1268 if (!(C->hasExactInverseFP() || (I.hasAllowReciprocal() && C->isNormalFP())))
1269 return nullptr;
1270
1271 // Disallow denormal constants because we don't know what would happen
1272 // on all targets.
1273 // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
1274 // denorms are flushed?
1275 auto *RecipC = ConstantFoldBinaryOpOperands(
1276 Instruction::FDiv, ConstantFP::get(I.getType(), 1.0), C, DL);
1277 if (!RecipC || !RecipC->isNormalFP())
1278 return nullptr;
1279
1280 // X / C --> X * (1 / C)
1281 return BinaryOperator::CreateFMulFMF(I.getOperand(0), RecipC, &I);
1282}
1283
1284/// Remove negation and try to reassociate constant math.
1285static Instruction *foldFDivConstantDividend(BinaryOperator &I) {
1286 Constant *C;
1287 if (!match(I.getOperand(0), m_Constant(C)))
1288 return nullptr;
1289
1290 // C / -X --> -C / X
1291 Value *X;
1292 const DataLayout &DL = I.getModule()->getDataLayout();
1293 if (match(I.getOperand(1), m_FNeg(m_Value(X))))
1294 if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
1295 return BinaryOperator::CreateFDivFMF(NegC, X, &I);
1296
1297 if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
1298 return nullptr;
1299
1300 // Try to reassociate C / X expressions where X includes another constant.
1301 Constant *C2, *NewC = nullptr;
1302 if (match(I.getOperand(1), m_FMul(m_Value(X), m_Constant(C2)))) {
1303 // C / (X * C2) --> (C / C2) / X
1304 NewC = ConstantFoldBinaryOpOperands(Instruction::FDiv, C, C2, DL);
1305 } else if (match(I.getOperand(1), m_FDiv(m_Value(X), m_Constant(C2)))) {
1306 // C / (X / C2) --> (C * C2) / X
1307 NewC = ConstantFoldBinaryOpOperands(Instruction::FMul, C, C2, DL);
1308 }
1309 // Disallow denormal constants because we don't know what would happen
1310 // on all targets.
1311 // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
1312 // denorms are flushed?
1313 if (!NewC || !NewC->isNormalFP())
1314 return nullptr;
1315
1316 return BinaryOperator::CreateFDivFMF(NewC, X, &I);
1317}
1318
/// Negate the exponent of pow/exp to fold division-by-pow() into multiply.
/// Requires a one-use intrinsic divisor and the 'reassoc' + 'arcp' flags;
/// returns the replacement fmul, or nullptr if no fold applies.
static Instruction *foldFDivPowDivisor(BinaryOperator &I,
                                       InstCombiner::BuilderTy &Builder) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  auto *II = dyn_cast<IntrinsicInst>(Op1);
  if (!II || !II->hasOneUse() || !I.hasAllowReassoc() ||
      !I.hasAllowReciprocal())
    return nullptr;

  // Z / pow(X, Y) --> Z * pow(X, -Y)
  // Z / exp{2}(Y) --> Z * exp{2}(-Y)
  // In the general case, this creates an extra instruction, but fmul allows
  // for better canonicalization and optimization than fdiv.
  Intrinsic::ID IID = II->getIntrinsicID();
  SmallVector<Value *> Args;
  switch (IID) {
  case Intrinsic::pow:
    Args.push_back(II->getArgOperand(0));
    Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(1), &I));
    break;
  case Intrinsic::powi: {
    // Require 'ninf' assuming that makes powi(X, -INT_MIN) acceptable.
    // That is, X ** (huge negative number) is 0.0, ~1.0, or INF and so
    // dividing by that is INF, ~1.0, or 0.0. Code that uses powi allows
    // non-standard results, so this corner case should be acceptable if the
    // code rules out INF values.
    if (!I.hasNoInfs())
      return nullptr;
    Args.push_back(II->getArgOperand(0));
    Args.push_back(Builder.CreateNeg(II->getArgOperand(1)));
    // powi needs both the FP type and the integer exponent type.
    Type *Tys[] = {I.getType(), II->getArgOperand(1)->getType()};
    Value *Pow = Builder.CreateIntrinsic(IID, Tys, Args, &I);
    return BinaryOperator::CreateFMulFMF(Op0, Pow, &I);
  }
  case Intrinsic::exp:
  case Intrinsic::exp2:
    Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(0), &I));
    break;
  default:
    return nullptr;
  }
  // Common exit for pow/exp/exp2: rebuild the intrinsic with negated exponent.
  Value *Pow = Builder.CreateIntrinsic(IID, I.getType(), Args, &I);
  return BinaryOperator::CreateFMulFMF(Op0, Pow, &I);
}
1363
// Visit a floating-point division. Returns a replacement instruction,
// &I if I was mutated, or nullptr if nothing applied.
Instruction *InstCombinerImpl::visitFDiv(BinaryOperator &I) {
  Module *M = I.getModule();

  if (Value *V = simplifyFDivInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  if (Instruction *R = foldFDivConstantDivisor(I))
    return R;

  if (Instruction *R = foldFDivConstantDividend(I))
    return R;

  if (Instruction *R = foldFPSignBitOps(I))
    return R;

  // Constant-over-select (in either position): fold the op into the select.
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (isa<Constant>(Op1))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (I.hasAllowReassoc() && I.hasAllowReciprocal()) {
    Value *X, *Y;
    if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op1))) {
      // (X / Y) / Z => X / (Y * Z)
      Value *YZ = Builder.CreateFMulFMF(Y, Op1, &I);
      return BinaryOperator::CreateFDivFMF(X, YZ, &I);
    }
    if (match(Op1, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op0))) {
      // Z / (X / Y) => (Y * Z) / X
      Value *YZ = Builder.CreateFMulFMF(Y, Op0, &I);
      return BinaryOperator::CreateFDivFMF(YZ, X, &I);
    }
    // Z / (1.0 / Y) => (Y * Z)
    //
    // This is a special case of Z / (X / Y) => (Y * Z) / X, with X = 1.0. The
    // m_OneUse check is avoided because even in the case of the multiple uses
    // for 1.0/Y, the number of instructions remain the same and a division is
    // replaced by a multiplication.
    if (match(Op1, m_FDiv(m_SpecificFP(1.0), m_Value(Y))))
      return BinaryOperator::CreateFMulFMF(Y, Op0, &I);
  }

  if (I.hasAllowReassoc() && Op0->hasOneUse() && Op1->hasOneUse()) {
    // sin(X) / cos(X) -> tan(X)
    // cos(X) / sin(X) -> 1/tan(X) (cotangent)
    Value *X;
    bool IsTan = match(Op0, m_Intrinsic<Intrinsic::sin>(m_Value(X))) &&
                 match(Op1, m_Intrinsic<Intrinsic::cos>(m_Specific(X)));
    bool IsCot =
        !IsTan && match(Op0, m_Intrinsic<Intrinsic::cos>(m_Value(X))) &&
                  match(Op1, m_Intrinsic<Intrinsic::sin>(m_Specific(X)));

    // Requires the target to provide a libm tan for this FP type.
    if ((IsTan || IsCot) && hasFloatFn(M, &TLI, I.getType(), LibFunc_tan,
                                       LibFunc_tanf, LibFunc_tanl)) {
      IRBuilder<> B(&I);
      IRBuilder<>::FastMathFlagGuard FMFGuard(B);
      B.setFastMathFlags(I.getFastMathFlags());
      AttributeList Attrs =
          cast<CallBase>(Op0)->getCalledFunction()->getAttributes();
      Value *Res = emitUnaryFloatFnCall(X, &TLI, LibFunc_tan, LibFunc_tanf,
                                        LibFunc_tanl, B, Attrs);
      if (IsCot)
        Res = B.CreateFDiv(ConstantFP::get(I.getType(), 1.0), Res);
      return replaceInstUsesWith(I, Res);
    }
  }

  // X / (X * Y) --> 1.0 / Y
  // Reassociate to (X / X -> 1.0) is legal when NaNs are not allowed.
  // We can ignore the possibility that X is infinity because INF/INF is NaN.
  Value *X, *Y;
  if (I.hasNoNaNs() && I.hasAllowReassoc() &&
      match(Op1, m_c_FMul(m_Specific(Op0), m_Value(Y)))) {
    replaceOperand(I, 0, ConstantFP::get(I.getType(), 1.0));
    replaceOperand(I, 1, Y);
    return &I;
  }

  // X / fabs(X) -> copysign(1.0, X)
  // fabs(X) / X -> copysign(1.0, X)
  if (I.hasNoNaNs() && I.hasNoInfs() &&
      (match(&I, m_FDiv(m_Value(X), m_FAbs(m_Deferred(X)))) ||
       match(&I, m_FDiv(m_FAbs(m_Value(X)), m_Deferred(X))))) {
    Value *V = Builder.CreateBinaryIntrinsic(
        Intrinsic::copysign, ConstantFP::get(I.getType(), 1.0), X, &I);
    return replaceInstUsesWith(I, V);
  }

  if (Instruction *Mul = foldFDivPowDivisor(I, Builder))
    return Mul;

  return nullptr;
}
1473
/// This function implements the transforms common to both integer remainder
/// instructions (urem and srem). It is called by the visitors to those integer
/// remainder instructions.
/// Common integer remainder transforms
///
/// Returns a replacement instruction, &I if I was mutated, or nullptr.
Instruction *InstCombinerImpl::commonIRemTransforms(BinaryOperator &I) {
  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
    return replaceOperand(I, 1, V);

  // Handle cases involving: rem X, (select Cond, Y, Z)
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  // If the divisor is a select-of-constants, try to constant fold all rem ops:
  // C % (select Cond, TrueC, FalseC) --> select Cond, (C % TrueC), (C % FalseC)
  // TODO: Adapt simplifyDivRemOfSelectWithZeroOp to allow this and other folds.
  if (match(Op0, m_ImmConstant()) &&
      match(Op1, m_Select(m_Value(), m_ImmConstant(), m_ImmConstant()))) {
    if (Instruction *R = FoldOpIntoSelect(I, cast<SelectInst>(Op1),
                                          /*FoldWithMultiUse*/ true))
      return R;
  }

  if (isa<Constant>(Op1)) {
    if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
      if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
        if (Instruction *R = FoldOpIntoSelect(I, SI))
          return R;
      } else if (auto *PN = dyn_cast<PHINode>(Op0I)) {
        const APInt *Op1Int;
        // Exclude divisors of 0 (and INT_MIN for srem) so speculated rem
        // instructions cannot trap.
        if (match(Op1, m_APInt(Op1Int)) && !Op1Int->isMinValue() &&
            (I.getOpcode() == Instruction::URem ||
             !Op1Int->isMinSignedValue())) {
          // foldOpIntoPhi will speculate instructions to the end of the PHI's
          // predecessor blocks, so do this only if we know the srem or urem
          // will not fault.
          if (Instruction *NV = foldOpIntoPhi(I, PN))
            return NV;
        }
      }

      // See if we can fold away this rem instruction.
      if (SimplifyDemandedInstructionBits(I))
        return &I;
    }
  }

  return nullptr;
}
1528
1529Instruction *InstCombinerImpl::visitURem(BinaryOperator &I) {
1530 if (Value *V = simplifyURemInst(I.getOperand(0), I.getOperand(1),
1
Assuming 'V' is null
2
Taking false branch
1531 SQ.getWithInstruction(&I)))
1532 return replaceInstUsesWith(I, V);
1533
1534 if (Instruction *X = foldVectorBinop(I))
3
Assuming 'X' is null
4
Taking false branch
1535 return X;
1536
1537 if (Instruction *common = commonIRemTransforms(I))
5
Calling 'InstCombinerImpl::commonIRemTransforms'
1538 return common;
1539
1540 if (Instruction *NarrowRem = narrowUDivURem(I, Builder))
1541 return NarrowRem;
1542
1543 // X urem Y -> X and Y-1, where Y is a power of 2,
1544 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1545 Type *Ty = I.getType();
1546 if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
1547 // This may increase instruction count, we don't enforce that Y is a
1548 // constant.
1549 Constant *N1 = Constant::getAllOnesValue(Ty);
1550 Value *Add = Builder.CreateAdd(Op1, N1);
1551 return BinaryOperator::CreateAnd(Op0, Add);
1552 }
1553
1554 // 1 urem X -> zext(X != 1)
1555 if (match(Op0, m_One())) {
1556 Value *Cmp = Builder.CreateICmpNE(Op1, ConstantInt::get(Ty, 1));
1557 return CastInst::CreateZExtOrBitCast(Cmp, Ty);
1558 }
1559
1560 // Op0 urem C -> Op0 < C ? Op0 : Op0 - C, where C >= signbit.
1561 // Op0 must be frozen because we are increasing its number of uses.
1562 if (match(Op1, m_Negative())) {
1563 Value *F0 = Builder.CreateFreeze(Op0, Op0->getName() + ".fr");
1564 Value *Cmp = Builder.CreateICmpULT(F0, Op1);
1565 Value *Sub = Builder.CreateSub(F0, Op1);
1566 return SelectInst::Create(Cmp, F0, Sub);
1567 }
1568
1569 // If the divisor is a sext of a boolean, then the divisor must be max
1570 // unsigned value (-1). Therefore, the remainder is Op0 unless Op0 is also
1571 // max unsigned value. In that case, the remainder is 0:
1572 // urem Op0, (sext i1 X) --> (Op0 == -1) ? 0 : Op0
1573 Value *X;
1574 if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
1575 Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
1576 return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Op0);
1577 }
1578
1579 return nullptr;
1580}
1581
1582Instruction *InstCombinerImpl::visitSRem(BinaryOperator &I) {
1583 if (Value *V = simplifySRemInst(I.getOperand(0), I.getOperand(1),
1584 SQ.getWithInstruction(&I)))
1585 return replaceInstUsesWith(I, V);
1586
1587 if (Instruction *X = foldVectorBinop(I))
1588 return X;
1589
1590 // Handle the integer rem common cases
1591 if (Instruction *Common = commonIRemTransforms(I))
1592 return Common;
1593
1594 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1595 {
1596 const APInt *Y;
1597 // X % -Y -> X % Y
1598 if (match(Op1, m_Negative(Y)) && !Y->isMinSignedValue())
1599 return replaceOperand(I, 1, ConstantInt::get(I.getType(), -*Y));
1600 }
1601
1602 // -X srem Y --> -(X srem Y)
1603 Value *X, *Y;
1604 if (match(&I, m_SRem(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
1605 return BinaryOperator::CreateNSWNeg(Builder.CreateSRem(X, Y));
1606
1607 // If the sign bits of both operands are zero (i.e. we can prove they are
1608 // unsigned inputs), turn this into a urem.
1609 APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
1610 if (MaskedValueIsZero(Op1, Mask, 0, &I) &&
1611 MaskedValueIsZero(Op0, Mask, 0, &I)) {
1612 // X srem Y -> X urem Y, iff X and Y don't have sign bit set
1613 return BinaryOperator::CreateURem(Op0, Op1, I.getName());
1614 }
1615
1616 // If it's a constant vector, flip any negative values positive.
1617 if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) {
1618 Constant *C = cast<Constant>(Op1);
1619 unsigned VWidth = cast<FixedVectorType>(C->getType())->getNumElements();
1620
1621 bool hasNegative = false;
1622 bool hasMissing = false;
1623 for (unsigned i = 0; i != VWidth; ++i) {
1624 Constant *Elt = C->getAggregateElement(i);
1625 if (!Elt) {
1626 hasMissing = true;
1627 break;
1628 }
1629
1630 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elt))
1631 if (RHS->isNegative())
1632 hasNegative = true;
1633 }
1634
1635 if (hasNegative && !hasMissing) {
1636 SmallVector<Constant *, 16> Elts(VWidth);
1637 for (unsigned i = 0; i != VWidth; ++i) {
1638 Elts[i] = C->getAggregateElement(i); // Handle undef, etc.
1639 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elts[i])) {
1640 if (RHS->isNegative())
1641 Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
1642 }
1643 }
1644
1645 Constant *NewRHSV = ConstantVector::get(Elts);
1646 if (NewRHSV != C) // Don't loop on -MININT
1647 return replaceOperand(I, 1, NewRHSV);
1648 }
1649 }
1650
1651 return nullptr;
1652}
1653
1654Instruction *InstCombinerImpl::visitFRem(BinaryOperator &I) {
1655 if (Value *V = simplifyFRemInst(I.getOperand(0), I.getOperand(1),
1656 I.getFastMathFlags(),
1657 SQ.getWithInstruction(&I)))
1658 return replaceInstUsesWith(I, V);
1659
1660 if (Instruction *X = foldVectorBinop(I))
1661 return X;
1662
1663 if (Instruction *Phi = foldBinopWithPhiOperands(I))
1664 return Phi;
1665
1666 return nullptr;
1667}