Bug Summary

File: build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
Warning: line 696, column 24
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name InstCombineMulDivRem.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm -resource-dir /usr/lib/llvm-15/lib/clang/15.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Transforms/InstCombine -I /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Transforms/InstCombine -I include -I /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-15/lib/clang/15.0.0/include -internal-isystem /usr/local/include -internal-isystem 
/usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-04-20-140412-16051-1 -x c++ /build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
1//===- InstCombineMulDivRem.cpp -------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the visit functions for mul, fmul, sdiv, udiv, fdiv,
10// srem, urem, frem.
11//
12//===----------------------------------------------------------------------===//
13
14#include "InstCombineInternal.h"
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/SmallVector.h"
17#include "llvm/Analysis/InstructionSimplify.h"
18#include "llvm/IR/BasicBlock.h"
19#include "llvm/IR/Constant.h"
20#include "llvm/IR/Constants.h"
21#include "llvm/IR/InstrTypes.h"
22#include "llvm/IR/Instruction.h"
23#include "llvm/IR/Instructions.h"
24#include "llvm/IR/IntrinsicInst.h"
25#include "llvm/IR/Intrinsics.h"
26#include "llvm/IR/Operator.h"
27#include "llvm/IR/PatternMatch.h"
28#include "llvm/IR/Type.h"
29#include "llvm/IR/Value.h"
30#include "llvm/Support/Casting.h"
31#include "llvm/Support/ErrorHandling.h"
32#include "llvm/Transforms/InstCombine/InstCombiner.h"
33#include "llvm/Transforms/Utils/BuildLibCalls.h"
34#include <cassert>
35
36#define DEBUG_TYPE"instcombine" "instcombine"
37#include "llvm/Transforms/Utils/InstructionWorklist.h"
38
39using namespace llvm;
40using namespace PatternMatch;
41
42/// The specific integer value is used in a context where it is known to be
43/// non-zero. If this allows us to simplify the computation, do so and return
44/// the new operand, otherwise return null.
45static Value *simplifyValueKnownNonZero(Value *V, InstCombinerImpl &IC,
46 Instruction &CxtI) {
47 // If V has multiple uses, then we would have to do more analysis to determine
48 // if this is safe. For example, the use could be in dynamically unreached
49 // code.
50 if (!V->hasOneUse()) return nullptr;
51
52 bool MadeChange = false;
53
54 // ((1 << A) >>u B) --> (1 << (A-B))
55 // Because V cannot be zero, we know that B is less than A.
56 Value *A = nullptr, *B = nullptr, *One = nullptr;
57 if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(One), m_Value(A))), m_Value(B))) &&
58 match(One, m_One())) {
59 A = IC.Builder.CreateSub(A, B);
60 return IC.Builder.CreateShl(One, A);
61 }
62
63 // (PowerOfTwo >>u B) --> isExact since shifting out the result would make it
64 // inexact. Similarly for <<.
65 BinaryOperator *I = dyn_cast<BinaryOperator>(V);
66 if (I && I->isLogicalShift() &&
67 IC.isKnownToBeAPowerOfTwo(I->getOperand(0), false, 0, &CxtI)) {
68 // We know that this is an exact/nuw shift and that the input is a
69 // non-zero context as well.
70 if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {
71 IC.replaceOperand(*I, 0, V2);
72 MadeChange = true;
73 }
74
75 if (I->getOpcode() == Instruction::LShr && !I->isExact()) {
76 I->setIsExact();
77 MadeChange = true;
78 }
79
80 if (I->getOpcode() == Instruction::Shl && !I->hasNoUnsignedWrap()) {
81 I->setHasNoUnsignedWrap();
82 MadeChange = true;
83 }
84 }
85
86 // TODO: Lots more we could do here:
87 // If V is a phi node, we can call this on each of its operands.
88 // "select cond, X, 0" can simplify to "X".
89
90 return MadeChange ? V : nullptr;
91}
92
93// TODO: This is a specific form of a much more general pattern.
94// We could detect a select with any binop identity constant, or we
95// could use SimplifyBinOp to see if either arm of the select reduces.
96// But that needs to be done carefully and/or while removing potential
97// reverse canonicalizations as in InstCombiner::foldSelectIntoOp().
98static Value *foldMulSelectToNegate(BinaryOperator &I,
99 InstCombiner::BuilderTy &Builder) {
100 Value *Cond, *OtherOp;
101
102 // mul (select Cond, 1, -1), OtherOp --> select Cond, OtherOp, -OtherOp
103 // mul OtherOp, (select Cond, 1, -1) --> select Cond, OtherOp, -OtherOp
104 if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_One(), m_AllOnes())),
105 m_Value(OtherOp)))) {
106 bool HasAnyNoWrap = I.hasNoSignedWrap() || I.hasNoUnsignedWrap();
107 Value *Neg = Builder.CreateNeg(OtherOp, "", false, HasAnyNoWrap);
108 return Builder.CreateSelect(Cond, OtherOp, Neg);
109 }
110 // mul (select Cond, -1, 1), OtherOp --> select Cond, -OtherOp, OtherOp
111 // mul OtherOp, (select Cond, -1, 1) --> select Cond, -OtherOp, OtherOp
112 if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_AllOnes(), m_One())),
113 m_Value(OtherOp)))) {
114 bool HasAnyNoWrap = I.hasNoSignedWrap() || I.hasNoUnsignedWrap();
115 Value *Neg = Builder.CreateNeg(OtherOp, "", false, HasAnyNoWrap);
116 return Builder.CreateSelect(Cond, Neg, OtherOp);
117 }
118
119 // fmul (select Cond, 1.0, -1.0), OtherOp --> select Cond, OtherOp, -OtherOp
120 // fmul OtherOp, (select Cond, 1.0, -1.0) --> select Cond, OtherOp, -OtherOp
121 if (match(&I, m_c_FMul(m_OneUse(m_Select(m_Value(Cond), m_SpecificFP(1.0),
122 m_SpecificFP(-1.0))),
123 m_Value(OtherOp)))) {
124 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
125 Builder.setFastMathFlags(I.getFastMathFlags());
126 return Builder.CreateSelect(Cond, OtherOp, Builder.CreateFNeg(OtherOp));
127 }
128
129 // fmul (select Cond, -1.0, 1.0), OtherOp --> select Cond, -OtherOp, OtherOp
130 // fmul OtherOp, (select Cond, -1.0, 1.0) --> select Cond, -OtherOp, OtherOp
131 if (match(&I, m_c_FMul(m_OneUse(m_Select(m_Value(Cond), m_SpecificFP(-1.0),
132 m_SpecificFP(1.0))),
133 m_Value(OtherOp)))) {
134 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
135 Builder.setFastMathFlags(I.getFastMathFlags());
136 return Builder.CreateSelect(Cond, Builder.CreateFNeg(OtherOp), OtherOp);
137 }
138
139 return nullptr;
140}
141
142Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
143 if (Value *V = SimplifyMulInst(I.getOperand(0), I.getOperand(1),
144 SQ.getWithInstruction(&I)))
145 return replaceInstUsesWith(I, V);
146
147 if (SimplifyAssociativeOrCommutative(I))
148 return &I;
149
150 if (Instruction *X = foldVectorBinop(I))
151 return X;
152
153 if (Instruction *Phi = foldBinopWithPhiOperands(I))
154 return Phi;
155
156 if (Value *V = SimplifyUsingDistributiveLaws(I))
157 return replaceInstUsesWith(I, V);
158
159 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
160 unsigned BitWidth = I.getType()->getScalarSizeInBits();
161
162 // X * -1 == 0 - X
163 if (match(Op1, m_AllOnes())) {
164 BinaryOperator *BO = BinaryOperator::CreateNeg(Op0, I.getName());
165 if (I.hasNoSignedWrap())
166 BO->setHasNoSignedWrap();
167 return BO;
168 }
169
170 // Also allow combining multiply instructions on vectors.
171 {
172 Value *NewOp;
173 Constant *C1, *C2;
174 const APInt *IVal;
175 if (match(&I, m_Mul(m_Shl(m_Value(NewOp), m_Constant(C2)),
176 m_Constant(C1))) &&
177 match(C1, m_APInt(IVal))) {
178 // ((X << C2)*C1) == (X * (C1 << C2))
179 Constant *Shl = ConstantExpr::getShl(C1, C2);
180 BinaryOperator *Mul = cast<BinaryOperator>(I.getOperand(0));
181 BinaryOperator *BO = BinaryOperator::CreateMul(NewOp, Shl);
182 if (I.hasNoUnsignedWrap() && Mul->hasNoUnsignedWrap())
183 BO->setHasNoUnsignedWrap();
184 if (I.hasNoSignedWrap() && Mul->hasNoSignedWrap() &&
185 Shl->isNotMinSignedValue())
186 BO->setHasNoSignedWrap();
187 return BO;
188 }
189
190 if (match(&I, m_Mul(m_Value(NewOp), m_Constant(C1)))) {
191 // Replace X*(2^C) with X << C, where C is either a scalar or a vector.
192 if (Constant *NewCst = ConstantExpr::getExactLogBase2(C1)) {
193 BinaryOperator *Shl = BinaryOperator::CreateShl(NewOp, NewCst);
194
195 if (I.hasNoUnsignedWrap())
196 Shl->setHasNoUnsignedWrap();
197 if (I.hasNoSignedWrap()) {
198 const APInt *V;
199 if (match(NewCst, m_APInt(V)) && *V != V->getBitWidth() - 1)
200 Shl->setHasNoSignedWrap();
201 }
202
203 return Shl;
204 }
205 }
206 }
207
208 if (Op0->hasOneUse() && match(Op1, m_NegatedPower2())) {
209 // Interpret X * (-1<<C) as (-X) * (1<<C) and try to sink the negation.
210 // The "* (1<<C)" thus becomes a potential shifting opportunity.
211 if (Value *NegOp0 = Negator::Negate(/*IsNegation*/ true, Op0, *this))
212 return BinaryOperator::CreateMul(
213 NegOp0, ConstantExpr::getNeg(cast<Constant>(Op1)), I.getName());
214 }
215
216 if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
217 return FoldedMul;
218
219 if (Value *FoldedMul = foldMulSelectToNegate(I, Builder))
220 return replaceInstUsesWith(I, FoldedMul);
221
222 // Simplify mul instructions with a constant RHS.
223 if (isa<Constant>(Op1)) {
224 // Canonicalize (X+C1)*CI -> X*CI+C1*CI.
225 Value *X;
226 Constant *C1;
227 if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) {
228 Value *Mul = Builder.CreateMul(C1, Op1);
229 // Only go forward with the transform if C1*CI simplifies to a tidier
230 // constant.
231 if (!match(Mul, m_Mul(m_Value(), m_Value())))
232 return BinaryOperator::CreateAdd(Builder.CreateMul(X, Op1), Mul);
233 }
234 }
235
236 // abs(X) * abs(X) -> X * X
237 // nabs(X) * nabs(X) -> X * X
238 if (Op0 == Op1) {
239 Value *X, *Y;
240 SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
241 if (SPF == SPF_ABS || SPF == SPF_NABS)
242 return BinaryOperator::CreateMul(X, X);
243
244 if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
245 return BinaryOperator::CreateMul(X, X);
246 }
247
248 // -X * C --> X * -C
249 Value *X, *Y;
250 Constant *Op1C;
251 if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Constant(Op1C)))
252 return BinaryOperator::CreateMul(X, ConstantExpr::getNeg(Op1C));
253
254 // -X * -Y --> X * Y
255 if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Neg(m_Value(Y)))) {
256 auto *NewMul = BinaryOperator::CreateMul(X, Y);
257 if (I.hasNoSignedWrap() &&
258 cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap() &&
259 cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap())
260 NewMul->setHasNoSignedWrap();
261 return NewMul;
262 }
263
264 // -X * Y --> -(X * Y)
265 // X * -Y --> -(X * Y)
266 if (match(&I, m_c_Mul(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))
267 return BinaryOperator::CreateNeg(Builder.CreateMul(X, Y));
268
269 // (X / Y) * Y = X - (X % Y)
270 // (X / Y) * -Y = (X % Y) - X
271 {
272 Value *Y = Op1;
273 BinaryOperator *Div = dyn_cast<BinaryOperator>(Op0);
274 if (!Div || (Div->getOpcode() != Instruction::UDiv &&
275 Div->getOpcode() != Instruction::SDiv)) {
276 Y = Op0;
277 Div = dyn_cast<BinaryOperator>(Op1);
278 }
279 Value *Neg = dyn_castNegVal(Y);
280 if (Div && Div->hasOneUse() &&
281 (Div->getOperand(1) == Y || Div->getOperand(1) == Neg) &&
282 (Div->getOpcode() == Instruction::UDiv ||
283 Div->getOpcode() == Instruction::SDiv)) {
284 Value *X = Div->getOperand(0), *DivOp1 = Div->getOperand(1);
285
286 // If the division is exact, X % Y is zero, so we end up with X or -X.
287 if (Div->isExact()) {
288 if (DivOp1 == Y)
289 return replaceInstUsesWith(I, X);
290 return BinaryOperator::CreateNeg(X);
291 }
292
293 auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
294 : Instruction::SRem;
295 Value *Rem = Builder.CreateBinOp(RemOpc, X, DivOp1);
296 if (DivOp1 == Y)
297 return BinaryOperator::CreateSub(X, Rem);
298 return BinaryOperator::CreateSub(Rem, X);
299 }
300 }
301
302 /// i1 mul -> i1 and.
303 if (I.getType()->isIntOrIntVectorTy(1))
304 return BinaryOperator::CreateAnd(Op0, Op1);
305
306 // X*(1 << Y) --> X << Y
307 // (1 << Y)*X --> X << Y
308 {
309 Value *Y;
310 BinaryOperator *BO = nullptr;
311 bool ShlNSW = false;
312 if (match(Op0, m_Shl(m_One(), m_Value(Y)))) {
313 BO = BinaryOperator::CreateShl(Op1, Y);
314 ShlNSW = cast<ShlOperator>(Op0)->hasNoSignedWrap();
315 } else if (match(Op1, m_Shl(m_One(), m_Value(Y)))) {
316 BO = BinaryOperator::CreateShl(Op0, Y);
317 ShlNSW = cast<ShlOperator>(Op1)->hasNoSignedWrap();
318 }
319 if (BO) {
320 if (I.hasNoUnsignedWrap())
321 BO->setHasNoUnsignedWrap();
322 if (I.hasNoSignedWrap() && ShlNSW)
323 BO->setHasNoSignedWrap();
324 return BO;
325 }
326 }
327
328 // (zext bool X) * (zext bool Y) --> zext (and X, Y)
329 // (sext bool X) * (sext bool Y) --> zext (and X, Y)
330 // Note: -1 * -1 == 1 * 1 == 1 (if the extends match, the result is the same)
331 if (((match(Op0, m_ZExt(m_Value(X))) && match(Op1, m_ZExt(m_Value(Y)))) ||
332 (match(Op0, m_SExt(m_Value(X))) && match(Op1, m_SExt(m_Value(Y))))) &&
333 X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
334 (Op0->hasOneUse() || Op1->hasOneUse() || X == Y)) {
335 Value *And = Builder.CreateAnd(X, Y, "mulbool");
336 return CastInst::Create(Instruction::ZExt, And, I.getType());
337 }
338 // (sext bool X) * (zext bool Y) --> sext (and X, Y)
339 // (zext bool X) * (sext bool Y) --> sext (and X, Y)
340 // Note: -1 * 1 == 1 * -1 == -1
341 if (((match(Op0, m_SExt(m_Value(X))) && match(Op1, m_ZExt(m_Value(Y)))) ||
342 (match(Op0, m_ZExt(m_Value(X))) && match(Op1, m_SExt(m_Value(Y))))) &&
343 X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
344 (Op0->hasOneUse() || Op1->hasOneUse())) {
345 Value *And = Builder.CreateAnd(X, Y, "mulbool");
346 return CastInst::Create(Instruction::SExt, And, I.getType());
347 }
348
349 // (zext bool X) * Y --> X ? Y : 0
350 // Y * (zext bool X) --> X ? Y : 0
351 if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
352 return SelectInst::Create(X, Op1, ConstantInt::get(I.getType(), 0));
353 if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
354 return SelectInst::Create(X, Op0, ConstantInt::get(I.getType(), 0));
355
356 // (sext bool X) * C --> X ? -C : 0
357 Constant *ImmC;
358 if (match(Op0, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1) &&
359 match(Op1, m_ImmConstant(ImmC))) {
360 Constant *NegC = ConstantExpr::getNeg(ImmC);
361 return SelectInst::Create(X, NegC, ConstantInt::getNullValue(I.getType()));
362 }
363
364 // (lshr X, 31) * Y --> (ashr X, 31) & Y
365 // Y * (lshr X, 31) --> (ashr X, 31) & Y
366 // TODO: We are not checking one-use because the elimination of the multiply
367 // is better for analysis?
368 // TODO: Should we canonicalize to '(X < 0) ? Y : 0' instead? That would be
369 // more similar to what we're doing above.
370 const APInt *C;
371 if (match(Op0, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
372 return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op1);
373 if (match(Op1, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
374 return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op0);
375
376 // ((ashr X, 31) | 1) * X --> abs(X)
377 // X * ((ashr X, 31) | 1) --> abs(X)
378 if (match(&I, m_c_BinOp(m_Or(m_AShr(m_Value(X),
379 m_SpecificIntAllowUndef(BitWidth - 1)),
380 m_One()),
381 m_Deferred(X)))) {
382 Value *Abs = Builder.CreateBinaryIntrinsic(
383 Intrinsic::abs, X,
384 ConstantInt::getBool(I.getContext(), I.hasNoSignedWrap()));
385 Abs->takeName(&I);
386 return replaceInstUsesWith(I, Abs);
387 }
388
389 if (Instruction *Ext = narrowMathIfNoOverflow(I))
390 return Ext;
391
392 bool Changed = false;
393 if (!I.hasNoSignedWrap() && willNotOverflowSignedMul(Op0, Op1, I)) {
394 Changed = true;
395 I.setHasNoSignedWrap(true);
396 }
397
398 if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedMul(Op0, Op1, I)) {
399 Changed = true;
400 I.setHasNoUnsignedWrap(true);
401 }
402
403 return Changed ? &I : nullptr;
404}
405
406Instruction *InstCombinerImpl::foldFPSignBitOps(BinaryOperator &I) {
407 BinaryOperator::BinaryOps Opcode = I.getOpcode();
408 assert((Opcode == Instruction::FMul || Opcode == Instruction::FDiv) &&(static_cast <bool> ((Opcode == Instruction::FMul || Opcode
== Instruction::FDiv) && "Expected fmul or fdiv") ? void
(0) : __assert_fail ("(Opcode == Instruction::FMul || Opcode == Instruction::FDiv) && \"Expected fmul or fdiv\""
, "llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp",
409, __extension__ __PRETTY_FUNCTION__))
409 "Expected fmul or fdiv")(static_cast <bool> ((Opcode == Instruction::FMul || Opcode
== Instruction::FDiv) && "Expected fmul or fdiv") ? void
(0) : __assert_fail ("(Opcode == Instruction::FMul || Opcode == Instruction::FDiv) && \"Expected fmul or fdiv\""
, "llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp",
409, __extension__ __PRETTY_FUNCTION__))
;
410
411 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
412 Value *X, *Y;
413
414 // -X * -Y --> X * Y
415 // -X / -Y --> X / Y
416 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
417 return BinaryOperator::CreateWithCopiedFlags(Opcode, X, Y, &I);
418
419 // fabs(X) * fabs(X) -> X * X
420 // fabs(X) / fabs(X) -> X / X
421 if (Op0 == Op1 && match(Op0, m_FAbs(m_Value(X))))
422 return BinaryOperator::CreateWithCopiedFlags(Opcode, X, X, &I);
423
424 // fabs(X) * fabs(Y) --> fabs(X * Y)
425 // fabs(X) / fabs(Y) --> fabs(X / Y)
426 if (match(Op0, m_FAbs(m_Value(X))) && match(Op1, m_FAbs(m_Value(Y))) &&
427 (Op0->hasOneUse() || Op1->hasOneUse())) {
428 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
429 Builder.setFastMathFlags(I.getFastMathFlags());
430 Value *XY = Builder.CreateBinOp(Opcode, X, Y);
431 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, XY);
432 Fabs->takeName(&I);
433 return replaceInstUsesWith(I, Fabs);
434 }
435
436 return nullptr;
437}
438
439Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
440 if (Value *V = SimplifyFMulInst(I.getOperand(0), I.getOperand(1),
441 I.getFastMathFlags(),
442 SQ.getWithInstruction(&I)))
443 return replaceInstUsesWith(I, V);
444
445 if (SimplifyAssociativeOrCommutative(I))
446 return &I;
447
448 if (Instruction *X = foldVectorBinop(I))
449 return X;
450
451 if (Instruction *Phi = foldBinopWithPhiOperands(I))
452 return Phi;
453
454 if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
455 return FoldedMul;
456
457 if (Value *FoldedMul = foldMulSelectToNegate(I, Builder))
458 return replaceInstUsesWith(I, FoldedMul);
459
460 if (Instruction *R = foldFPSignBitOps(I))
461 return R;
462
463 // X * -1.0 --> -X
464 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
465 if (match(Op1, m_SpecificFP(-1.0)))
466 return UnaryOperator::CreateFNegFMF(Op0, &I);
467
468 // -X * C --> X * -C
469 Value *X, *Y;
470 Constant *C;
471 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_Constant(C)))
472 return BinaryOperator::CreateFMulFMF(X, ConstantExpr::getFNeg(C), &I);
473
474 // (select A, B, C) * (select A, D, E) --> select A, (B*D), (C*E)
475 if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
476 return replaceInstUsesWith(I, V);
477
478 if (I.hasAllowReassoc()) {
479 // Reassociate constant RHS with another constant to form constant
480 // expression.
481 if (match(Op1, m_Constant(C)) && C->isFiniteNonZeroFP()) {
482 Constant *C1;
483 if (match(Op0, m_OneUse(m_FDiv(m_Constant(C1), m_Value(X))))) {
484 // (C1 / X) * C --> (C * C1) / X
485 Constant *CC1 = ConstantExpr::getFMul(C, C1);
486 if (CC1->isNormalFP())
487 return BinaryOperator::CreateFDivFMF(CC1, X, &I);
488 }
489 if (match(Op0, m_FDiv(m_Value(X), m_Constant(C1)))) {
490 // (X / C1) * C --> X * (C / C1)
491 Constant *CDivC1 = ConstantExpr::getFDiv(C, C1);
492 if (CDivC1->isNormalFP())
493 return BinaryOperator::CreateFMulFMF(X, CDivC1, &I);
494
495 // If the constant was a denormal, try reassociating differently.
496 // (X / C1) * C --> X / (C1 / C)
497 Constant *C1DivC = ConstantExpr::getFDiv(C1, C);
498 if (Op0->hasOneUse() && C1DivC->isNormalFP())
499 return BinaryOperator::CreateFDivFMF(X, C1DivC, &I);
500 }
501
502 // We do not need to match 'fadd C, X' and 'fsub X, C' because they are
503 // canonicalized to 'fadd X, C'. Distributing the multiply may allow
504 // further folds and (X * C) + C2 is 'fma'.
505 if (match(Op0, m_OneUse(m_FAdd(m_Value(X), m_Constant(C1))))) {
506 // (X + C1) * C --> (X * C) + (C * C1)
507 Constant *CC1 = ConstantExpr::getFMul(C, C1);
508 Value *XC = Builder.CreateFMulFMF(X, C, &I);
509 return BinaryOperator::CreateFAddFMF(XC, CC1, &I);
510 }
511 if (match(Op0, m_OneUse(m_FSub(m_Constant(C1), m_Value(X))))) {
512 // (C1 - X) * C --> (C * C1) - (X * C)
513 Constant *CC1 = ConstantExpr::getFMul(C, C1);
514 Value *XC = Builder.CreateFMulFMF(X, C, &I);
515 return BinaryOperator::CreateFSubFMF(CC1, XC, &I);
516 }
517 }
518
519 Value *Z;
520 if (match(&I, m_c_FMul(m_OneUse(m_FDiv(m_Value(X), m_Value(Y))),
521 m_Value(Z)))) {
522 // Sink division: (X / Y) * Z --> (X * Z) / Y
523 Value *NewFMul = Builder.CreateFMulFMF(X, Z, &I);
524 return BinaryOperator::CreateFDivFMF(NewFMul, Y, &I);
525 }
526
527 // sqrt(X) * sqrt(Y) -> sqrt(X * Y)
528 // nnan disallows the possibility of returning a number if both operands are
529 // negative (in that case, we should return NaN).
530 if (I.hasNoNaNs() &&
531 match(Op0, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(X)))) &&
532 match(Op1, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(Y))))) {
533 Value *XY = Builder.CreateFMulFMF(X, Y, &I);
534 Value *Sqrt = Builder.CreateUnaryIntrinsic(Intrinsic::sqrt, XY, &I);
535 return replaceInstUsesWith(I, Sqrt);
536 }
537
538 // The following transforms are done irrespective of the number of uses
539 // for the expression "1.0/sqrt(X)".
540 // 1) 1.0/sqrt(X) * X -> X/sqrt(X)
541 // 2) X * 1.0/sqrt(X) -> X/sqrt(X)
542 // We always expect the backend to reduce X/sqrt(X) to sqrt(X), if it
543 // has the necessary (reassoc) fast-math-flags.
544 if (I.hasNoSignedZeros() &&
545 match(Op0, (m_FDiv(m_SpecificFP(1.0), m_Value(Y)))) &&
546 match(Y, m_Intrinsic<Intrinsic::sqrt>(m_Value(X))) && Op1 == X)
547 return BinaryOperator::CreateFDivFMF(X, Y, &I);
548 if (I.hasNoSignedZeros() &&
549 match(Op1, (m_FDiv(m_SpecificFP(1.0), m_Value(Y)))) &&
550 match(Y, m_Intrinsic<Intrinsic::sqrt>(m_Value(X))) && Op0 == X)
551 return BinaryOperator::CreateFDivFMF(X, Y, &I);
552
553 // Like the similar transform in instsimplify, this requires 'nsz' because
554 // sqrt(-0.0) = -0.0, and -0.0 * -0.0 does not simplify to -0.0.
555 if (I.hasNoNaNs() && I.hasNoSignedZeros() && Op0 == Op1 &&
556 Op0->hasNUses(2)) {
557 // Peek through fdiv to find squaring of square root:
558 // (X / sqrt(Y)) * (X / sqrt(Y)) --> (X * X) / Y
559 if (match(Op0, m_FDiv(m_Value(X),
560 m_Intrinsic<Intrinsic::sqrt>(m_Value(Y))))) {
561 Value *XX = Builder.CreateFMulFMF(X, X, &I);
562 return BinaryOperator::CreateFDivFMF(XX, Y, &I);
563 }
564 // (sqrt(Y) / X) * (sqrt(Y) / X) --> Y / (X * X)
565 if (match(Op0, m_FDiv(m_Intrinsic<Intrinsic::sqrt>(m_Value(Y)),
566 m_Value(X)))) {
567 Value *XX = Builder.CreateFMulFMF(X, X, &I);
568 return BinaryOperator::CreateFDivFMF(Y, XX, &I);
569 }
570 }
571
572 if (I.isOnlyUserOfAnyOperand()) {
573 // pow(x, y) * pow(x, z) -> pow(x, y + z)
574 if (match(Op0, m_Intrinsic<Intrinsic::pow>(m_Value(X), m_Value(Y))) &&
575 match(Op1, m_Intrinsic<Intrinsic::pow>(m_Specific(X), m_Value(Z)))) {
576 auto *YZ = Builder.CreateFAddFMF(Y, Z, &I);
577 auto *NewPow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, X, YZ, &I);
578 return replaceInstUsesWith(I, NewPow);
579 }
580
581 // powi(x, y) * powi(x, z) -> powi(x, y + z)
582 if (match(Op0, m_Intrinsic<Intrinsic::powi>(m_Value(X), m_Value(Y))) &&
583 match(Op1, m_Intrinsic<Intrinsic::powi>(m_Specific(X), m_Value(Z))) &&
584 Y->getType() == Z->getType()) {
585 auto *YZ = Builder.CreateAdd(Y, Z);
586 auto *NewPow = Builder.CreateIntrinsic(
587 Intrinsic::powi, {X->getType(), YZ->getType()}, {X, YZ}, &I);
588 return replaceInstUsesWith(I, NewPow);
589 }
590
591 // exp(X) * exp(Y) -> exp(X + Y)
592 if (match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))) &&
593 match(Op1, m_Intrinsic<Intrinsic::exp>(m_Value(Y)))) {
594 Value *XY = Builder.CreateFAddFMF(X, Y, &I);
595 Value *Exp = Builder.CreateUnaryIntrinsic(Intrinsic::exp, XY, &I);
596 return replaceInstUsesWith(I, Exp);
597 }
598
599 // exp2(X) * exp2(Y) -> exp2(X + Y)
600 if (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) &&
601 match(Op1, m_Intrinsic<Intrinsic::exp2>(m_Value(Y)))) {
602 Value *XY = Builder.CreateFAddFMF(X, Y, &I);
603 Value *Exp2 = Builder.CreateUnaryIntrinsic(Intrinsic::exp2, XY, &I);
604 return replaceInstUsesWith(I, Exp2);
605 }
606 }
607
608 // (X*Y) * X => (X*X) * Y where Y != X
609 // The purpose is two-fold:
610 // 1) to form a power expression (of X).
611 // 2) potentially shorten the critical path: After transformation, the
612 // latency of the instruction Y is amortized by the expression of X*X,
613 // and therefore Y is in a "less critical" position compared to what it
614 // was before the transformation.
615 if (match(Op0, m_OneUse(m_c_FMul(m_Specific(Op1), m_Value(Y)))) &&
616 Op1 != Y) {
617 Value *XX = Builder.CreateFMulFMF(Op1, Op1, &I);
618 return BinaryOperator::CreateFMulFMF(XX, Y, &I);
619 }
620 if (match(Op1, m_OneUse(m_c_FMul(m_Specific(Op0), m_Value(Y)))) &&
621 Op0 != Y) {
622 Value *XX = Builder.CreateFMulFMF(Op0, Op0, &I);
623 return BinaryOperator::CreateFMulFMF(XX, Y, &I);
624 }
625 }
626
627 // log2(X * 0.5) * Y = log2(X) * Y - Y
628 if (I.isFast()) {
629 IntrinsicInst *Log2 = nullptr;
630 if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::log2>(
631 m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
632 Log2 = cast<IntrinsicInst>(Op0);
633 Y = Op1;
634 }
635 if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::log2>(
636 m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
637 Log2 = cast<IntrinsicInst>(Op1);
638 Y = Op0;
639 }
640 if (Log2) {
641 Value *Log2 = Builder.CreateUnaryIntrinsic(Intrinsic::log2, X, &I);
642 Value *LogXTimesY = Builder.CreateFMulFMF(Log2, Y, &I);
643 return BinaryOperator::CreateFSubFMF(LogXTimesY, Y, &I);
644 }
645 }
646
647 return nullptr;
648}
649
650/// Fold a divide or remainder with a select instruction divisor when one of the
651/// select operands is zero. In that case, we can use the other select operand
652/// because div/rem by zero is undefined.
653bool InstCombinerImpl::simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I) {
654 SelectInst *SI = dyn_cast<SelectInst>(I.getOperand(1));
655 if (!SI)
11
Assuming 'SI' is non-null
12
Taking false branch
656 return false;
657
658 int NonNullOperand;
659 if (match(SI->getTrueValue(), m_Zero()))
13
Assuming the condition is true
14
Taking true branch
660 // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
661 NonNullOperand = 2;
662 else if (match(SI->getFalseValue(), m_Zero()))
663 // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
664 NonNullOperand = 1;
665 else
666 return false;
667
668 // Change the div/rem to use 'Y' instead of the select.
669 replaceOperand(I, 1, SI->getOperand(NonNullOperand));
670
671 // Okay, we know we replace the operand of the div/rem with 'Y' with no
672 // problem. However, the select, or the condition of the select may have
673 // multiple uses. Based on our knowledge that the operand must be non-zero,
674 // propagate the known value for the select into other uses of it, and
675 // propagate a known value of the condition into its other users.
676
677 // If the select and condition only have a single use, don't bother with this,
678 // early exit.
679 Value *SelectCond = SI->getCondition();
680 if (SI->use_empty() && SelectCond->hasOneUse())
681 return true;
682
683 // Scan the current block backward, looking for other uses of SI.
684 BasicBlock::iterator BBI = I.getIterator(), BBFront = I.getParent()->begin();
685 Type *CondTy = SelectCond->getType();
686 while (BBI != BBFront) {
15
Loop condition is true. Entering loop body
23
Loop condition is true. Entering loop body
687 --BBI;
688 // If we found an instruction that we can't assume will return, so
689 // information from below it cannot be propagated above it.
690 if (!isGuaranteedToTransferExecutionToSuccessor(&*BBI))
16
Assuming the condition is false
17
Taking false branch
24
Assuming the condition is false
25
Taking false branch
691 break;
692
693 // Replace uses of the select or its condition with the known values.
694 for (Use &Op : BBI->operands()) {
18
Assuming '__begin2' is equal to '__end2'
26
Assuming '__begin2' is not equal to '__end2'
695 if (Op == SI) {
27
Assuming the condition is true
28
Taking true branch
696 replaceUse(Op, SI->getOperand(NonNullOperand));
29
Called C++ object pointer is null
697 Worklist.push(&*BBI);
698 } else if (Op == SelectCond) {
699 replaceUse(Op, NonNullOperand == 1 ? ConstantInt::getTrue(CondTy)
700 : ConstantInt::getFalse(CondTy));
701 Worklist.push(&*BBI);
702 }
703 }
704
705 // If we passed the instruction, quit looking for it.
706 if (&*BBI == SI)
19
Assuming the condition is true
20
Taking true branch
707 SI = nullptr;
21
Null pointer value stored to 'SI'
708 if (&*BBI == SelectCond)
22
Assuming the condition is false
709 SelectCond = nullptr;
710
711 // If we ran out of things to eliminate, break out of the loop.
712 if (!SelectCond
22.1
'SelectCond' is non-null
&& !SI)
713 break;
714
715 }
716 return true;
717}
718
719/// True if the multiply can not be expressed in an int this size.
720static bool multiplyOverflows(const APInt &C1, const APInt &C2, APInt &Product,
721 bool IsSigned) {
722 bool Overflow;
723 Product = IsSigned ? C1.smul_ov(C2, Overflow) : C1.umul_ov(C2, Overflow);
724 return Overflow;
725}
726
727/// True if C1 is a multiple of C2. Quotient contains C1/C2.
728static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient,
729 bool IsSigned) {
730 assert(C1.getBitWidth() == C2.getBitWidth() && "Constant widths not equal")(static_cast <bool> (C1.getBitWidth() == C2.getBitWidth
() && "Constant widths not equal") ? void (0) : __assert_fail
("C1.getBitWidth() == C2.getBitWidth() && \"Constant widths not equal\""
, "llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp",
730, __extension__ __PRETTY_FUNCTION__))
;
731
732 // Bail if we will divide by zero.
733 if (C2.isZero())
734 return false;
735
736 // Bail if we would divide INT_MIN by -1.
737 if (IsSigned && C1.isMinSignedValue() && C2.isAllOnes())
738 return false;
739
740 APInt Remainder(C1.getBitWidth(), /*val=*/0ULL, IsSigned);
741 if (IsSigned)
742 APInt::sdivrem(C1, C2, Quotient, Remainder);
743 else
744 APInt::udivrem(C1, C2, Quotient, Remainder);
745
746 return Remainder.isMinValue();
747}
748
749 /// This function implements the transforms common to both integer division
750 /// instructions (udiv and sdiv). It is called by the visitors to those integer
751 /// division instructions.
752 /// Common integer divide transforms
/// \returns a replacement instruction, &I if \p I was mutated in place, or
/// nullptr when no common transform applies. Transforms are attempted in
/// order; the first applicable one wins.
753 Instruction *InstCombinerImpl::commonIDivTransforms(BinaryOperator &I) {
754 if (Instruction *Phi = foldBinopWithPhiOperands(I))
755 return Phi;
756
757 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
758 bool IsSigned = I.getOpcode() == Instruction::SDiv;
759 Type *Ty = I.getType();
760
761 // The RHS is known non-zero.
762 if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
763 return replaceOperand(I, 1, V);
764
765 // Handle cases involving: [su]div X, (select Cond, Y, Z)
766 // This does not apply for fdiv.
767 if (simplifyDivRemOfSelectWithZeroOp(I))
768 return &I;
769
770 // If the divisor is a select-of-constants, try to constant fold all div ops:
771 // C / (select Cond, TrueC, FalseC) --> select Cond, (C / TrueC), (C / FalseC)
772 // TODO: Adapt simplifyDivRemOfSelectWithZeroOp to allow this and other folds.
773 if (match(Op0, m_ImmConstant()) &&
774 match(Op1, m_Select(m_Value(), m_ImmConstant(), m_ImmConstant()))) {
775 if (Instruction *R = FoldOpIntoSelect(I, cast<SelectInst>(Op1),
776 /*FoldWithMultiUse*/ true))
777 return R;
778 }
779
// The folds in this group all require a compile-time-constant divisor C2.
780 const APInt *C2;
781 if (match(Op1, m_APInt(C2))) {
782 Value *X;
783 const APInt *C1;
784
785 // (X / C1) / C2 -> X / (C1*C2)
786 if ((IsSigned && match(Op0, m_SDiv(m_Value(X), m_APInt(C1)))) ||
787 (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_APInt(C1))))) {
788 APInt Product(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
789 if (!multiplyOverflows(*C1, *C2, Product, IsSigned))
790 return BinaryOperator::Create(I.getOpcode(), X,
791 ConstantInt::get(Ty, Product));
792 }
793
// The no-wrap flag matching the signedness of the division is required so
// the multiply's value is exact and the factor can be cancelled.
794 if ((IsSigned && match(Op0, m_NSWMul(m_Value(X), m_APInt(C1)))) ||
795 (!IsSigned && match(Op0, m_NUWMul(m_Value(X), m_APInt(C1))))) {
796 APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
797
798 // (X * C1) / C2 -> X / (C2 / C1) if C2 is a multiple of C1.
799 if (isMultiple(*C2, *C1, Quotient, IsSigned)) {
800 auto *NewDiv = BinaryOperator::Create(I.getOpcode(), X,
801 ConstantInt::get(Ty, Quotient));
802 NewDiv->setIsExact(I.isExact());
803 return NewDiv;
804 }
805
806 // (X * C1) / C2 -> X * (C1 / C2) if C1 is a multiple of C2.
807 if (isMultiple(*C1, *C2, Quotient, IsSigned)) {
808 auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
809 ConstantInt::get(Ty, Quotient));
810 auto *OBO = cast<OverflowingBinaryOperator>(Op0);
811 Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
812 Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
813 return Mul;
814 }
815 }
816
// Same idea with a shift-left dividend; the shift amount bound differs for
// the signed case to keep 1 << C1 non-negative.
817 if ((IsSigned && match(Op0, m_NSWShl(m_Value(X), m_APInt(C1))) &&
818 C1->ult(C1->getBitWidth() - 1)) ||
819 (!IsSigned && match(Op0, m_NUWShl(m_Value(X), m_APInt(C1))) &&
820 C1->ult(C1->getBitWidth()))) {
821 APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
822 APInt C1Shifted = APInt::getOneBitSet(
823 C1->getBitWidth(), static_cast<unsigned>(C1->getZExtValue()));
824
825 // (X << C1) / C2 -> X / (C2 >> C1) if C2 is a multiple of 1 << C1.
826 if (isMultiple(*C2, C1Shifted, Quotient, IsSigned)) {
827 auto *BO = BinaryOperator::Create(I.getOpcode(), X,
828 ConstantInt::get(Ty, Quotient));
829 BO->setIsExact(I.isExact());
830 return BO;
831 }
832
833 // (X << C1) / C2 -> X * ((1 << C1) / C2) if 1 << C1 is a multiple of C2.
834 if (isMultiple(C1Shifted, *C2, Quotient, IsSigned)) {
835 auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
836 ConstantInt::get(Ty, Quotient));
837 auto *OBO = cast<OverflowingBinaryOperator>(Op0);
838 Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
839 Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
840 return Mul;
841 }
842 }
843
844 if (!C2->isZero()) // avoid X udiv 0
845 if (Instruction *FoldedDiv = foldBinOpIntoSelectOrPhi(I))
846 return FoldedDiv;
847 }
848
849 if (match(Op0, m_One())) {
850 assert(!Ty->isIntOrIntVectorTy(1) && "i1 divide not removed?")(static_cast <bool> (!Ty->isIntOrIntVectorTy(1) &&
"i1 divide not removed?") ? void (0) : __assert_fail ("!Ty->isIntOrIntVectorTy(1) && \"i1 divide not removed?\""
, "llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp",
850, __extension__ __PRETTY_FUNCTION__))
;
851 if (IsSigned) {
852 // If Op1 is 0 then it's undefined behaviour, if Op1 is 1 then the
853 // result is one, if Op1 is -1 then the result is minus one, otherwise
854 // it's zero.
855 Value *Inc = Builder.CreateAdd(Op1, Op0);
856 Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(Ty, 3));
857 return SelectInst::Create(Cmp, Op1, ConstantInt::get(Ty, 0));
858 } else {
859 // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
860 // result is one, otherwise it's zero.
861 return new ZExtInst(Builder.CreateICmpEQ(Op1, Op0), Ty);
862 }
863 }
864
865 // See if we can fold away this div instruction.
866 if (SimplifyDemandedInstructionBits(I))
867 return &I;
868
869 // (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y
870 Value *X, *Z;
871 if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) // (X - Z) / Y; Y = Op1
872 if ((IsSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) ||
873 (!IsSigned && match(Z, m_URem(m_Specific(X), m_Specific(Op1)))))
874 return BinaryOperator::Create(I.getOpcode(), X, Op1);
875
876 // (X << Y) / X -> 1 << Y
877 Value *Y;
878 if (IsSigned && match(Op0, m_NSWShl(m_Specific(Op1), m_Value(Y))))
879 return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty, 1), Y);
880 if (!IsSigned && match(Op0, m_NUWShl(m_Specific(Op1), m_Value(Y))))
881 return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty, 1), Y);
882
883 // X / (X * Y) -> 1 / Y if the multiplication does not overflow.
884 if (match(Op1, m_c_Mul(m_Specific(Op0), m_Value(Y)))) {
885 bool HasNSW = cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap();
886 bool HasNUW = cast<OverflowingBinaryOperator>(Op1)->hasNoUnsignedWrap();
887 if ((IsSigned && HasNSW) || (!IsSigned && HasNUW)) {
888 replaceOperand(I, 0, ConstantInt::get(Ty, 1));
889 replaceOperand(I, 1, Y);
890 return &I;
891 }
892 }
893
894 return nullptr;
895}
896
// Recursion depth limit for takeLog2 below.
897 static const unsigned MaxDepth = 6;
898
899 // Take the exact integer log2 of the value. If DoFold is true, create the
900 // actual instructions, otherwise return a non-null dummy value. Return nullptr
901 // on failure.
902 static Value *takeLog2(IRBuilderBase &Builder, Value *Op, unsigned Depth,
903 bool DoFold) {
// In query mode (DoFold == false), IfFold returns a non-null sentinel
// pointer without creating any IR; the query-mode result must never be
// dereferenced, only tested for null.
904 auto IfFold = [DoFold](function_ref<Value *()> Fn) {
905 if (!DoFold)
906 return reinterpret_cast<Value *>(-1);
907 return Fn();
908 };
909
910 // FIXME: assert that Op1 isn't/doesn't contain undef.
911
912 // log2(2^C) -> C
913 if (match(Op, m_Power2()))
914 return IfFold([&]() {
915 Constant *C = ConstantExpr::getExactLogBase2(cast<Constant>(Op));
916 if (!C)
917 llvm_unreachable("Failed to constant fold udiv -> logbase2")::llvm::llvm_unreachable_internal("Failed to constant fold udiv -> logbase2"
, "llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp",
917)
;
918 return C;
919 });
920
921 // The remaining tests are all recursive, so bail out if we hit the limit.
922 if (Depth++ == MaxDepth)
923 return nullptr;
924
925 // log2(zext X) -> zext log2(X)
926 // FIXME: Require one use?
927 Value *X, *Y;
928 if (match(Op, m_ZExt(m_Value(X))))
929 if (Value *LogX = takeLog2(Builder, X, Depth, DoFold))
930 return IfFold([&]() { return Builder.CreateZExt(LogX, Op->getType()); });
931
932 // log2(X << Y) -> log2(X) + Y
933 // FIXME: Require one use unless X is 1?
934 if (match(Op, m_Shl(m_Value(X), m_Value(Y))))
935 if (Value *LogX = takeLog2(Builder, X, Depth, DoFold))
936 return IfFold([&]() { return Builder.CreateAdd(LogX, Y); });
937
938 // log2(Cond ? X : Y) -> Cond ? log2(X) : log2(Y)
939 // FIXME: missed optimization: if one of the hands of select is/contains
940 // undef, just directly pick the other one.
941 // FIXME: can both hands contain undef?
942 // FIXME: Require one use?
943 if (SelectInst *SI = dyn_cast<SelectInst>(Op))
944 if (Value *LogX = takeLog2(Builder, SI->getOperand(1), Depth, DoFold))
945 if (Value *LogY = takeLog2(Builder, SI->getOperand(2), Depth, DoFold))
946 return IfFold([&]() {
947 return Builder.CreateSelect(SI->getOperand(0), LogX, LogY);
948 });
949
950 // log2(umin(X, Y)) -> umin(log2(X), log2(Y))
951 // log2(umax(X, Y)) -> umax(log2(X), log2(Y))
952 auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op);
953 if (MinMax && MinMax->hasOneUse() && !MinMax->isSigned())
954 if (Value *LogX = takeLog2(Builder, MinMax->getLHS(), Depth, DoFold))
955 if (Value *LogY = takeLog2(Builder, MinMax->getRHS(), Depth, DoFold))
956 return IfFold([&]() {
957 return Builder.CreateBinaryIntrinsic(
958 MinMax->getIntrinsicID(), LogX, LogY);
959 });
960
961 return nullptr;
962}
963
964/// If we have zero-extended operands of an unsigned div or rem, we may be able
965/// to narrow the operation (sink the zext below the math).
966static Instruction *narrowUDivURem(BinaryOperator &I,
967 InstCombiner::BuilderTy &Builder) {
968 Instruction::BinaryOps Opcode = I.getOpcode();
969 Value *N = I.getOperand(0);
970 Value *D = I.getOperand(1);
971 Type *Ty = I.getType();
972 Value *X, *Y;
973 if (match(N, m_ZExt(m_Value(X))) && match(D, m_ZExt(m_Value(Y))) &&
974 X->getType() == Y->getType() && (N->hasOneUse() || D->hasOneUse())) {
975 // udiv (zext X), (zext Y) --> zext (udiv X, Y)
976 // urem (zext X), (zext Y) --> zext (urem X, Y)
977 Value *NarrowOp = Builder.CreateBinOp(Opcode, X, Y);
978 return new ZExtInst(NarrowOp, Ty);
979 }
980
981 Constant *C;
982 if ((match(N, m_OneUse(m_ZExt(m_Value(X)))) && match(D, m_Constant(C))) ||
983 (match(D, m_OneUse(m_ZExt(m_Value(X)))) && match(N, m_Constant(C)))) {
984 // If the constant is the same in the smaller type, use the narrow version.
985 Constant *TruncC = ConstantExpr::getTrunc(C, X->getType());
986 if (ConstantExpr::getZExt(TruncC, Ty) != C)
987 return nullptr;
988
989 // udiv (zext X), C --> zext (udiv X, C')
990 // urem (zext X), C --> zext (urem X, C')
991 // udiv C, (zext X) --> zext (udiv C', X)
992 // urem C, (zext X) --> zext (urem C', X)
993 Value *NarrowOp = isa<Constant>(D) ? Builder.CreateBinOp(Opcode, X, TruncC)
994 : Builder.CreateBinOp(Opcode, TruncC, X);
995 return new ZExtInst(NarrowOp, Ty);
996 }
997
998 return nullptr;
999}
1000
/// Visit a 'udiv' instruction: run generic simplification, the common integer
/// division transforms, and then udiv-specific folds, in that order.
1001 Instruction *InstCombinerImpl::visitUDiv(BinaryOperator &I) {
1002 if (Value *V = SimplifyUDivInst(I.getOperand(0), I.getOperand(1),
1003 SQ.getWithInstruction(&I)))
1004 return replaceInstUsesWith(I, V);
1005
1006 if (Instruction *X = foldVectorBinop(I))
1007 return X;
1008
1009 // Handle the integer div common cases
1010 if (Instruction *Common = commonIDivTransforms(I))
1011 return Common;
1012
1013 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1014 Value *X;
1015 const APInt *C1, *C2;
1016 if (match(Op0, m_LShr(m_Value(X), m_APInt(C1))) && match(Op1, m_APInt(C2))) {
1017 // (X lshr C1) udiv C2 --> X udiv (C2 << C1)
1018 bool Overflow;
1019 APInt C2ShlC1 = C2->ushl_ov(*C1, Overflow);
1020 if (!Overflow) {
1021 bool IsExact = I.isExact() && match(Op0, m_Exact(m_Value()));
1022 BinaryOperator *BO = BinaryOperator::CreateUDiv(
1023 X, ConstantInt::get(X->getType(), C2ShlC1));
1024 if (IsExact)
1025 BO->setIsExact();
1026 return BO;
1027 }
1028 }
1029
1030 // Op0 / C where C is large (negative) --> zext (Op0 >= C)
1031 // TODO: Could use isKnownNegative() to handle non-constant values.
1032 Type *Ty = I.getType();
1033 if (match(Op1, m_Negative())) {
1034 Value *Cmp = Builder.CreateICmpUGE(Op0, Op1);
1035 return CastInst::CreateZExtOrBitCast(Cmp, Ty);
1036 }
1037 // Op0 / (sext i1 X) --> zext (Op0 == -1) (if X is 0, the div is undefined)
1038 if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
1039 Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
1040 return CastInst::CreateZExtOrBitCast(Cmp, Ty);
1041 }
1042
1043 if (Instruction *NarrowDiv = narrowUDivURem(I, Builder))
1044 return NarrowDiv;
1045
1046 // If the udiv operands are non-overflowing multiplies with a common operand,
1047 // then eliminate the common factor:
1048 // (A * B) / (A * X) --> B / X (and commuted variants)
1049 // TODO: The code would be reduced if we had m_c_NUWMul pattern matching.
1050 // TODO: If -reassociation handled this generally, we could remove this.
1051 Value *A, *B;
1052 if (match(Op0, m_NUWMul(m_Value(A), m_Value(B)))) {
1053 if (match(Op1, m_NUWMul(m_Specific(A), m_Value(X))) ||
1054 match(Op1, m_NUWMul(m_Value(X), m_Specific(A))))
1055 return BinaryOperator::CreateUDiv(B, X);
1056 if (match(Op1, m_NUWMul(m_Specific(B), m_Value(X))) ||
1057 match(Op1, m_NUWMul(m_Value(X), m_Specific(B))))
1058 return BinaryOperator::CreateUDiv(A, X);
1059 }
1060
1061 // Op1 udiv Op2 -> Op1 lshr log2(Op2), if log2() folds away.
// First query (DoFold=false) proves the fold succeeds without creating IR;
// only then is the second call allowed to build instructions.
1062 if (takeLog2(Builder, Op1, /*Depth*/0, /*DoFold*/false)) {
1063 Value *Res = takeLog2(Builder, Op1, /*Depth*/0, /*DoFold*/true);
1064 return replaceInstUsesWith(
1065 I, Builder.CreateLShr(Op0, Res, I.getName(), I.isExact()));
1066 }
1067
1068 return nullptr;
1069}
1070
/// Visit an 'sdiv' instruction: run generic simplification, the common integer
/// division transforms, and then sdiv-specific folds, in that order.
1071 Instruction *InstCombinerImpl::visitSDiv(BinaryOperator &I) {
1072 if (Value *V = SimplifySDivInst(I.getOperand(0), I.getOperand(1),
1073 SQ.getWithInstruction(&I)))
1074 return replaceInstUsesWith(I, V);
1075
1076 if (Instruction *X = foldVectorBinop(I))
1077 return X;
1078
1079 // Handle the integer div common cases
1080 if (Instruction *Common = commonIDivTransforms(I))
1081 return Common;
1082
1083 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1084 Type *Ty = I.getType();
1085 Value *X;
1086 // sdiv Op0, -1 --> -Op0
1087 // sdiv Op0, (sext i1 X) --> -Op0 (because if X is 0, the op is undefined)
1088 if (match(Op1, m_AllOnes()) ||
1089 (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
1090 return BinaryOperator::CreateNeg(Op0);
1091
1092 // X / INT_MIN --> X == INT_MIN
1093 if (match(Op1, m_SignMask()))
1094 return new ZExtInst(Builder.CreateICmpEQ(Op0, Op1), Ty);
1095
1096 // sdiv exact X, 1<<C --> ashr exact X, C iff 1<<C is non-negative
1097 // sdiv exact X, -1<<C --> -(ashr exact X, C)
1098 if (I.isExact() && ((match(Op1, m_Power2()) && match(Op1, m_NonNegative())) ||
1099 match(Op1, m_NegatedPower2()))) {
1100 bool DivisorWasNegative = match(Op1, m_NegatedPower2());
// Note: the local Op1 is re-pointed at the negated constant here; the
// instruction's actual operand is unchanged.
1101 if (DivisorWasNegative)
1102 Op1 = ConstantExpr::getNeg(cast<Constant>(Op1));
1103 auto *AShr = BinaryOperator::CreateExactAShr(
1104 Op0, ConstantExpr::getExactLogBase2(cast<Constant>(Op1)), I.getName());
1105 if (!DivisorWasNegative)
1106 return AShr;
1107 Builder.Insert(AShr);
1108 AShr->setName(I.getName() + ".neg");
1109 return BinaryOperator::CreateNeg(AShr, I.getName());
1110 }
1111
1112 const APInt *Op1C;
1113 if (match(Op1, m_APInt(Op1C))) {
1114 // If the dividend is sign-extended and the constant divisor is small enough
1115 // to fit in the source type, shrink the division to the narrower type:
1116 // (sext X) sdiv C --> sext (X sdiv C)
1117 Value *Op0Src;
1118 if (match(Op0, m_OneUse(m_SExt(m_Value(Op0Src)))) &&
1119 Op0Src->getType()->getScalarSizeInBits() >= Op1C->getMinSignedBits()) {
1120
1121 // In the general case, we need to make sure that the dividend is not the
1122 // minimum signed value because dividing that by -1 is UB. But here, we
1123 // know that the -1 divisor case is already handled above.
1124
1125 Constant *NarrowDivisor =
1126 ConstantExpr::getTrunc(cast<Constant>(Op1), Op0Src->getType());
1127 Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor);
1128 return new SExtInst(NarrowOp, Ty);
1129 }
1130
1131 // -X / C --> X / -C (if the negation doesn't overflow).
1132 // TODO: This could be enhanced to handle arbitrary vector constants by
1133 // checking if all elements are not the min-signed-val.
1134 if (!Op1C->isMinSignedValue() &&
1135 match(Op0, m_NSWSub(m_Zero(), m_Value(X)))) {
1136 Constant *NegC = ConstantInt::get(Ty, -(*Op1C));
1137 Instruction *BO = BinaryOperator::CreateSDiv(X, NegC);
1138 BO->setIsExact(I.isExact());
1139 return BO;
1140 }
1141 }
1142
1143 // -X / Y --> -(X / Y)
1144 Value *Y;
1145 if (match(&I, m_SDiv(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
1146 return BinaryOperator::CreateNSWNeg(
1147 Builder.CreateSDiv(X, Y, I.getName(), I.isExact()));
1148
1149 // abs(X) / X --> X > -1 ? 1 : -1
1150 // X / abs(X) --> X > -1 ? 1 : -1
1151 if (match(&I, m_c_BinOp(
1152 m_OneUse(m_Intrinsic<Intrinsic::abs>(m_Value(X), m_One())),
1153 m_Deferred(X)))) {
1154 Constant *NegOne = ConstantInt::getAllOnesValue(Ty);
1155 Value *Cond = Builder.CreateICmpSGT(X, NegOne);
1156 return SelectInst::Create(Cond, ConstantInt::get(Ty, 1), NegOne);
1157 }
1158
1159 // If the sign bits of both operands are zero (i.e. we can prove they are
1160 // unsigned inputs), turn this into a udiv.
1161 APInt Mask(APInt::getSignMask(Ty->getScalarSizeInBits()));
1162 if (MaskedValueIsZero(Op0, Mask, 0, &I)) {
1163 if (MaskedValueIsZero(Op1, Mask, 0, &I)) {
1164 // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
1165 auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
1166 BO->setIsExact(I.isExact());
1167 return BO;
1168 }
1169
1170 if (match(Op1, m_NegatedPower2())) {
1171 // X sdiv (-(1 << C)) -> -(X sdiv (1 << C)) ->
1172 // -> -(X udiv (1 << C)) -> -(X u>> C)
1173 Constant *CNegLog2 = ConstantExpr::getExactLogBase2(
1174 ConstantExpr::getNeg(cast<Constant>(Op1)));
1175 Value *Shr = Builder.CreateLShr(Op0, CNegLog2, I.getName(), I.isExact());
1176 return BinaryOperator::CreateNeg(Shr);
1177 }
1178
1179 if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
1180 // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
1181 // Safe because the only negative value (1 << Y) can take on is
1182 // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
1183 // the sign bit set.
1184 auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
1185 BO->setIsExact(I.isExact());
1186 return BO;
1187 }
1188 }
1189
1190 return nullptr;
1191}
1192
1193/// Remove negation and try to convert division into multiplication.
1194static Instruction *foldFDivConstantDivisor(BinaryOperator &I) {
1195 Constant *C;
1196 if (!match(I.getOperand(1), m_Constant(C)))
1197 return nullptr;
1198
1199 // -X / C --> X / -C
1200 Value *X;
1201 if (match(I.getOperand(0), m_FNeg(m_Value(X))))
1202 return BinaryOperator::CreateFDivFMF(X, ConstantExpr::getFNeg(C), &I);
1203
1204 // If the constant divisor has an exact inverse, this is always safe. If not,
1205 // then we can still create a reciprocal if fast-math-flags allow it and the
1206 // constant is a regular number (not zero, infinite, or denormal).
1207 if (!(C->hasExactInverseFP() || (I.hasAllowReciprocal() && C->isNormalFP())))
1208 return nullptr;
1209
1210 // Disallow denormal constants because we don't know what would happen
1211 // on all targets.
1212 // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
1213 // denorms are flushed?
1214 auto *RecipC = ConstantExpr::getFDiv(ConstantFP::get(I.getType(), 1.0), C);
1215 if (!RecipC->isNormalFP())
1216 return nullptr;
1217
1218 // X / C --> X * (1 / C)
1219 return BinaryOperator::CreateFMulFMF(I.getOperand(0), RecipC, &I);
1220}
1221
1222/// Remove negation and try to reassociate constant math.
1223static Instruction *foldFDivConstantDividend(BinaryOperator &I) {
1224 Constant *C;
1225 if (!match(I.getOperand(0), m_Constant(C)))
1226 return nullptr;
1227
1228 // C / -X --> -C / X
1229 Value *X;
1230 if (match(I.getOperand(1), m_FNeg(m_Value(X))))
1231 return BinaryOperator::CreateFDivFMF(ConstantExpr::getFNeg(C), X, &I);
1232
1233 if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
1234 return nullptr;
1235
1236 // Try to reassociate C / X expressions where X includes another constant.
1237 Constant *C2, *NewC = nullptr;
1238 if (match(I.getOperand(1), m_FMul(m_Value(X), m_Constant(C2)))) {
1239 // C / (X * C2) --> (C / C2) / X
1240 NewC = ConstantExpr::getFDiv(C, C2);
1241 } else if (match(I.getOperand(1), m_FDiv(m_Value(X), m_Constant(C2)))) {
1242 // C / (X / C2) --> (C * C2) / X
1243 NewC = ConstantExpr::getFMul(C, C2);
1244 }
1245 // Disallow denormal constants because we don't know what would happen
1246 // on all targets.
1247 // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
1248 // denorms are flushed?
1249 if (!NewC || !NewC->isNormalFP())
1250 return nullptr;
1251
1252 return BinaryOperator::CreateFDivFMF(NewC, X, &I);
1253}
1254
1255 /// Negate the exponent of pow/exp to fold division-by-pow() into multiply.
/// Requires 'reassoc' and 'arcp' fast-math flags on \p I and a single-use
/// intrinsic call as the divisor.
1256 static Instruction *foldFDivPowDivisor(BinaryOperator &I,
1257 InstCombiner::BuilderTy &Builder) {
1258 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1259 auto *II = dyn_cast<IntrinsicInst>(Op1);
1260 if (!II || !II->hasOneUse() || !I.hasAllowReassoc() ||
1261 !I.hasAllowReciprocal())
1262 return nullptr;
1263
1264 // Z / pow(X, Y) --> Z * pow(X, -Y)
1265 // Z / exp{2}(Y) --> Z * exp{2}(-Y)
1266 // In the general case, this creates an extra instruction, but fmul allows
1267 // for better canonicalization and optimization than fdiv.
1268 Intrinsic::ID IID = II->getIntrinsicID();
1269 SmallVector<Value *> Args;
1270 switch (IID) {
1271 case Intrinsic::pow:
1272 Args.push_back(II->getArgOperand(0));
1273 Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(1), &I));
1274 break;
// powi returns from inside the case because it needs an explicit two-type
// signature (FP type + exponent's integer type), unlike the other cases.
1275 case Intrinsic::powi: {
1276 // Require 'ninf' assuming that makes powi(X, -INT_MIN) acceptable.
1277 // That is, X ** (huge negative number) is 0.0, ~1.0, or INF and so
1278 // dividing by that is INF, ~1.0, or 0.0. Code that uses powi allows
1279 // non-standard results, so this corner case should be acceptable if the
1280 // code rules out INF values.
1281 if (!I.hasNoInfs())
1282 return nullptr;
1283 Args.push_back(II->getArgOperand(0));
1284 Args.push_back(Builder.CreateNeg(II->getArgOperand(1)));
1285 Type *Tys[] = {I.getType(), II->getArgOperand(1)->getType()};
1286 Value *Pow = Builder.CreateIntrinsic(IID, Tys, Args, &I);
1287 return BinaryOperator::CreateFMulFMF(Op0, Pow, &I);
1288 }
1289 case Intrinsic::exp:
1290 case Intrinsic::exp2:
1291 Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(0), &I));
1292 break;
1293 default:
1294 return nullptr;
1295 }
1296 Value *Pow = Builder.CreateIntrinsic(IID, I.getType(), Args, &I);
1297 return BinaryOperator::CreateFMulFMF(Op0, Pow, &I);
1298}
1299
/// Visit an 'fdiv' instruction: generic simplification and constant folds
/// first, then fast-math-flag-gated reassociations and sign folds.
1300 Instruction *InstCombinerImpl::visitFDiv(BinaryOperator &I) {
1301 if (Value *V = SimplifyFDivInst(I.getOperand(0), I.getOperand(1),
1302 I.getFastMathFlags(),
1303 SQ.getWithInstruction(&I)))
1304 return replaceInstUsesWith(I, V);
1305
1306 if (Instruction *X = foldVectorBinop(I))
1307 return X;
1308
1309 if (Instruction *Phi = foldBinopWithPhiOperands(I))
1310 return Phi;
1311
1312 if (Instruction *R = foldFDivConstantDivisor(I))
1313 return R;
1314
1315 if (Instruction *R = foldFDivConstantDividend(I))
1316 return R;
1317
1318 if (Instruction *R = foldFPSignBitOps(I))
1319 return R;
1320
1321 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1322 if (isa<Constant>(Op0))
1323 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
1324 if (Instruction *R = FoldOpIntoSelect(I, SI))
1325 return R;
1326
1327 if (isa<Constant>(Op1))
1328 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
1329 if (Instruction *R = FoldOpIntoSelect(I, SI))
1330 return R;
1331
// The folds in this group require both 'reassoc' and 'arcp' flags.
1332 if (I.hasAllowReassoc() && I.hasAllowReciprocal()) {
1333 Value *X, *Y;
1334 if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
1335 (!isa<Constant>(Y) || !isa<Constant>(Op1))) {
1336 // (X / Y) / Z => X / (Y * Z)
1337 Value *YZ = Builder.CreateFMulFMF(Y, Op1, &I);
1338 return BinaryOperator::CreateFDivFMF(X, YZ, &I);
1339 }
1340 if (match(Op1, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
1341 (!isa<Constant>(Y) || !isa<Constant>(Op0))) {
1342 // Z / (X / Y) => (Y * Z) / X
1343 Value *YZ = Builder.CreateFMulFMF(Y, Op0, &I);
1344 return BinaryOperator::CreateFDivFMF(YZ, X, &I);
1345 }
1346 // Z / (1.0 / Y) => (Y * Z)
1347 //
1348 // This is a special case of Z / (X / Y) => (Y * Z) / X, with X = 1.0. The
1349 // m_OneUse check is avoided because even in the case of the multiple uses
1350 // for 1.0/Y, the number of instructions remain the same and a division is
1351 // replaced by a multiplication.
1352 if (match(Op1, m_FDiv(m_SpecificFP(1.0), m_Value(Y))))
1353 return BinaryOperator::CreateFMulFMF(Y, Op0, &I);
1354 }
1355
1356 if (I.hasAllowReassoc() && Op0->hasOneUse() && Op1->hasOneUse()) {
1357 // sin(X) / cos(X) -> tan(X)
1358 // cos(X) / sin(X) -> 1/tan(X) (cotangent)
1359 Value *X;
1360 bool IsTan = match(Op0, m_Intrinsic<Intrinsic::sin>(m_Value(X))) &&
1361 match(Op1, m_Intrinsic<Intrinsic::cos>(m_Specific(X)));
1362 bool IsCot =
1363 !IsTan && match(Op0, m_Intrinsic<Intrinsic::cos>(m_Value(X))) &&
1364 match(Op1, m_Intrinsic<Intrinsic::sin>(m_Specific(X)));
1365
1366 if ((IsTan || IsCot) &&
1367 hasFloatFn(&TLI, I.getType(), LibFunc_tan, LibFunc_tanf, LibFunc_tanl)) {
1368 IRBuilder<> B(&I);
1369 IRBuilder<>::FastMathFlagGuard FMFGuard(B);
1370 B.setFastMathFlags(I.getFastMathFlags());
1371 AttributeList Attrs =
1372 cast<CallBase>(Op0)->getCalledFunction()->getAttributes();
1373 Value *Res = emitUnaryFloatFnCall(X, &TLI, LibFunc_tan, LibFunc_tanf,
1374 LibFunc_tanl, B, Attrs);
1375 if (IsCot)
1376 Res = B.CreateFDiv(ConstantFP::get(I.getType(), 1.0), Res);
1377 return replaceInstUsesWith(I, Res);
1378 }
1379 }
1380
1381 // X / (X * Y) --> 1.0 / Y
1382 // Reassociate to (X / X -> 1.0) is legal when NaNs are not allowed.
1383 // We can ignore the possibility that X is infinity because INF/INF is NaN.
1384 Value *X, *Y;
1385 if (I.hasNoNaNs() && I.hasAllowReassoc() &&
1386 match(Op1, m_c_FMul(m_Specific(Op0), m_Value(Y)))) {
1387 replaceOperand(I, 0, ConstantFP::get(I.getType(), 1.0));
1388 replaceOperand(I, 1, Y);
1389 return &I;
1390 }
1391
1392 // X / fabs(X) -> copysign(1.0, X)
1393 // fabs(X) / X -> copysign(1.0, X)
1394 if (I.hasNoNaNs() && I.hasNoInfs() &&
1395 (match(&I, m_FDiv(m_Value(X), m_FAbs(m_Deferred(X)))) ||
1396 match(&I, m_FDiv(m_FAbs(m_Value(X)), m_Deferred(X))))) {
1397 Value *V = Builder.CreateBinaryIntrinsic(
1398 Intrinsic::copysign, ConstantFP::get(I.getType(), 1.0), X, &I);
1399 return replaceInstUsesWith(I, V);
1400 }
1401
1402 if (Instruction *Mul = foldFDivPowDivisor(I, Builder))
1403 return Mul;
1404
1405 return nullptr;
1406}
1407
1408 /// This function implements the transforms common to both integer remainder
1409 /// instructions (urem and srem). It is called by the visitors to those integer
1410 /// remainder instructions.
1411 /// Common integer remainder transforms
/// \returns a replacement instruction, &I if \p I was mutated in place, or
/// nullptr when no common transform applies.
1412 Instruction *InstCombinerImpl::commonIRemTransforms(BinaryOperator &I) {
1413 if (Instruction *Phi = foldBinopWithPhiOperands(I))
6
Assuming 'Phi' is null
7
Taking false branch
1414 return Phi;
1415
1416 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1417
1418 // The RHS is known non-zero.
1419 if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
8
Assuming 'V' is null
9
Taking false branch
1420 return replaceOperand(I, 1, V);
1421
1422 // Handle cases involving: rem X, (select Cond, Y, Z)
1423 if (simplifyDivRemOfSelectWithZeroOp(I))
10
Calling 'InstCombinerImpl::simplifyDivRemOfSelectWithZeroOp'
1424 return &I;
1425
1426 // If the divisor is a select-of-constants, try to constant fold all rem ops:
1427 // C % (select Cond, TrueC, FalseC) --> select Cond, (C % TrueC), (C % FalseC)
1428 // TODO: Adapt simplifyDivRemOfSelectWithZeroOp to allow this and other folds.
1429 if (match(Op0, m_ImmConstant()) &&
1430 match(Op1, m_Select(m_Value(), m_ImmConstant(), m_ImmConstant()))) {
1431 if (Instruction *R = FoldOpIntoSelect(I, cast<SelectInst>(Op1),
1432 /*FoldWithMultiUse*/ true))
1433 return R;
1434 }
1435
1436 if (isa<Constant>(Op1)) {
1437 if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
1438 if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
1439 if (Instruction *R = FoldOpIntoSelect(I, SI))
1440 return R;
1441 } else if (auto *PN = dyn_cast<PHINode>(Op0I)) {
1442 const APInt *Op1Int;
// Exclude divisors of 0 (and, for srem, INT_MIN whose negation overflows)
// so the speculated rem below cannot trap.
1443 if (match(Op1, m_APInt(Op1Int)) && !Op1Int->isMinValue() &&
1444 (I.getOpcode() == Instruction::URem ||
1445 !Op1Int->isMinSignedValue())) {
1446 // foldOpIntoPhi will speculate instructions to the end of the PHI's
1447 // predecessor blocks, so do this only if we know the srem or urem
1448 // will not fault.
1449 if (Instruction *NV = foldOpIntoPhi(I, PN))
1450 return NV;
1451 }
1452 }
1453
1454 // See if we can fold away this rem instruction.
1455 if (SimplifyDemandedInstructionBits(I))
1456 return &I;
1457 }
1458 }
1459
1460 return nullptr;
1461}
1462
/// Visit a 'urem' instruction: generic simplification and common remainder
/// transforms first, then urem-specific folds.
1463 Instruction *InstCombinerImpl::visitURem(BinaryOperator &I) {
1464 if (Value *V = SimplifyURemInst(I.getOperand(0), I.getOperand(1),
1
Assuming 'V' is null
2
Taking false branch
1465 SQ.getWithInstruction(&I)))
1466 return replaceInstUsesWith(I, V);
1467
1468 if (Instruction *X = foldVectorBinop(I))
3
Assuming 'X' is null
4
Taking false branch
1469 return X;
1470
1471 if (Instruction *common = commonIRemTransforms(I))
5
Calling 'InstCombinerImpl::commonIRemTransforms'
1472 return common;
1473
1474 if (Instruction *NarrowRem = narrowUDivURem(I, Builder))
1475 return NarrowRem;
1476
1477 // X urem Y -> X and Y-1, where Y is a power of 2,
1478 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1479 Type *Ty = I.getType();
1480 if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
1481 // This may increase instruction count, we don't enforce that Y is a
1482 // constant.
1483 Constant *N1 = Constant::getAllOnesValue(Ty);
1484 Value *Add = Builder.CreateAdd(Op1, N1);
1485 return BinaryOperator::CreateAnd(Op0, Add);
1486 }
1487
1488 // 1 urem X -> zext(X != 1)
1489 if (match(Op0, m_One())) {
1490 Value *Cmp = Builder.CreateICmpNE(Op1, ConstantInt::get(Ty, 1));
1491 return CastInst::CreateZExtOrBitCast(Cmp, Ty);
1492 }
1493
1494 // X urem C -> X < C ? X : X - C, where C >= signbit.
1495 if (match(Op1, m_Negative())) {
1496 Value *Cmp = Builder.CreateICmpULT(Op0, Op1);
1497 Value *Sub = Builder.CreateSub(Op0, Op1);
1498 return SelectInst::Create(Cmp, Op0, Sub);
1499 }
1500
1501 // If the divisor is a sext of a boolean, then the divisor must be max
1502 // unsigned value (-1). Therefore, the remainder is Op0 unless Op0 is also
1503 // max unsigned value. In that case, the remainder is 0:
1504 // urem Op0, (sext i1 X) --> (Op0 == -1) ? 0 : Op0
1505 Value *X;
1506 if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
1507 Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
1508 return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Op0);
1509 }
1510
1511 return nullptr;
1512}
1513
1514Instruction *InstCombinerImpl::visitSRem(BinaryOperator &I) {
1515 if (Value *V = SimplifySRemInst(I.getOperand(0), I.getOperand(1),
1516 SQ.getWithInstruction(&I)))
1517 return replaceInstUsesWith(I, V);
1518
1519 if (Instruction *X = foldVectorBinop(I))
1520 return X;
1521
1522 // Handle the integer rem common cases
1523 if (Instruction *Common = commonIRemTransforms(I))
1524 return Common;
1525
1526 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1527 {
1528 const APInt *Y;
1529 // X % -Y -> X % Y
1530 if (match(Op1, m_Negative(Y)) && !Y->isMinSignedValue())
1531 return replaceOperand(I, 1, ConstantInt::get(I.getType(), -*Y));
1532 }
1533
1534 // -X srem Y --> -(X srem Y)
1535 Value *X, *Y;
1536 if (match(&I, m_SRem(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
1537 return BinaryOperator::CreateNSWNeg(Builder.CreateSRem(X, Y));
1538
1539 // If the sign bits of both operands are zero (i.e. we can prove they are
1540 // unsigned inputs), turn this into a urem.
1541 APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
1542 if (MaskedValueIsZero(Op1, Mask, 0, &I) &&
1543 MaskedValueIsZero(Op0, Mask, 0, &I)) {
1544 // X srem Y -> X urem Y, iff X and Y don't have sign bit set
1545 return BinaryOperator::CreateURem(Op0, Op1, I.getName());
1546 }
1547
1548 // If it's a constant vector, flip any negative values positive.
1549 if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) {
1550 Constant *C = cast<Constant>(Op1);
1551 unsigned VWidth = cast<FixedVectorType>(C->getType())->getNumElements();
1552
1553 bool hasNegative = false;
1554 bool hasMissing = false;
1555 for (unsigned i = 0; i != VWidth; ++i) {
1556 Constant *Elt = C->getAggregateElement(i);
1557 if (!Elt) {
1558 hasMissing = true;
1559 break;
1560 }
1561
1562 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elt))
1563 if (RHS->isNegative())
1564 hasNegative = true;
1565 }
1566
1567 if (hasNegative && !hasMissing) {
1568 SmallVector<Constant *, 16> Elts(VWidth);
1569 for (unsigned i = 0; i != VWidth; ++i) {
1570 Elts[i] = C->getAggregateElement(i); // Handle undef, etc.
1571 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elts[i])) {
1572 if (RHS->isNegative())
1573 Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
1574 }
1575 }
1576
1577 Constant *NewRHSV = ConstantVector::get(Elts);
1578 if (NewRHSV != C) // Don't loop on -MININT
1579 return replaceOperand(I, 1, NewRHSV);
1580 }
1581 }
1582
1583 return nullptr;
1584}
1585
1586Instruction *InstCombinerImpl::visitFRem(BinaryOperator &I) {
1587 if (Value *V = SimplifyFRemInst(I.getOperand(0), I.getOperand(1),
1588 I.getFastMathFlags(),
1589 SQ.getWithInstruction(&I)))
1590 return replaceInstUsesWith(I, V);
1591
1592 if (Instruction *X = foldVectorBinop(I))
1593 return X;
1594
1595 if (Instruction *Phi = foldBinopWithPhiOperands(I))
1596 return Phi;
1597
1598 return nullptr;
1599}