Bug Summary

File: llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
Warning: line 750, column 5
Value stored to 'Pred' is never read
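
This is the deadcode.DeadStores checker: the value written to 'Pred' at line 750 is never read afterwards in this build, because the only later use of 'Pred' is an assert and the build defines NDEBUG (see -D NDEBUG in the command line below). The following is a minimal, hypothetical sketch of the flagged pattern, not the LLVM code itself; the names (Pred, swapped, demo) are illustrative, and it assumes compilation with -DNDEBUG:

  // demo.cpp -- e.g. compile with: c++ -DNDEBUG -c demo.cpp
  #include <cassert>

  enum Pred { ULT, UGT };

  static Pred swapped(Pred P) { return P == ULT ? UGT : ULT; }

  int demo(Pred P) {
    if (P == ULT)
      P = swapped(P); // with NDEBUG, this stored value is never read: DeadStores fires here
    assert(P == UGT && "unexpected predicate"); // expands to (void)0 under NDEBUG
    return 0;
  }

  int main() { return demo(ULT); }

With assertions enabled the assert does read 'Pred', so the store is not dead; the warning appears to be an artifact of the NDEBUG build rather than a functional bug.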

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name InstCombineSelect.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Transforms/InstCombine -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Transforms/InstCombine -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Transforms/InstCombine -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/include -I /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/include -D NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/build-llvm/lib/Transforms/InstCombine -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-04-040900-46481-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210903100615+fd66b44ec19e/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
1//===- InstCombineSelect.cpp ----------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the visitSelect function.
10//
11//===----------------------------------------------------------------------===//
12
13#include "InstCombineInternal.h"
14#include "llvm/ADT/APInt.h"
15#include "llvm/ADT/Optional.h"
16#include "llvm/ADT/STLExtras.h"
17#include "llvm/ADT/SmallVector.h"
18#include "llvm/Analysis/AssumptionCache.h"
19#include "llvm/Analysis/CmpInstAnalysis.h"
20#include "llvm/Analysis/InstructionSimplify.h"
21#include "llvm/Analysis/OverflowInstAnalysis.h"
22#include "llvm/Analysis/ValueTracking.h"
23#include "llvm/IR/BasicBlock.h"
24#include "llvm/IR/Constant.h"
25#include "llvm/IR/Constants.h"
26#include "llvm/IR/DerivedTypes.h"
27#include "llvm/IR/IRBuilder.h"
28#include "llvm/IR/InstrTypes.h"
29#include "llvm/IR/Instruction.h"
30#include "llvm/IR/Instructions.h"
31#include "llvm/IR/IntrinsicInst.h"
32#include "llvm/IR/Intrinsics.h"
33#include "llvm/IR/Operator.h"
34#include "llvm/IR/PatternMatch.h"
35#include "llvm/IR/Type.h"
36#include "llvm/IR/User.h"
37#include "llvm/IR/Value.h"
38#include "llvm/Support/Casting.h"
39#include "llvm/Support/ErrorHandling.h"
40#include "llvm/Support/KnownBits.h"
41#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
42#include "llvm/Transforms/InstCombine/InstCombiner.h"
43#include <cassert>
44#include <utility>
45
46using namespace llvm;
47using namespace PatternMatch;
48
49#define DEBUG_TYPE "instcombine"
50
51static Value *createMinMax(InstCombiner::BuilderTy &Builder,
52 SelectPatternFlavor SPF, Value *A, Value *B) {
53 CmpInst::Predicate Pred = getMinMaxPred(SPF);
54 assert(CmpInst::isIntPredicate(Pred) && "Expected integer predicate");
55 return Builder.CreateSelect(Builder.CreateICmp(Pred, A, B), A, B);
56}
57
58/// Replace a select operand based on an equality comparison with the identity
59/// constant of a binop.
60static Instruction *foldSelectBinOpIdentity(SelectInst &Sel,
61 const TargetLibraryInfo &TLI,
62 InstCombinerImpl &IC) {
63 // The select condition must be an equality compare with a constant operand.
64 Value *X;
65 Constant *C;
66 CmpInst::Predicate Pred;
67 if (!match(Sel.getCondition(), m_Cmp(Pred, m_Value(X), m_Constant(C))))
68 return nullptr;
69
70 bool IsEq;
71 if (ICmpInst::isEquality(Pred))
72 IsEq = Pred == ICmpInst::ICMP_EQ;
73 else if (Pred == FCmpInst::FCMP_OEQ)
74 IsEq = true;
75 else if (Pred == FCmpInst::FCMP_UNE)
76 IsEq = false;
77 else
78 return nullptr;
79
80 // A select operand must be a binop.
81 BinaryOperator *BO;
82 if (!match(Sel.getOperand(IsEq ? 1 : 2), m_BinOp(BO)))
83 return nullptr;
84
85 // The compare constant must be the identity constant for that binop.
86 // If this a floating-point compare with 0.0, any zero constant will do.
87 Type *Ty = BO->getType();
88 Constant *IdC = ConstantExpr::getBinOpIdentity(BO->getOpcode(), Ty, true);
89 if (IdC != C) {
90 if (!IdC || !CmpInst::isFPPredicate(Pred))
91 return nullptr;
92 if (!match(IdC, m_AnyZeroFP()) || !match(C, m_AnyZeroFP()))
93 return nullptr;
94 }
95
96 // Last, match the compare variable operand with a binop operand.
97 Value *Y;
98 if (!BO->isCommutative() && !match(BO, m_BinOp(m_Value(Y), m_Specific(X))))
99 return nullptr;
100 if (!match(BO, m_c_BinOp(m_Value(Y), m_Specific(X))))
101 return nullptr;
102
103 // +0.0 compares equal to -0.0, and so it does not behave as required for this
104 // transform. Bail out if we cannot exclude that possibility.
105 if (isa<FPMathOperator>(BO))
106 if (!BO->hasNoSignedZeros() && !CannotBeNegativeZero(Y, &TLI))
107 return nullptr;
108
109 // BO = binop Y, X
110 // S = { select (cmp eq X, C), BO, ? } or { select (cmp ne X, C), ?, BO }
111 // =>
112 // S = { select (cmp eq X, C), Y, ? } or { select (cmp ne X, C), ?, Y }
113 return IC.replaceOperand(Sel, IsEq ? 1 : 2, Y);
114}
115
116/// This folds:
117/// select (icmp eq (and X, C1)), TC, FC
118/// iff C1 is a power of 2 and the difference between TC and FC is a power of 2.
119/// To something like:
120/// (shr (and (X, C1)), (log2(C1) - log2(TC-FC))) + FC
121/// Or:
122/// (shl (and (X, C1)), (log2(TC-FC) - log2(C1))) + FC
123/// With some variations depending if FC is larger than TC, or the shift
124/// isn't needed, or the bit widths don't match.
125static Value *foldSelectICmpAnd(SelectInst &Sel, ICmpInst *Cmp,
126 InstCombiner::BuilderTy &Builder) {
127 const APInt *SelTC, *SelFC;
128 if (!match(Sel.getTrueValue(), m_APInt(SelTC)) ||
129 !match(Sel.getFalseValue(), m_APInt(SelFC)))
130 return nullptr;
131
132 // If this is a vector select, we need a vector compare.
133 Type *SelType = Sel.getType();
134 if (SelType->isVectorTy() != Cmp->getType()->isVectorTy())
135 return nullptr;
136
137 Value *V;
138 APInt AndMask;
139 bool CreateAnd = false;
140 ICmpInst::Predicate Pred = Cmp->getPredicate();
141 if (ICmpInst::isEquality(Pred)) {
142 if (!match(Cmp->getOperand(1), m_Zero()))
143 return nullptr;
144
145 V = Cmp->getOperand(0);
146 const APInt *AndRHS;
147 if (!match(V, m_And(m_Value(), m_Power2(AndRHS))))
148 return nullptr;
149
150 AndMask = *AndRHS;
151 } else if (decomposeBitTestICmp(Cmp->getOperand(0), Cmp->getOperand(1),
152 Pred, V, AndMask)) {
153 assert(ICmpInst::isEquality(Pred) && "Not equality test?");
154 if (!AndMask.isPowerOf2())
155 return nullptr;
156
157 CreateAnd = true;
158 } else {
159 return nullptr;
160 }
161
162 // In general, when both constants are non-zero, we would need an offset to
163 // replace the select. This would require more instructions than we started
164 // with. But there's one special-case that we handle here because it can
165 // simplify/reduce the instructions.
166 APInt TC = *SelTC;
167 APInt FC = *SelFC;
168 if (!TC.isNullValue() && !FC.isNullValue()) {
169 // If the select constants differ by exactly one bit and that's the same
170 // bit that is masked and checked by the select condition, the select can
171 // be replaced by bitwise logic to set/clear one bit of the constant result.
172 if (TC.getBitWidth() != AndMask.getBitWidth() || (TC ^ FC) != AndMask)
173 return nullptr;
174 if (CreateAnd) {
175 // If we have to create an 'and', then we must kill the cmp to not
176 // increase the instruction count.
177 if (!Cmp->hasOneUse())
178 return nullptr;
179 V = Builder.CreateAnd(V, ConstantInt::get(SelType, AndMask));
180 }
181 bool ExtraBitInTC = TC.ugt(FC);
182 if (Pred == ICmpInst::ICMP_EQ) {
183 // If the masked bit in V is clear, clear or set the bit in the result:
184 // (V & AndMaskC) == 0 ? TC : FC --> (V & AndMaskC) ^ TC
185 // (V & AndMaskC) == 0 ? TC : FC --> (V & AndMaskC) | TC
186 Constant *C = ConstantInt::get(SelType, TC);
187 return ExtraBitInTC ? Builder.CreateXor(V, C) : Builder.CreateOr(V, C);
188 }
189 if (Pred == ICmpInst::ICMP_NE) {
190 // If the masked bit in V is set, set or clear the bit in the result:
191 // (V & AndMaskC) != 0 ? TC : FC --> (V & AndMaskC) | FC
192 // (V & AndMaskC) != 0 ? TC : FC --> (V & AndMaskC) ^ FC
193 Constant *C = ConstantInt::get(SelType, FC);
194 return ExtraBitInTC ? Builder.CreateOr(V, C) : Builder.CreateXor(V, C);
195 }
196 llvm_unreachable("Only expecting equality predicates");
197 }
198
199 // Make sure one of the select arms is a power-of-2.
200 if (!TC.isPowerOf2() && !FC.isPowerOf2())
201 return nullptr;
202
203 // Determine which shift is needed to transform result of the 'and' into the
204 // desired result.
205 const APInt &ValC = !TC.isNullValue() ? TC : FC;
206 unsigned ValZeros = ValC.logBase2();
207 unsigned AndZeros = AndMask.logBase2();
208
209 // Insert the 'and' instruction on the input to the truncate.
210 if (CreateAnd)
211 V = Builder.CreateAnd(V, ConstantInt::get(V->getType(), AndMask));
212
213 // If types don't match, we can still convert the select by introducing a zext
214 // or a trunc of the 'and'.
215 if (ValZeros > AndZeros) {
216 V = Builder.CreateZExtOrTrunc(V, SelType);
217 V = Builder.CreateShl(V, ValZeros - AndZeros);
218 } else if (ValZeros < AndZeros) {
219 V = Builder.CreateLShr(V, AndZeros - ValZeros);
220 V = Builder.CreateZExtOrTrunc(V, SelType);
221 } else {
222 V = Builder.CreateZExtOrTrunc(V, SelType);
223 }
224
225 // Okay, now we know that everything is set up, we just don't know whether we
226 // have an icmp_ne or icmp_eq and whether the true or false value is the zero.
227 bool ShouldNotVal = !TC.isNullValue();
228 ShouldNotVal ^= Pred == ICmpInst::ICMP_NE;
229 if (ShouldNotVal)
230 V = Builder.CreateXor(V, ValC);
231
232 return V;
233}
234
235/// We want to turn code that looks like this:
236/// %C = or %A, %B
237/// %D = select %cond, %C, %A
238/// into:
239/// %C = select %cond, %B, 0
240/// %D = or %A, %C
241///
242/// Assuming that the specified instruction is an operand to the select, return
243/// a bitmask indicating which operands of this instruction are foldable if they
244/// equal the other incoming value of the select.
245static unsigned getSelectFoldableOperands(BinaryOperator *I) {
246 switch (I->getOpcode()) {
247 case Instruction::Add:
248 case Instruction::Mul:
249 case Instruction::And:
250 case Instruction::Or:
251 case Instruction::Xor:
252 return 3; // Can fold through either operand.
253 case Instruction::Sub: // Can only fold on the amount subtracted.
254 case Instruction::Shl: // Can only fold on the shift amount.
255 case Instruction::LShr:
256 case Instruction::AShr:
257 return 1;
258 default:
259 return 0; // Cannot fold
260 }
261}
262
263/// We have (select c, TI, FI), and we know that TI and FI have the same opcode.
264Instruction *InstCombinerImpl::foldSelectOpOp(SelectInst &SI, Instruction *TI,
265 Instruction *FI) {
266 // Don't break up min/max patterns. The hasOneUse checks below prevent that
267 // for most cases, but vector min/max with bitcasts can be transformed. If the
268 // one-use restrictions are eased for other patterns, we still don't want to
269 // obfuscate min/max.
270 if ((match(&SI, m_SMin(m_Value(), m_Value())) ||
271 match(&SI, m_SMax(m_Value(), m_Value())) ||
272 match(&SI, m_UMin(m_Value(), m_Value())) ||
273 match(&SI, m_UMax(m_Value(), m_Value()))))
274 return nullptr;
275
276 // If this is a cast from the same type, merge.
277 Value *Cond = SI.getCondition();
278 Type *CondTy = Cond->getType();
279 if (TI->getNumOperands() == 1 && TI->isCast()) {
280 Type *FIOpndTy = FI->getOperand(0)->getType();
281 if (TI->getOperand(0)->getType() != FIOpndTy)
282 return nullptr;
283
284 // The select condition may be a vector. We may only change the operand
285 // type if the vector width remains the same (and matches the condition).
286 if (auto *CondVTy = dyn_cast<VectorType>(CondTy)) {
287 if (!FIOpndTy->isVectorTy() ||
288 CondVTy->getElementCount() !=
289 cast<VectorType>(FIOpndTy)->getElementCount())
290 return nullptr;
291
292 // TODO: If the backend knew how to deal with casts better, we could
293 // remove this limitation. For now, there's too much potential to create
294 // worse codegen by promoting the select ahead of size-altering casts
295 // (PR28160).
296 //
297 // Note that ValueTracking's matchSelectPattern() looks through casts
298 // without checking 'hasOneUse' when it matches min/max patterns, so this
299 // transform may end up happening anyway.
300 if (TI->getOpcode() != Instruction::BitCast &&
301 (!TI->hasOneUse() || !FI->hasOneUse()))
302 return nullptr;
303 } else if (!TI->hasOneUse() || !FI->hasOneUse()) {
304 // TODO: The one-use restrictions for a scalar select could be eased if
305 // the fold of a select in visitLoadInst() was enhanced to match a pattern
306 // that includes a cast.
307 return nullptr;
308 }
309
310 // Fold this by inserting a select from the input values.
311 Value *NewSI =
312 Builder.CreateSelect(Cond, TI->getOperand(0), FI->getOperand(0),
313 SI.getName() + ".v", &SI);
314 return CastInst::Create(Instruction::CastOps(TI->getOpcode()), NewSI,
315 TI->getType());
316 }
317
318 // Cond ? -X : -Y --> -(Cond ? X : Y)
319 Value *X, *Y;
320 if (match(TI, m_FNeg(m_Value(X))) && match(FI, m_FNeg(m_Value(Y))) &&
321 (TI->hasOneUse() || FI->hasOneUse())) {
322 // Intersect FMF from the fneg instructions and union those with the select.
323 FastMathFlags FMF = TI->getFastMathFlags();
324 FMF &= FI->getFastMathFlags();
325 FMF |= SI.getFastMathFlags();
326 Value *NewSel = Builder.CreateSelect(Cond, X, Y, SI.getName() + ".v", &SI);
327 if (auto *NewSelI = dyn_cast<Instruction>(NewSel))
328 NewSelI->setFastMathFlags(FMF);
329 Instruction *NewFNeg = UnaryOperator::CreateFNeg(NewSel);
330 NewFNeg->setFastMathFlags(FMF);
331 return NewFNeg;
332 }
333
334 // Min/max intrinsic with a common operand can have the common operand pulled
335 // after the select. This is the same transform as below for binops, but
336 // specialized for intrinsic matching and without the restrictive uses clause.
337 auto *TII = dyn_cast<IntrinsicInst>(TI);
338 auto *FII = dyn_cast<IntrinsicInst>(FI);
339 if (TII && FII && TII->getIntrinsicID() == FII->getIntrinsicID() &&
340 (TII->hasOneUse() || FII->hasOneUse())) {
341 Value *T0, *T1, *F0, *F1;
342 if (match(TII, m_MaxOrMin(m_Value(T0), m_Value(T1))) &&
343 match(FII, m_MaxOrMin(m_Value(F0), m_Value(F1)))) {
344 if (T0 == F0) {
345 Value *NewSel = Builder.CreateSelect(Cond, T1, F1, "minmaxop", &SI);
346 return CallInst::Create(TII->getCalledFunction(), {NewSel, T0});
347 }
348 if (T0 == F1) {
349 Value *NewSel = Builder.CreateSelect(Cond, T1, F0, "minmaxop", &SI);
350 return CallInst::Create(TII->getCalledFunction(), {NewSel, T0});
351 }
352 if (T1 == F0) {
353 Value *NewSel = Builder.CreateSelect(Cond, T0, F1, "minmaxop", &SI);
354 return CallInst::Create(TII->getCalledFunction(), {NewSel, T1});
355 }
356 if (T1 == F1) {
357 Value *NewSel = Builder.CreateSelect(Cond, T0, F0, "minmaxop", &SI);
358 return CallInst::Create(TII->getCalledFunction(), {NewSel, T1});
359 }
360 }
361 }
362
363 // Only handle binary operators (including two-operand getelementptr) with
364 // one-use here. As with the cast case above, it may be possible to relax the
365 // one-use constraint, but that needs be examined carefully since it may not
366 // reduce the total number of instructions.
367 if (TI->getNumOperands() != 2 || FI->getNumOperands() != 2 ||
368 (!isa<BinaryOperator>(TI) && !isa<GetElementPtrInst>(TI)) ||
369 !TI->hasOneUse() || !FI->hasOneUse())
370 return nullptr;
371
372 // Figure out if the operations have any operands in common.
373 Value *MatchOp, *OtherOpT, *OtherOpF;
374 bool MatchIsOpZero;
375 if (TI->getOperand(0) == FI->getOperand(0)) {
376 MatchOp = TI->getOperand(0);
377 OtherOpT = TI->getOperand(1);
378 OtherOpF = FI->getOperand(1);
379 MatchIsOpZero = true;
380 } else if (TI->getOperand(1) == FI->getOperand(1)) {
381 MatchOp = TI->getOperand(1);
382 OtherOpT = TI->getOperand(0);
383 OtherOpF = FI->getOperand(0);
384 MatchIsOpZero = false;
385 } else if (!TI->isCommutative()) {
386 return nullptr;
387 } else if (TI->getOperand(0) == FI->getOperand(1)) {
388 MatchOp = TI->getOperand(0);
389 OtherOpT = TI->getOperand(1);
390 OtherOpF = FI->getOperand(0);
391 MatchIsOpZero = true;
392 } else if (TI->getOperand(1) == FI->getOperand(0)) {
393 MatchOp = TI->getOperand(1);
394 OtherOpT = TI->getOperand(0);
395 OtherOpF = FI->getOperand(1);
396 MatchIsOpZero = true;
397 } else {
398 return nullptr;
399 }
400
401 // If the select condition is a vector, the operands of the original select's
402 // operands also must be vectors. This may not be the case for getelementptr
403 // for example.
404 if (CondTy->isVectorTy() && (!OtherOpT->getType()->isVectorTy() ||
405 !OtherOpF->getType()->isVectorTy()))
406 return nullptr;
407
408 // If we reach here, they do have operations in common.
409 Value *NewSI = Builder.CreateSelect(Cond, OtherOpT, OtherOpF,
410 SI.getName() + ".v", &SI);
411 Value *Op0 = MatchIsOpZero ? MatchOp : NewSI;
412 Value *Op1 = MatchIsOpZero ? NewSI : MatchOp;
413 if (auto *BO = dyn_cast<BinaryOperator>(TI)) {
414 BinaryOperator *NewBO = BinaryOperator::Create(BO->getOpcode(), Op0, Op1);
415 NewBO->copyIRFlags(TI);
416 NewBO->andIRFlags(FI);
417 return NewBO;
418 }
419 if (auto *TGEP = dyn_cast<GetElementPtrInst>(TI)) {
420 auto *FGEP = cast<GetElementPtrInst>(FI);
421 Type *ElementType = TGEP->getResultElementType();
422 return TGEP->isInBounds() && FGEP->isInBounds()
423 ? GetElementPtrInst::CreateInBounds(ElementType, Op0, {Op1})
424 : GetElementPtrInst::Create(ElementType, Op0, {Op1});
425 }
426 llvm_unreachable("Expected BinaryOperator or GEP");
427 return nullptr;
428}
429
430static bool isSelect01(const APInt &C1I, const APInt &C2I) {
431 if (!C1I.isNullValue() && !C2I.isNullValue()) // One side must be zero.
432 return false;
433 return C1I.isOneValue() || C1I.isAllOnesValue() ||
434 C2I.isOneValue() || C2I.isAllOnesValue();
435}
436
437/// Try to fold the select into one of the operands to allow further
438/// optimization.
439Instruction *InstCombinerImpl::foldSelectIntoOp(SelectInst &SI, Value *TrueVal,
440 Value *FalseVal) {
441 // See the comment above GetSelectFoldableOperands for a description of the
442 // transformation we are doing here.
443 if (auto *TVI = dyn_cast<BinaryOperator>(TrueVal)) {
444 if (TVI->hasOneUse() && !isa<Constant>(FalseVal)) {
445 if (unsigned SFO = getSelectFoldableOperands(TVI)) {
446 unsigned OpToFold = 0;
447 if ((SFO & 1) && FalseVal == TVI->getOperand(0)) {
448 OpToFold = 1;
449 } else if ((SFO & 2) && FalseVal == TVI->getOperand(1)) {
450 OpToFold = 2;
451 }
452
453 if (OpToFold) {
454 Constant *C = ConstantExpr::getBinOpIdentity(TVI->getOpcode(),
455 TVI->getType(), true);
456 Value *OOp = TVI->getOperand(2-OpToFold);
457 // Avoid creating select between 2 constants unless it's selecting
458 // between 0, 1 and -1.
459 const APInt *OOpC;
460 bool OOpIsAPInt = match(OOp, m_APInt(OOpC));
461 if (!isa<Constant>(OOp) ||
462 (OOpIsAPInt && isSelect01(C->getUniqueInteger(), *OOpC))) {
463 Value *NewSel = Builder.CreateSelect(SI.getCondition(), OOp, C);
464 NewSel->takeName(TVI);
465 BinaryOperator *BO = BinaryOperator::Create(TVI->getOpcode(),
466 FalseVal, NewSel);
467 BO->copyIRFlags(TVI);
468 return BO;
469 }
470 }
471 }
472 }
473 }
474
475 if (auto *FVI = dyn_cast<BinaryOperator>(FalseVal)) {
476 if (FVI->hasOneUse() && !isa<Constant>(TrueVal)) {
477 if (unsigned SFO = getSelectFoldableOperands(FVI)) {
478 unsigned OpToFold = 0;
479 if ((SFO & 1) && TrueVal == FVI->getOperand(0)) {
480 OpToFold = 1;
481 } else if ((SFO & 2) && TrueVal == FVI->getOperand(1)) {
482 OpToFold = 2;
483 }
484
485 if (OpToFold) {
486 Constant *C = ConstantExpr::getBinOpIdentity(FVI->getOpcode(),
487 FVI->getType(), true);
488 Value *OOp = FVI->getOperand(2-OpToFold);
489 // Avoid creating select between 2 constants unless it's selecting
490 // between 0, 1 and -1.
491 const APInt *OOpC;
492 bool OOpIsAPInt = match(OOp, m_APInt(OOpC));
493 if (!isa<Constant>(OOp) ||
494 (OOpIsAPInt && isSelect01(C->getUniqueInteger(), *OOpC))) {
495 Value *NewSel = Builder.CreateSelect(SI.getCondition(), C, OOp);
496 NewSel->takeName(FVI);
497 BinaryOperator *BO = BinaryOperator::Create(FVI->getOpcode(),
498 TrueVal, NewSel);
499 BO->copyIRFlags(FVI);
500 return BO;
501 }
502 }
503 }
504 }
505 }
506
507 return nullptr;
508}
509
510/// We want to turn:
511/// (select (icmp eq (and X, Y), 0), (and (lshr X, Z), 1), 1)
512/// into:
513/// zext (icmp ne i32 (and X, (or Y, (shl 1, Z))), 0)
514/// Note:
515/// Z may be 0 if lshr is missing.
516/// Worst-case scenario is that we will replace 5 instructions with 5 different
517/// instructions, but we get rid of the select.
518static Instruction *foldSelectICmpAndAnd(Type *SelType, const ICmpInst *Cmp,
519 Value *TVal, Value *FVal,
520 InstCombiner::BuilderTy &Builder) {
521 if (!(Cmp->hasOneUse() && Cmp->getOperand(0)->hasOneUse() &&
522 Cmp->getPredicate() == ICmpInst::ICMP_EQ &&
523 match(Cmp->getOperand(1), m_Zero()) && match(FVal, m_One())))
524 return nullptr;
525
526 // The TrueVal has general form of: and %B, 1
527 Value *B;
528 if (!match(TVal, m_OneUse(m_And(m_Value(B), m_One()))))
529 return nullptr;
530
531 // Where %B may be optionally shifted: lshr %X, %Z.
532 Value *X, *Z;
533 const bool HasShift = match(B, m_OneUse(m_LShr(m_Value(X), m_Value(Z))));
534 if (!HasShift)
535 X = B;
536
537 Value *Y;
538 if (!match(Cmp->getOperand(0), m_c_And(m_Specific(X), m_Value(Y))))
539 return nullptr;
540
541 // ((X & Y) == 0) ? ((X >> Z) & 1) : 1 --> (X & (Y | (1 << Z))) != 0
542 // ((X & Y) == 0) ? (X & 1) : 1 --> (X & (Y | 1)) != 0
543 Constant *One = ConstantInt::get(SelType, 1);
544 Value *MaskB = HasShift ? Builder.CreateShl(One, Z) : One;
545 Value *FullMask = Builder.CreateOr(Y, MaskB);
546 Value *MaskedX = Builder.CreateAnd(X, FullMask);
547 Value *ICmpNeZero = Builder.CreateIsNotNull(MaskedX);
548 return new ZExtInst(ICmpNeZero, SelType);
549}
550
551/// We want to turn:
552/// (select (icmp sgt x, C), lshr (X, Y), ashr (X, Y)); iff C s>= -1
553/// (select (icmp slt x, C), ashr (X, Y), lshr (X, Y)); iff C s>= 0
554/// into:
555/// ashr (X, Y)
556static Value *foldSelectICmpLshrAshr(const ICmpInst *IC, Value *TrueVal,
557 Value *FalseVal,
558 InstCombiner::BuilderTy &Builder) {
559 ICmpInst::Predicate Pred = IC->getPredicate();
560 Value *CmpLHS = IC->getOperand(0);
561 Value *CmpRHS = IC->getOperand(1);
562 if (!CmpRHS->getType()->isIntOrIntVectorTy())
563 return nullptr;
564
565 Value *X, *Y;
566 unsigned Bitwidth = CmpRHS->getType()->getScalarSizeInBits();
567 if ((Pred != ICmpInst::ICMP_SGT ||
568 !match(CmpRHS,
569 m_SpecificInt_ICMP(ICmpInst::ICMP_SGE, APInt(Bitwidth, -1)))) &&
570 (Pred != ICmpInst::ICMP_SLT ||
571 !match(CmpRHS,
572 m_SpecificInt_ICMP(ICmpInst::ICMP_SGE, APInt(Bitwidth, 0)))))
573 return nullptr;
574
575 // Canonicalize so that ashr is in FalseVal.
576 if (Pred == ICmpInst::ICMP_SLT)
577 std::swap(TrueVal, FalseVal);
578
579 if (match(TrueVal, m_LShr(m_Value(X), m_Value(Y))) &&
580 match(FalseVal, m_AShr(m_Specific(X), m_Specific(Y))) &&
581 match(CmpLHS, m_Specific(X))) {
582 const auto *Ashr = cast<Instruction>(FalseVal);
583 // if lshr is not exact and ashr is, this new ashr must not be exact.
584 bool IsExact = Ashr->isExact() && cast<Instruction>(TrueVal)->isExact();
585 return Builder.CreateAShr(X, Y, IC->getName(), IsExact);
586 }
587
588 return nullptr;
589}
590
591/// We want to turn:
592/// (select (icmp eq (and X, C1), 0), Y, (or Y, C2))
593/// into:
594/// (or (shl (and X, C1), C3), Y)
595/// iff:
596/// C1 and C2 are both powers of 2
597/// where:
598/// C3 = Log(C2) - Log(C1)
599///
600/// This transform handles cases where:
601/// 1. The icmp predicate is inverted
602/// 2. The select operands are reversed
603/// 3. The magnitudes of C2 and C1 are flipped
604static Value *foldSelectICmpAndOr(const ICmpInst *IC, Value *TrueVal,
605 Value *FalseVal,
606 InstCombiner::BuilderTy &Builder) {
607 // Only handle integer compares. Also, if this is a vector select, we need a
608 // vector compare.
609 if (!TrueVal->getType()->isIntOrIntVectorTy() ||
610 TrueVal->getType()->isVectorTy() != IC->getType()->isVectorTy())
611 return nullptr;
612
613 Value *CmpLHS = IC->getOperand(0);
614 Value *CmpRHS = IC->getOperand(1);
615
616 Value *V;
617 unsigned C1Log;
618 bool IsEqualZero;
619 bool NeedAnd = false;
620 if (IC->isEquality()) {
621 if (!match(CmpRHS, m_Zero()))
622 return nullptr;
623
624 const APInt *C1;
625 if (!match(CmpLHS, m_And(m_Value(), m_Power2(C1))))
626 return nullptr;
627
628 V = CmpLHS;
629 C1Log = C1->logBase2();
630 IsEqualZero = IC->getPredicate() == ICmpInst::ICMP_EQ;
631 } else if (IC->getPredicate() == ICmpInst::ICMP_SLT ||
632 IC->getPredicate() == ICmpInst::ICMP_SGT) {
633 // We also need to recognize (icmp slt (trunc (X)), 0) and
634 // (icmp sgt (trunc (X)), -1).
635 IsEqualZero = IC->getPredicate() == ICmpInst::ICMP_SGT;
636 if ((IsEqualZero && !match(CmpRHS, m_AllOnes())) ||
637 (!IsEqualZero && !match(CmpRHS, m_Zero())))
638 return nullptr;
639
640 if (!match(CmpLHS, m_OneUse(m_Trunc(m_Value(V)))))
641 return nullptr;
642
643 C1Log = CmpLHS->getType()->getScalarSizeInBits() - 1;
644 NeedAnd = true;
645 } else {
646 return nullptr;
647 }
648
649 const APInt *C2;
650 bool OrOnTrueVal = false;
651 bool OrOnFalseVal = match(FalseVal, m_Or(m_Specific(TrueVal), m_Power2(C2)));
652 if (!OrOnFalseVal)
653 OrOnTrueVal = match(TrueVal, m_Or(m_Specific(FalseVal), m_Power2(C2)));
654
655 if (!OrOnFalseVal && !OrOnTrueVal)
656 return nullptr;
657
658 Value *Y = OrOnFalseVal ? TrueVal : FalseVal;
659
660 unsigned C2Log = C2->logBase2();
661
662 bool NeedXor = (!IsEqualZero && OrOnFalseVal) || (IsEqualZero && OrOnTrueVal);
663 bool NeedShift = C1Log != C2Log;
664 bool NeedZExtTrunc = Y->getType()->getScalarSizeInBits() !=
665 V->getType()->getScalarSizeInBits();
666
667 // Make sure we don't create more instructions than we save.
668 Value *Or = OrOnFalseVal ? FalseVal : TrueVal;
669 if ((NeedShift + NeedXor + NeedZExtTrunc) >
670 (IC->hasOneUse() + Or->hasOneUse()))
671 return nullptr;
672
673 if (NeedAnd) {
674 // Insert the AND instruction on the input to the truncate.
675 APInt C1 = APInt::getOneBitSet(V->getType()->getScalarSizeInBits(), C1Log);
676 V = Builder.CreateAnd(V, ConstantInt::get(V->getType(), C1));
677 }
678
679 if (C2Log > C1Log) {
680 V = Builder.CreateZExtOrTrunc(V, Y->getType());
681 V = Builder.CreateShl(V, C2Log - C1Log);
682 } else if (C1Log > C2Log) {
683 V = Builder.CreateLShr(V, C1Log - C2Log);
684 V = Builder.CreateZExtOrTrunc(V, Y->getType());
685 } else
686 V = Builder.CreateZExtOrTrunc(V, Y->getType());
687
688 if (NeedXor)
689 V = Builder.CreateXor(V, *C2);
690
691 return Builder.CreateOr(V, Y);
692}
693
694/// Canonicalize a set or clear of a masked set of constant bits to
695/// select-of-constants form.
696static Instruction *foldSetClearBits(SelectInst &Sel,
697 InstCombiner::BuilderTy &Builder) {
698 Value *Cond = Sel.getCondition();
699 Value *T = Sel.getTrueValue();
700 Value *F = Sel.getFalseValue();
701 Type *Ty = Sel.getType();
702 Value *X;
703 const APInt *NotC, *C;
704
705 // Cond ? (X & ~C) : (X | C) --> (X & ~C) | (Cond ? 0 : C)
706 if (match(T, m_And(m_Value(X), m_APInt(NotC))) &&
707 match(F, m_OneUse(m_Or(m_Specific(X), m_APInt(C)))) && *NotC == ~(*C)) {
708 Constant *Zero = ConstantInt::getNullValue(Ty);
709 Constant *OrC = ConstantInt::get(Ty, *C);
710 Value *NewSel = Builder.CreateSelect(Cond, Zero, OrC, "masksel", &Sel);
711 return BinaryOperator::CreateOr(T, NewSel);
712 }
713
714 // Cond ? (X | C) : (X & ~C) --> (X & ~C) | (Cond ? C : 0)
715 if (match(F, m_And(m_Value(X), m_APInt(NotC))) &&
716 match(T, m_OneUse(m_Or(m_Specific(X), m_APInt(C)))) && *NotC == ~(*C)) {
717 Constant *Zero = ConstantInt::getNullValue(Ty);
718 Constant *OrC = ConstantInt::get(Ty, *C);
719 Value *NewSel = Builder.CreateSelect(Cond, OrC, Zero, "masksel", &Sel);
720 return BinaryOperator::CreateOr(F, NewSel);
721 }
722
723 return nullptr;
724}
725
726/// Transform patterns such as (a > b) ? a - b : 0 into usub.sat(a, b).
727/// There are 8 commuted/swapped variants of this pattern.
728/// TODO: Also support a - UMIN(a,b) patterns.
729static Value *canonicalizeSaturatedSubtract(const ICmpInst *ICI,
730 const Value *TrueVal,
731 const Value *FalseVal,
732 InstCombiner::BuilderTy &Builder) {
733 ICmpInst::Predicate Pred = ICI->getPredicate();
734 if (!ICmpInst::isUnsigned(Pred))
735 return nullptr;
736
737 // (b > a) ? 0 : a - b -> (b <= a) ? a - b : 0
738 if (match(TrueVal, m_Zero())) {
739 Pred = ICmpInst::getInversePredicate(Pred);
740 std::swap(TrueVal, FalseVal);
741 }
742 if (!match(FalseVal, m_Zero()))
743 return nullptr;
744
745 Value *A = ICI->getOperand(0);
746 Value *B = ICI->getOperand(1);
747 if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_ULT) {
748 // (b < a) ? a - b : 0 -> (a > b) ? a - b : 0
749 std::swap(A, B);
750 Pred = ICmpInst::getSwappedPredicate(Pred);
Value stored to 'Pred' is never read
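(The only later use of 'Pred' in this function is the assert at lines 753-754; since this build defines NDEBUG, that assert compiles away, so the value written by getSwappedPredicate() is never read.)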
751 }
752
753 assert((Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_UGT) &&
754 "Unexpected isUnsigned predicate!");
755
756 // Ensure the sub is of the form:
757 // (a > b) ? a - b : 0 -> usub.sat(a, b)
758 // (a > b) ? b - a : 0 -> -usub.sat(a, b)
759 // Checking for both a-b and a+(-b) as a constant.
760 bool IsNegative = false;
761 const APInt *C;
762 if (match(TrueVal, m_Sub(m_Specific(B), m_Specific(A))) ||
763 (match(A, m_APInt(C)) &&
764 match(TrueVal, m_Add(m_Specific(B), m_SpecificInt(-*C)))))
765 IsNegative = true;
766 else if (!match(TrueVal, m_Sub(m_Specific(A), m_Specific(B))) &&
767 !(match(B, m_APInt(C)) &&
768 match(TrueVal, m_Add(m_Specific(A), m_SpecificInt(-*C)))))
769 return nullptr;
770
771 // If we are adding a negate and the sub and icmp are used anywhere else, we
772 // would end up with more instructions.
773 if (IsNegative && !TrueVal->hasOneUse() && !ICI->hasOneUse())
774 return nullptr;
775
776 // (a > b) ? a - b : 0 -> usub.sat(a, b)
777 // (a > b) ? b - a : 0 -> -usub.sat(a, b)
778 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::usub_sat, A, B);
779 if (IsNegative)
780 Result = Builder.CreateNeg(Result);
781 return Result;
782}
783
784static Value *canonicalizeSaturatedAdd(ICmpInst *Cmp, Value *TVal, Value *FVal,
785 InstCombiner::BuilderTy &Builder) {
786 if (!Cmp->hasOneUse())
787 return nullptr;
788
789 // Match unsigned saturated add with constant.
790 Value *Cmp0 = Cmp->getOperand(0);
791 Value *Cmp1 = Cmp->getOperand(1);
792 ICmpInst::Predicate Pred = Cmp->getPredicate();
793 Value *X;
794 const APInt *C, *CmpC;
795 if (Pred == ICmpInst::ICMP_ULT &&
796 match(TVal, m_Add(m_Value(X), m_APInt(C))) && X == Cmp0 &&
797 match(FVal, m_AllOnes()) && match(Cmp1, m_APInt(CmpC)) && *CmpC == ~*C) {
798 // (X u< ~C) ? (X + C) : -1 --> uadd.sat(X, C)
799 return Builder.CreateBinaryIntrinsic(
800 Intrinsic::uadd_sat, X, ConstantInt::get(X->getType(), *C));
801 }
802
803 // Match unsigned saturated add of 2 variables with an unnecessary 'not'.
804 // There are 8 commuted variants.
805 // Canonicalize -1 (saturated result) to true value of the select.
806 if (match(FVal, m_AllOnes())) {
807 std::swap(TVal, FVal);
808 Pred = CmpInst::getInversePredicate(Pred);
809 }
810 if (!match(TVal, m_AllOnes()))
811 return nullptr;
812
813 // Canonicalize predicate to less-than or less-or-equal-than.
814 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
815 std::swap(Cmp0, Cmp1);
816 Pred = CmpInst::getSwappedPredicate(Pred);
817 }
818 if (Pred != ICmpInst::ICMP_ULT && Pred != ICmpInst::ICMP_ULE)
819 return nullptr;
820
821 // Match unsigned saturated add of 2 variables with an unnecessary 'not'.
822 // Strictness of the comparison is irrelevant.
823 Value *Y;
824 if (match(Cmp0, m_Not(m_Value(X))) &&
825 match(FVal, m_c_Add(m_Specific(X), m_Value(Y))) && Y == Cmp1) {
826 // (~X u< Y) ? -1 : (X + Y) --> uadd.sat(X, Y)
827 // (~X u< Y) ? -1 : (Y + X) --> uadd.sat(X, Y)
828 return Builder.CreateBinaryIntrinsic(Intrinsic::uadd_sat, X, Y);
829 }
830 // The 'not' op may be included in the sum but not the compare.
831 // Strictness of the comparison is irrelevant.
832 X = Cmp0;
833 Y = Cmp1;
834 if (match(FVal, m_c_Add(m_Not(m_Specific(X)), m_Specific(Y)))) {
835 // (X u< Y) ? -1 : (~X + Y) --> uadd.sat(~X, Y)
836 // (X u< Y) ? -1 : (Y + ~X) --> uadd.sat(Y, ~X)
837 BinaryOperator *BO = cast<BinaryOperator>(FVal);
838 return Builder.CreateBinaryIntrinsic(
839 Intrinsic::uadd_sat, BO->getOperand(0), BO->getOperand(1));
840 }
841 // The overflow may be detected via the add wrapping round.
842 // This is only valid for strict comparison!
843 if (Pred == ICmpInst::ICMP_ULT &&
844 match(Cmp0, m_c_Add(m_Specific(Cmp1), m_Value(Y))) &&
845 match(FVal, m_c_Add(m_Specific(Cmp1), m_Specific(Y)))) {
846 // ((X + Y) u< X) ? -1 : (X + Y) --> uadd.sat(X, Y)
847 // ((X + Y) u< Y) ? -1 : (X + Y) --> uadd.sat(X, Y)
848 return Builder.CreateBinaryIntrinsic(Intrinsic::uadd_sat, Cmp1, Y);
849 }
850
851 return nullptr;
852}
853
854/// Fold the following code sequence:
855/// \code
856/// int a = ctlz(x & -x);
857/// x ? 31 - a : a;
858/// \endcode
859///
860/// into:
861/// cttz(x)
862static Instruction *foldSelectCtlzToCttz(ICmpInst *ICI, Value *TrueVal,
863 Value *FalseVal,
864 InstCombiner::BuilderTy &Builder) {
865 unsigned BitWidth = TrueVal->getType()->getScalarSizeInBits();
866 if (!ICI->isEquality() || !match(ICI->getOperand(1), m_Zero()))
867 return nullptr;
868
869 if (ICI->getPredicate() == ICmpInst::ICMP_NE)
870 std::swap(TrueVal, FalseVal);
871
872 if (!match(FalseVal,
873 m_Xor(m_Deferred(TrueVal), m_SpecificInt(BitWidth - 1))))
874 return nullptr;
875
876 if (!match(TrueVal, m_Intrinsic<Intrinsic::ctlz>()))
877 return nullptr;
878
879 Value *X = ICI->getOperand(0);
880 auto *II = cast<IntrinsicInst>(TrueVal);
881 if (!match(II->getOperand(0), m_c_And(m_Specific(X), m_Neg(m_Specific(X)))))
882 return nullptr;
883
884 Function *F = Intrinsic::getDeclaration(II->getModule(), Intrinsic::cttz,
885 II->getType());
886 return CallInst::Create(F, {X, II->getArgOperand(1)});
887}
888
889/// Attempt to fold a cttz/ctlz followed by a icmp plus select into a single
890/// call to cttz/ctlz with flag 'is_zero_undef' cleared.
891///
892/// For example, we can fold the following code sequence:
893/// \code
894/// %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 true)
895/// %1 = icmp ne i32 %x, 0
896/// %2 = select i1 %1, i32 %0, i32 32
897/// \endcode
898///
899/// into:
900/// %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
901static Value *foldSelectCttzCtlz(ICmpInst *ICI, Value *TrueVal, Value *FalseVal,
902 InstCombiner::BuilderTy &Builder) {
903 ICmpInst::Predicate Pred = ICI->getPredicate();
904 Value *CmpLHS = ICI->getOperand(0);
905 Value *CmpRHS = ICI->getOperand(1);
906
907 // Check if the condition value compares a value for equality against zero.
908 if (!ICI->isEquality() || !match(CmpRHS, m_Zero()))
909 return nullptr;
910
911 Value *SelectArg = FalseVal;
912 Value *ValueOnZero = TrueVal;
913 if (Pred == ICmpInst::ICMP_NE)
914 std::swap(SelectArg, ValueOnZero);
915
916 // Skip zero extend/truncate.
917 Value *Count = nullptr;
918 if (!match(SelectArg, m_ZExt(m_Value(Count))) &&
919 !match(SelectArg, m_Trunc(m_Value(Count))))
920 Count = SelectArg;
921
922 // Check that 'Count' is a call to intrinsic cttz/ctlz. Also check that the
923 // input to the cttz/ctlz is used as LHS for the compare instruction.
924 if (!match(Count, m_Intrinsic<Intrinsic::cttz>(m_Specific(CmpLHS))) &&
925 !match(Count, m_Intrinsic<Intrinsic::ctlz>(m_Specific(CmpLHS))))
926 return nullptr;
927
928 IntrinsicInst *II = cast<IntrinsicInst>(Count);
929
930 // Check if the value propagated on zero is a constant number equal to the
931 // sizeof in bits of 'Count'.
932 unsigned SizeOfInBits = Count->getType()->getScalarSizeInBits();
933 if (match(ValueOnZero, m_SpecificInt(SizeOfInBits))) {
934 // Explicitly clear the 'is_zero_undef' flag. It's always valid to go from
935 // true to false on this flag, so we can replace it for all users.
936 II->setArgOperand(1, ConstantInt::getFalse(II->getContext()));
937 return SelectArg;
938 }
939
940 // The ValueOnZero is not the bitwidth. But if the cttz/ctlz (and optional
941 // zext/trunc) have one use (ending at the select), the cttz/ctlz result will
942 // not be used if the input is zero. Relax to 'is_zero_undef' for that case.
943 if (II->hasOneUse() && SelectArg->hasOneUse() &&
944 !match(II->getArgOperand(1), m_One()))
945 II->setArgOperand(1, ConstantInt::getTrue(II->getContext()));
946
947 return nullptr;
948}
949
950/// Return true if we find and adjust an icmp+select pattern where the compare
951/// is with a constant that can be incremented or decremented to match the
952/// minimum or maximum idiom.
953static bool adjustMinMax(SelectInst &Sel, ICmpInst &Cmp) {
954 ICmpInst::Predicate Pred = Cmp.getPredicate();
955 Value *CmpLHS = Cmp.getOperand(0);
956 Value *CmpRHS = Cmp.getOperand(1);
957 Value *TrueVal = Sel.getTrueValue();
958 Value *FalseVal = Sel.getFalseValue();
959
960 // We may move or edit the compare, so make sure the select is the only user.
961 const APInt *CmpC;
962 if (!Cmp.hasOneUse() || !match(CmpRHS, m_APInt(CmpC)))
963 return false;
964
965 // These transforms only work for selects of integers or vector selects of
966 // integer vectors.
967 Type *SelTy = Sel.getType();
968 auto *SelEltTy = dyn_cast<IntegerType>(SelTy->getScalarType());
969 if (!SelEltTy || SelTy->isVectorTy() != Cmp.getType()->isVectorTy())
970 return false;
971
972 Constant *AdjustedRHS;
973 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SGT)
974 AdjustedRHS = ConstantInt::get(CmpRHS->getType(), *CmpC + 1);
975 else if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT)
976 AdjustedRHS = ConstantInt::get(CmpRHS->getType(), *CmpC - 1);
977 else
978 return false;
979
980 // X > C ? X : C+1 --> X < C+1 ? C+1 : X
981 // X < C ? X : C-1 --> X > C-1 ? C-1 : X
982 if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
983 (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
984 ; // Nothing to do here. Values match without any sign/zero extension.
985 }
986 // Types do not match. Instead of calculating this with mixed types, promote
987 // all to the larger type. This enables scalar evolution to analyze this
988 // expression.
989 else if (CmpRHS->getType()->getScalarSizeInBits() < SelEltTy->getBitWidth()) {
990 Constant *SextRHS = ConstantExpr::getSExt(AdjustedRHS, SelTy);
991
992 // X = sext x; x >s c ? X : C+1 --> X = sext x; X <s C+1 ? C+1 : X
993 // X = sext x; x <s c ? X : C-1 --> X = sext x; X >s C-1 ? C-1 : X
994 // X = sext x; x >u c ? X : C+1 --> X = sext x; X <u C+1 ? C+1 : X
995 // X = sext x; x <u c ? X : C-1 --> X = sext x; X >u C-1 ? C-1 : X
996 if (match(TrueVal, m_SExt(m_Specific(CmpLHS))) && SextRHS == FalseVal) {
997 CmpLHS = TrueVal;
998 AdjustedRHS = SextRHS;
999 } else if (match(FalseVal, m_SExt(m_Specific(CmpLHS))) &&
1000 SextRHS == TrueVal) {
1001 CmpLHS = FalseVal;
1002 AdjustedRHS = SextRHS;
1003 } else if (Cmp.isUnsigned()) {
1004 Constant *ZextRHS = ConstantExpr::getZExt(AdjustedRHS, SelTy);
1005 // X = zext x; x >u c ? X : C+1 --> X = zext x; X <u C+1 ? C+1 : X
1006 // X = zext x; x <u c ? X : C-1 --> X = zext x; X >u C-1 ? C-1 : X
1007 // zext + signed compare cannot be changed:
1008 // 0xff <s 0x00, but 0x00ff >s 0x0000
1009 if (match(TrueVal, m_ZExt(m_Specific(CmpLHS))) && ZextRHS == FalseVal) {
1010 CmpLHS = TrueVal;
1011 AdjustedRHS = ZextRHS;
1012 } else if (match(FalseVal, m_ZExt(m_Specific(CmpLHS))) &&
1013 ZextRHS == TrueVal) {
1014 CmpLHS = FalseVal;
1015 AdjustedRHS = ZextRHS;
1016 } else {
1017 return false;
1018 }
1019 } else {
1020 return false;
1021 }
1022 } else {
1023 return false;
1024 }
1025
1026 Pred = ICmpInst::getSwappedPredicate(Pred);
1027 CmpRHS = AdjustedRHS;
1028 std::swap(FalseVal, TrueVal);
1029 Cmp.setPredicate(Pred);
1030 Cmp.setOperand(0, CmpLHS);
1031 Cmp.setOperand(1, CmpRHS);
1032 Sel.setOperand(1, TrueVal);
1033 Sel.setOperand(2, FalseVal);
1034 Sel.swapProfMetadata();
1035
1036 // Move the compare instruction right before the select instruction. Otherwise
1037 // the sext/zext value may be defined after the compare instruction uses it.
1038 Cmp.moveBefore(&Sel);
1039
1040 return true;
1041}
1042
1043/// If this is an integer min/max (icmp + select) with a constant operand,
1044/// create the canonical icmp for the min/max operation and canonicalize the
1045/// constant to the 'false' operand of the select:
1046/// select (icmp Pred X, C1), C2, X --> select (icmp Pred' X, C2), X, C2
1047/// Note: if C1 != C2, this will change the icmp constant to the existing
1048/// constant operand of the select.
1049static Instruction *canonicalizeMinMaxWithConstant(SelectInst &Sel,
1050 ICmpInst &Cmp,
1051 InstCombinerImpl &IC) {
1052 if (!Cmp.hasOneUse() || !isa<Constant>(Cmp.getOperand(1)))
1053 return nullptr;
1054
1055 // Canonicalize the compare predicate based on whether we have min or max.
1056 Value *LHS, *RHS;
1057 SelectPatternResult SPR = matchSelectPattern(&Sel, LHS, RHS);
1058 if (!SelectPatternResult::isMinOrMax(SPR.Flavor))
1059 return nullptr;
1060
1061 // Is this already canonical?
1062 ICmpInst::Predicate CanonicalPred = getMinMaxPred(SPR.Flavor);
1063 if (Cmp.getOperand(0) == LHS && Cmp.getOperand(1) == RHS &&
1064 Cmp.getPredicate() == CanonicalPred)
1065 return nullptr;
1066
1067 // Bail out on unsimplified X-0 operand (due to some worklist management bug),
1068 // as this may cause an infinite combine loop. Let the sub be folded first.
1069 if (match(LHS, m_Sub(m_Value(), m_Zero())) ||
1070 match(RHS, m_Sub(m_Value(), m_Zero())))
1071 return nullptr;
1072
1073 // Create the canonical compare and plug it into the select.
1074 IC.replaceOperand(Sel, 0, IC.Builder.CreateICmp(CanonicalPred, LHS, RHS));
1075
1076 // If the select operands did not change, we're done.
1077 if (Sel.getTrueValue() == LHS && Sel.getFalseValue() == RHS)
1078 return &Sel;
1079
1080 // If we are swapping the select operands, swap the metadata too.
1081 assert(Sel.getTrueValue() == RHS && Sel.getFalseValue() == LHS &&
1082 "Unexpected results from matchSelectPattern");
1083 Sel.swapValues();
1084 Sel.swapProfMetadata();
1085 return &Sel;
1086}
1087
1088static Instruction *canonicalizeAbsNabs(SelectInst &Sel, ICmpInst &Cmp,
1089 InstCombinerImpl &IC) {
1090 if (!Cmp.hasOneUse() || !isa<Constant>(Cmp.getOperand(1)))
1091 return nullptr;
1092
1093 Value *LHS, *RHS;
1094 SelectPatternFlavor SPF = matchSelectPattern(&Sel, LHS, RHS).Flavor;
1095 if (SPF != SelectPatternFlavor::SPF_ABS &&
1096 SPF != SelectPatternFlavor::SPF_NABS)
1097 return nullptr;
1098
1099 // Note that NSW flag can only be propagated for normal, non-negated abs!
1100 bool IntMinIsPoison = SPF == SelectPatternFlavor::SPF_ABS &&
1101 match(RHS, m_NSWNeg(m_Specific(LHS)));
1102 Constant *IntMinIsPoisonC =
1103 ConstantInt::get(Type::getInt1Ty(Sel.getContext()), IntMinIsPoison);
1104 Instruction *Abs =
1105 IC.Builder.CreateBinaryIntrinsic(Intrinsic::abs, LHS, IntMinIsPoisonC);
1106
1107 if (SPF == SelectPatternFlavor::SPF_NABS)
1108 return BinaryOperator::CreateNeg(Abs); // Always without NSW flag!
1109
1110 return IC.replaceInstUsesWith(Sel, Abs);
1111}
1112
1113/// If we have a select with an equality comparison, then we know the value in
1114/// one of the arms of the select. See if substituting this value into an arm
1115/// and simplifying the result yields the same value as the other arm.
1116///
1117/// To make this transform safe, we must drop poison-generating flags
1118/// (nsw, etc) if we simplified to a binop because the select may be guarding
1119/// that poison from propagating. If the existing binop already had no
1120/// poison-generating flags, then this transform can be done by instsimplify.
1121///
1122/// Consider:
1123/// %cmp = icmp eq i32 %x, 2147483647
1124/// %add = add nsw i32 %x, 1
1125/// %sel = select i1 %cmp, i32 -2147483648, i32 %add
1126///
1127/// We can't replace %sel with %add unless we strip away the flags.
1128/// TODO: Wrapping flags could be preserved in some cases with better analysis.
1129Instruction *InstCombinerImpl::foldSelectValueEquivalence(SelectInst &Sel,
1130 ICmpInst &Cmp) {
1131 // Value equivalence substitution requires an all-or-nothing replacement.
1132 // It does not make sense for a vector compare where each lane is chosen
1133 // independently.
1134 if (!Cmp.isEquality() || Cmp.getType()->isVectorTy())
1135 return nullptr;
1136
1137 // Canonicalize the pattern to ICMP_EQ by swapping the select operands.
1138 Value *TrueVal = Sel.getTrueValue(), *FalseVal = Sel.getFalseValue();
1139 bool Swapped = false;
1140 if (Cmp.getPredicate() == ICmpInst::ICMP_NE) {
1141 std::swap(TrueVal, FalseVal);
1142 Swapped = true;
1143 }
1144
1145 // In X == Y ? f(X) : Z, try to evaluate f(Y) and replace the operand.
1146 // Make sure Y cannot be undef though, as we might pick different values for
1147 // undef in the icmp and in f(Y). Additionally, take care to avoid replacing
1148 // X == Y ? X : Z with X == Y ? Y : Z, as that would lead to an infinite
1149 // replacement cycle.
1150 Value *CmpLHS = Cmp.getOperand(0), *CmpRHS = Cmp.getOperand(1);
1151 if (TrueVal != CmpLHS &&
1152 isGuaranteedNotToBeUndefOrPoison(CmpRHS, SQ.AC, &Sel, &DT)) {
1153 if (Value *V = simplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, SQ,
1154 /* AllowRefinement */ true))
1155 return replaceOperand(Sel, Swapped ? 2 : 1, V);
1156
1157 // Even if TrueVal does not simplify, we can directly replace a use of
1158 // CmpLHS with CmpRHS, as long as the instruction is not used anywhere
1159 // else and is safe to speculatively execute (we may end up executing it
1160 // with different operands, which should not cause side-effects or trigger
1161 // undefined behavior). Only do this if CmpRHS is a constant, as
1162 // profitability is not clear for other cases.
1163 // FIXME: The replacement could be performed recursively.
1164 if (match(CmpRHS, m_ImmConstant()) && !match(CmpLHS, m_ImmConstant()))
1165 if (auto *I = dyn_cast<Instruction>(TrueVal))
1166 if (I->hasOneUse() && isSafeToSpeculativelyExecute(I))
1167 for (Use &U : I->operands())
1168 if (U == CmpLHS) {
1169 replaceUse(U, CmpRHS);
1170 return &Sel;
1171 }
1172 }
1173 if (TrueVal != CmpRHS &&
1174 isGuaranteedNotToBeUndefOrPoison(CmpLHS, SQ.AC, &Sel, &DT))
1175 if (Value *V = simplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, SQ,
1176 /* AllowRefinement */ true))
1177 return replaceOperand(Sel, Swapped ? 2 : 1, V);
1178
1179 auto *FalseInst = dyn_cast<Instruction>(FalseVal);
1180 if (!FalseInst)
1181 return nullptr;
1182
1183 // InstSimplify already performed this fold if it was possible subject to
1184 // current poison-generating flags. Try the transform again with
1185 // poison-generating flags temporarily dropped.
1186 bool WasNUW = false, WasNSW = false, WasExact = false, WasInBounds = false;
1187 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(FalseVal)) {
1188 WasNUW = OBO->hasNoUnsignedWrap();
1189 WasNSW = OBO->hasNoSignedWrap();
1190 FalseInst->setHasNoUnsignedWrap(false);
1191 FalseInst->setHasNoSignedWrap(false);
1192 }
1193 if (auto *PEO = dyn_cast<PossiblyExactOperator>(FalseVal)) {
1194 WasExact = PEO->isExact();
1195 FalseInst->setIsExact(false);
1196 }
1197 if (auto *GEP = dyn_cast<GetElementPtrInst>(FalseVal)) {
1198 WasInBounds = GEP->isInBounds();
1199 GEP->setIsInBounds(false);
1200 }
1201
1202 // Try each equivalence substitution possibility.
1203 // We have an 'EQ' comparison, so the select's false value will propagate.
1204 // Example:
1205 // (X == 42) ? 43 : (X + 1) --> (X == 42) ? (X + 1) : (X + 1) --> X + 1
1206 if (simplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, SQ,
1207 /* AllowRefinement */ false) == TrueVal ||
1208 simplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, SQ,
1209 /* AllowRefinement */ false) == TrueVal) {
1210 return replaceInstUsesWith(Sel, FalseVal);
1211 }
1212
1213 // Restore poison-generating flags if the transform did not apply.
1214 if (WasNUW)
1215 FalseInst->setHasNoUnsignedWrap();
1216 if (WasNSW)
1217 FalseInst->setHasNoSignedWrap();
1218 if (WasExact)
1219 FalseInst->setIsExact();
1220 if (WasInBounds)
1221 cast<GetElementPtrInst>(FalseInst)->setIsInBounds();
1222
1223 return nullptr;
1224}
1225
1226// See if this is a pattern like:
1227// %old_cmp1 = icmp slt i32 %x, C2
1228// %old_replacement = select i1 %old_cmp1, i32 %target_low, i32 %target_high
1229// %old_x_offseted = add i32 %x, C1
1230// %old_cmp0 = icmp ult i32 %old_x_offseted, C0
1231// %r = select i1 %old_cmp0, i32 %x, i32 %old_replacement
1232// This can be rewritten as more canonical pattern:
1233// %new_cmp1 = icmp slt i32 %x, -C1
1234// %new_cmp2 = icmp sge i32 %x, C0-C1
1235// %new_clamped_low = select i1 %new_cmp1, i32 %target_low, i32 %x
1236// %r = select i1 %new_cmp2, i32 %target_high, i32 %new_clamped_low
1237// Iff -C1 s<= C2 s<= C0-C1
1238// The ULT predicate can also be UGT iff C0 != -1 (+invert result)
1239// The SLT predicate can also be SGT iff C2 != INT_MAX (+invert result)
1240static Instruction *canonicalizeClampLike(SelectInst &Sel0, ICmpInst &Cmp0,
1241 InstCombiner::BuilderTy &Builder) {
1242 Value *X = Sel0.getTrueValue();
1243 Value *Sel1 = Sel0.getFalseValue();
1244
1245 // First match the condition of the outermost select.
1246 // Said condition must be one-use.
1247 if (!Cmp0.hasOneUse())
1248 return nullptr;
1249 Value *Cmp00 = Cmp0.getOperand(0);
1250 Constant *C0;
1251 if (!match(Cmp0.getOperand(1),
1252 m_CombineAnd(m_AnyIntegralConstant(), m_Constant(C0))))
1253 return nullptr;
1254 // Canonicalize Cmp0 into the form we expect.
1255 // FIXME: we shouldn't care about lanes that are 'undef' in the end?
1256 switch (Cmp0.getPredicate()) {
1257 case ICmpInst::Predicate::ICMP_ULT:
1258 break; // Great!
1259 case ICmpInst::Predicate::ICMP_ULE:
1260 // We'd have to increment C0 by one, and for that it must not have all-ones
1261 // element, but then it would have been canonicalized to 'ult' before
1262 // we get here. So we can't do anything useful with 'ule'.
1263 return nullptr;
1264 case ICmpInst::Predicate::ICMP_UGT:
1265 // We want to canonicalize it to 'ult', so we'll need to increment C0,
1266 // which again means it must not have any all-ones elements.
1267 if (!match(C0,
1268 m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_NE,
1269 APInt::getAllOnesValue(
1270 C0->getType()->getScalarSizeInBits()))))
1271 return nullptr; // Can't do, have all-ones element[s].
1272 C0 = InstCombiner::AddOne(C0);
1273 std::swap(X, Sel1);
1274 break;
1275 case ICmpInst::Predicate::ICMP_UGE:
1276 // The only way we'd get this predicate if this `icmp` has extra uses,
1277 // but then we won't be able to do this fold.
1278 return nullptr;
1279 default:
1280 return nullptr; // Unknown predicate.
1281 }
1282
1283 // Now that we've canonicalized the ICmp, we know the X we expect;
1284 // the select, on the other hand, should be one-use.
1285 if (!Sel1->hasOneUse())
1286 return nullptr;
1287
1288 // We now can finish matching the condition of the outermost select:
1289 // it should either be the X itself, or an addition of some constant to X.
1290 Constant *C1;
1291 if (Cmp00 == X)
1292 C1 = ConstantInt::getNullValue(Sel0.getType());
1293 else if (!match(Cmp00,
1294 m_Add(m_Specific(X),
1295 m_CombineAnd(m_AnyIntegralConstant(), m_Constant(C1)))))
1296 return nullptr;
1297
1298 Value *Cmp1;
1299 ICmpInst::Predicate Pred1;
1300 Constant *C2;
1301 Value *ReplacementLow, *ReplacementHigh;
1302 if (!match(Sel1, m_Select(m_Value(Cmp1), m_Value(ReplacementLow),
1303 m_Value(ReplacementHigh))) ||
1304 !match(Cmp1,
1305 m_ICmp(Pred1, m_Specific(X),
1306 m_CombineAnd(m_AnyIntegralConstant(), m_Constant(C2)))))
1307 return nullptr;
1308
1309 if (!Cmp1->hasOneUse() && (Cmp00 == X || !Cmp00->hasOneUse()))
1310 return nullptr; // Not enough one-use instructions for the fold.
1311 // FIXME: this restriction could be relaxed if Cmp1 can be reused as one of
1312 // two comparisons we'll need to build.
1313
1314 // Canonicalize Cmp1 into the form we expect.
1315 // FIXME: we shouldn't care about lanes that are 'undef' in the end?
1316 switch (Pred1) {
1317 case ICmpInst::Predicate::ICMP_SLT:
1318 break;
1319 case ICmpInst::Predicate::ICMP_SLE:
1320 // We'd have to increment C2 by one, and for that it must not have signed
1321 // max element, but then it would have been canonicalized to 'slt' before
1322 // we get here. So we can't do anything useful with 'sle'.
1323 return nullptr;
1324 case ICmpInst::Predicate::ICMP_SGT:
1325 // We want to canonicalize it to 'slt', so we'll need to increment C2,
1326 // which again means it must not have any signed max elements.
1327 if (!match(C2,
1328 m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_NE,
1329 APInt::getSignedMaxValue(
1330 C2->getType()->getScalarSizeInBits()))))
1331 return nullptr; // Can't do, have signed max element[s].
1332 C2 = InstCombiner::AddOne(C2);
1333 LLVM_FALLTHROUGH;
1334 case ICmpInst::Predicate::ICMP_SGE:
1335 // Also non-canonical, but here we don't need to change C2,
1336 // so we don't have any restrictions on C2, so we can just handle it.
1337 std::swap(ReplacementLow, ReplacementHigh);
1338 break;
1339 default:
1340 return nullptr; // Unknown predicate.
1341 }
1342
1343 // The thresholds of this clamp-like pattern.
1344 auto *ThresholdLowIncl = ConstantExpr::getNeg(C1);
1345 auto *ThresholdHighExcl = ConstantExpr::getSub(C0, C1);
1346
1347 // The fold has a precondition 1: C2 s>= ThresholdLow
1348 auto *Precond1 = ConstantExpr::getICmp(ICmpInst::Predicate::ICMP_SGE, C2,
1349 ThresholdLowIncl);
1350 if (!match(Precond1, m_One()))
1351 return nullptr;
1352 // The fold has a precondition 2: C2 s<= ThresholdHigh
1353 auto *Precond2 = ConstantExpr::getICmp(ICmpInst::Predicate::ICMP_SLE, C2,
1354 ThresholdHighExcl);
1355 if (!match(Precond2, m_One()))
1356 return nullptr;
1357
1358 // All good, finally emit the new pattern.
1359 Value *ShouldReplaceLow = Builder.CreateICmpSLT(X, ThresholdLowIncl);
1360 Value *ShouldReplaceHigh = Builder.CreateICmpSGE(X, ThresholdHighExcl);
1361 Value *MaybeReplacedLow =
1362 Builder.CreateSelect(ShouldReplaceLow, ReplacementLow, X);
1363 Instruction *MaybeReplacedHigh =
1364 SelectInst::Create(ShouldReplaceHigh, ReplacementHigh, MaybeReplacedLow);
1365
1366 return MaybeReplacedHigh;
1367}
1368
1369// If we have
1370// %cmp = icmp [canonical predicate] i32 %x, C0
1371// %r = select i1 %cmp, i32 %y, i32 C1
1372// Where C0 != C1 and %x may be different from %y, see if the constant that we
1373// will have if we flip the strictness of the predicate (i.e. without changing
1374// the result) is identical to the C1 in the select. If it matches, we can change
1375// the original comparison to one with the swapped predicate, reuse the constant,
1376// and swap the operands of the select.
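// For example (a sketch; the values and constants are hypothetical):
//   %cmp = icmp ult i32 %x, 8
//   %r   = select i1 %cmp, i32 %y, i32 7
// Flipping the strictness of 'ult 8' gives 'ule 7', whose constant 7 already
// appears in the select, so the pattern can be rewritten as:
//   %cmp.inv = icmp ugt i32 %x, 7
//   %r       = select i1 %cmp.inv, i32 7, i32 %y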
1377static Instruction *
1378tryToReuseConstantFromSelectInComparison(SelectInst &Sel, ICmpInst &Cmp,
1379 InstCombinerImpl &IC) {
1380 ICmpInst::Predicate Pred;
1381 Value *X;
1382 Constant *C0;
1383 if (!match(&Cmp, m_OneUse(m_ICmp(
1384 Pred, m_Value(X),
1385 m_CombineAnd(m_AnyIntegralConstant(), m_Constant(C0))))))
1386 return nullptr;
1387
1388  // If the comparison predicate is non-relational, we won't be able to do anything.
1389 if (ICmpInst::isEquality(Pred))
1390 return nullptr;
1391
1392  // If the comparison predicate is non-canonical, then we certainly won't be able
1393 // to make it canonical; canonicalizeCmpWithConstant() already tried.
1394 if (!InstCombiner::isCanonicalPredicate(Pred))
1395 return nullptr;
1396
1397  // If the [input] type of the comparison and the select type are different,
1398  // let's abort for now. We could try to compare constants with trunc/[zs]ext though.
1399 if (C0->getType() != Sel.getType())
1400 return nullptr;
1401
1402 // FIXME: are there any magic icmp predicate+constant pairs we must not touch?
1403
1404 Value *SelVal0, *SelVal1; // We do not care which one is from where.
1405 match(&Sel, m_Select(m_Value(), m_Value(SelVal0), m_Value(SelVal1)));
1406 // At least one of these values we are selecting between must be a constant
1407 // else we'll never succeed.
1408 if (!match(SelVal0, m_AnyIntegralConstant()) &&
1409 !match(SelVal1, m_AnyIntegralConstant()))
1410 return nullptr;
1411
1412 // Does this constant C match any of the `select` values?
1413 auto MatchesSelectValue = [SelVal0, SelVal1](Constant *C) {
1414 return C->isElementWiseEqual(SelVal0) || C->isElementWiseEqual(SelVal1);
1415 };
1416
1417 // If C0 *already* matches true/false value of select, we are done.
1418 if (MatchesSelectValue(C0))
1419 return nullptr;
1420
1421 // Check the constant we'd have with flipped-strictness predicate.
1422 auto FlippedStrictness =
1423 InstCombiner::getFlippedStrictnessPredicateAndConstant(Pred, C0);
1424 if (!FlippedStrictness)
1425 return nullptr;
1426
1427  // If said constant doesn't match either, then there is no hope.
1428 if (!MatchesSelectValue(FlippedStrictness->second))
1429 return nullptr;
1430
1431  // It matched! Let's insert the new comparison just before the select.
1432 InstCombiner::BuilderTy::InsertPointGuard Guard(IC.Builder);
1433 IC.Builder.SetInsertPoint(&Sel);
1434
1435 Pred = ICmpInst::getSwappedPredicate(Pred); // Yes, swapped.
1436 Value *NewCmp = IC.Builder.CreateICmp(Pred, X, FlippedStrictness->second,
1437 Cmp.getName() + ".inv");
1438 IC.replaceOperand(Sel, 0, NewCmp);
1439 Sel.swapValues();
1440 Sel.swapProfMetadata();
1441
1442 return &Sel;
1443}
1444
1445/// Visit a SelectInst that has an ICmpInst as its first operand.
1446Instruction *InstCombinerImpl::foldSelectInstWithICmp(SelectInst &SI,
1447 ICmpInst *ICI) {
1448 if (Instruction *NewSel = foldSelectValueEquivalence(SI, *ICI))
1449 return NewSel;
1450
1451 if (Instruction *NewSel = canonicalizeMinMaxWithConstant(SI, *ICI, *this))
1452 return NewSel;
1453
1454 if (Instruction *NewAbs = canonicalizeAbsNabs(SI, *ICI, *this))
1455 return NewAbs;
1456
1457 if (Instruction *NewAbs = canonicalizeClampLike(SI, *ICI, Builder))
1458 return NewAbs;
1459
1460 if (Instruction *NewSel =
1461 tryToReuseConstantFromSelectInComparison(SI, *ICI, *this))
1462 return NewSel;
1463
1464 bool Changed = adjustMinMax(SI, *ICI);
1465
1466 if (Value *V = foldSelectICmpAnd(SI, ICI, Builder))
1467 return replaceInstUsesWith(SI, V);
1468
1469 // NOTE: if we wanted to, this is where to detect integer MIN/MAX
1470 Value *TrueVal = SI.getTrueValue();
1471 Value *FalseVal = SI.getFalseValue();
1472 ICmpInst::Predicate Pred = ICI->getPredicate();
1473 Value *CmpLHS = ICI->getOperand(0);
1474 Value *CmpRHS = ICI->getOperand(1);
1475 if (CmpRHS != CmpLHS && isa<Constant>(CmpRHS)) {
1476 if (CmpLHS == TrueVal && Pred == ICmpInst::ICMP_EQ) {
1477 // Transform (X == C) ? X : Y -> (X == C) ? C : Y
1478 SI.setOperand(1, CmpRHS);
1479 Changed = true;
1480 } else if (CmpLHS == FalseVal && Pred == ICmpInst::ICMP_NE) {
1481 // Transform (X != C) ? Y : X -> (X != C) ? Y : C
1482 SI.setOperand(2, CmpRHS);
1483 Changed = true;
1484 }
1485 }
1486
1487 // FIXME: This code is nearly duplicated in InstSimplify. Using/refactoring
1488 // decomposeBitTestICmp() might help.
1489 {
1490 unsigned BitWidth =
1491 DL.getTypeSizeInBits(TrueVal->getType()->getScalarType());
1492 APInt MinSignedValue = APInt::getSignedMinValue(BitWidth);
1493 Value *X;
1494 const APInt *Y, *C;
1495 bool TrueWhenUnset;
1496 bool IsBitTest = false;
1497 if (ICmpInst::isEquality(Pred) &&
1498 match(CmpLHS, m_And(m_Value(X), m_Power2(Y))) &&
1499 match(CmpRHS, m_Zero())) {
1500 IsBitTest = true;
1501 TrueWhenUnset = Pred == ICmpInst::ICMP_EQ;
1502 } else if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, m_Zero())) {
1503 X = CmpLHS;
1504 Y = &MinSignedValue;
1505 IsBitTest = true;
1506 TrueWhenUnset = false;
1507 } else if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, m_AllOnes())) {
1508 X = CmpLHS;
1509 Y = &MinSignedValue;
1510 IsBitTest = true;
1511 TrueWhenUnset = true;
1512 }
1513 if (IsBitTest) {
1514 Value *V = nullptr;
1515 // (X & Y) == 0 ? X : X ^ Y --> X & ~Y
1516 if (TrueWhenUnset && TrueVal == X &&
1517 match(FalseVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
1518 V = Builder.CreateAnd(X, ~(*Y));
1519 // (X & Y) != 0 ? X ^ Y : X --> X & ~Y
1520 else if (!TrueWhenUnset && FalseVal == X &&
1521 match(TrueVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
1522 V = Builder.CreateAnd(X, ~(*Y));
1523 // (X & Y) == 0 ? X ^ Y : X --> X | Y
1524 else if (TrueWhenUnset && FalseVal == X &&
1525 match(TrueVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
1526 V = Builder.CreateOr(X, *Y);
1527 // (X & Y) != 0 ? X : X ^ Y --> X | Y
1528 else if (!TrueWhenUnset && TrueVal == X &&
1529 match(FalseVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
1530 V = Builder.CreateOr(X, *Y);
1531
1532 if (V)
1533 return replaceInstUsesWith(SI, V);
1534 }
1535 }
1536
1537 if (Instruction *V =
1538 foldSelectICmpAndAnd(SI.getType(), ICI, TrueVal, FalseVal, Builder))
1539 return V;
1540
1541 if (Instruction *V = foldSelectCtlzToCttz(ICI, TrueVal, FalseVal, Builder))
1542 return V;
1543
1544 if (Value *V = foldSelectICmpAndOr(ICI, TrueVal, FalseVal, Builder))
1545 return replaceInstUsesWith(SI, V);
1546
1547 if (Value *V = foldSelectICmpLshrAshr(ICI, TrueVal, FalseVal, Builder))
1548 return replaceInstUsesWith(SI, V);
1549
1550 if (Value *V = foldSelectCttzCtlz(ICI, TrueVal, FalseVal, Builder))
1551 return replaceInstUsesWith(SI, V);
1552
1553 if (Value *V = canonicalizeSaturatedSubtract(ICI, TrueVal, FalseVal, Builder))
1554 return replaceInstUsesWith(SI, V);
1555
1556 if (Value *V = canonicalizeSaturatedAdd(ICI, TrueVal, FalseVal, Builder))
1557 return replaceInstUsesWith(SI, V);
1558
1559 return Changed ? &SI : nullptr;
1560}
1561
1562/// SI is a select whose condition is a PHI node (but the two may be in
1563/// different blocks). See if the true/false values (V) are live in all of the
1564/// predecessor blocks of the PHI. For example, cases like this can't be mapped:
1565///
1566/// X = phi [ C1, BB1], [C2, BB2]
1567/// Y = add
1568/// Z = select X, Y, 0
1569///
1570/// because Y is not live in BB1/BB2.
1571static bool canSelectOperandBeMappingIntoPredBlock(const Value *V,
1572 const SelectInst &SI) {
1573 // If the value is a non-instruction value like a constant or argument, it
1574 // can always be mapped.
1575 const Instruction *I = dyn_cast<Instruction>(V);
1576 if (!I) return true;
1577
1578 // If V is a PHI node defined in the same block as the condition PHI, we can
1579 // map the arguments.
1580 const PHINode *CondPHI = cast<PHINode>(SI.getCondition());
1581
1582 if (const PHINode *VP = dyn_cast<PHINode>(I))
1583 if (VP->getParent() == CondPHI->getParent())
1584 return true;
1585
1586 // Otherwise, if the PHI and select are defined in the same block and if V is
1587 // defined in a different block, then we can transform it.
1588 if (SI.getParent() == CondPHI->getParent() &&
1589 I->getParent() != CondPHI->getParent())
1590 return true;
1591
1592 // Otherwise we have a 'hard' case and we can't tell without doing more
1593 // detailed dominator based analysis, punt.
1594 return false;
1595}
1596
1597/// We have an SPF (e.g. a min or max) of an SPF of the form:
1598/// SPF2(SPF1(A, B), C)
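/// For example, smax(smax(A, B), C): the inner smax is 'Inner' (flavor SPF1,
/// operands A and B) and the outer smax is 'Outer' (flavor SPF2, with the
/// extra operand C). This is only an illustration of the parameter naming.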
1599Instruction *InstCombinerImpl::foldSPFofSPF(Instruction *Inner,
1600 SelectPatternFlavor SPF1, Value *A,
1601 Value *B, Instruction &Outer,
1602 SelectPatternFlavor SPF2,
1603 Value *C) {
1604 if (Outer.getType() != Inner->getType())
1605 return nullptr;
1606
1607 if (C == A || C == B) {
1608 // MAX(MAX(A, B), B) -> MAX(A, B)
1609 // MIN(MIN(a, b), a) -> MIN(a, b)
1610 // TODO: This could be done in instsimplify.
1611 if (SPF1 == SPF2 && SelectPatternResult::isMinOrMax(SPF1))
1612 return replaceInstUsesWith(Outer, Inner);
1613
1614 // MAX(MIN(a, b), a) -> a
1615 // MIN(MAX(a, b), a) -> a
1616 // TODO: This could be done in instsimplify.
1617 if ((SPF1 == SPF_SMIN && SPF2 == SPF_SMAX) ||
1618 (SPF1 == SPF_SMAX && SPF2 == SPF_SMIN) ||
1619 (SPF1 == SPF_UMIN && SPF2 == SPF_UMAX) ||
1620 (SPF1 == SPF_UMAX && SPF2 == SPF_UMIN))
1621 return replaceInstUsesWith(Outer, C);
1622 }
1623
1624 if (SPF1 == SPF2) {
1625 const APInt *CB, *CC;
1626 if (match(B, m_APInt(CB)) && match(C, m_APInt(CC))) {
1627 // MIN(MIN(A, 23), 97) -> MIN(A, 23)
1628 // MAX(MAX(A, 97), 23) -> MAX(A, 97)
1629 // TODO: This could be done in instsimplify.
1630 if ((SPF1 == SPF_UMIN && CB->ule(*CC)) ||
1631 (SPF1 == SPF_SMIN && CB->sle(*CC)) ||
1632 (SPF1 == SPF_UMAX && CB->uge(*CC)) ||
1633 (SPF1 == SPF_SMAX && CB->sge(*CC)))
1634 return replaceInstUsesWith(Outer, Inner);
1635
1636 // MIN(MIN(A, 97), 23) -> MIN(A, 23)
1637 // MAX(MAX(A, 23), 97) -> MAX(A, 97)
1638 if ((SPF1 == SPF_UMIN && CB->ugt(*CC)) ||
1639 (SPF1 == SPF_SMIN && CB->sgt(*CC)) ||
1640 (SPF1 == SPF_UMAX && CB->ult(*CC)) ||
1641 (SPF1 == SPF_SMAX && CB->slt(*CC))) {
1642 Outer.replaceUsesOfWith(Inner, A);
1643 return &Outer;
1644 }
1645 }
1646 }
1647
1648 // max(max(A, B), min(A, B)) --> max(A, B)
1649 // min(min(A, B), max(A, B)) --> min(A, B)
1650 // TODO: This could be done in instsimplify.
1651 if (SPF1 == SPF2 &&
1652 ((SPF1 == SPF_UMIN && match(C, m_c_UMax(m_Specific(A), m_Specific(B)))) ||
1653 (SPF1 == SPF_SMIN && match(C, m_c_SMax(m_Specific(A), m_Specific(B)))) ||
1654 (SPF1 == SPF_UMAX && match(C, m_c_UMin(m_Specific(A), m_Specific(B)))) ||
1655 (SPF1 == SPF_SMAX && match(C, m_c_SMin(m_Specific(A), m_Specific(B))))))
1656 return replaceInstUsesWith(Outer, Inner);
1657
1658 // ABS(ABS(X)) -> ABS(X)
1659 // NABS(NABS(X)) -> NABS(X)
1660 // TODO: This could be done in instsimplify.
1661 if (SPF1 == SPF2 && (SPF1 == SPF_ABS || SPF1 == SPF_NABS)) {
1662 return replaceInstUsesWith(Outer, Inner);
1663 }
1664
1665 // ABS(NABS(X)) -> ABS(X)
1666 // NABS(ABS(X)) -> NABS(X)
1667 if ((SPF1 == SPF_ABS && SPF2 == SPF_NABS) ||
1668 (SPF1 == SPF_NABS && SPF2 == SPF_ABS)) {
1669 SelectInst *SI = cast<SelectInst>(Inner);
1670 Value *NewSI =
1671 Builder.CreateSelect(SI->getCondition(), SI->getFalseValue(),
1672 SI->getTrueValue(), SI->getName(), SI);
1673 return replaceInstUsesWith(Outer, NewSI);
1674 }
1675
1676 auto IsFreeOrProfitableToInvert =
1677 [&](Value *V, Value *&NotV, bool &ElidesXor) {
1678 if (match(V, m_Not(m_Value(NotV)))) {
1679 // If V has at most 2 uses then we can get rid of the xor operation
1680 // entirely.
1681 ElidesXor |= !V->hasNUsesOrMore(3);
1682 return true;
1683 }
1684
1685 if (isFreeToInvert(V, !V->hasNUsesOrMore(3))) {
1686 NotV = nullptr;
1687 return true;
1688 }
1689
1690 return false;
1691 };
1692
1693 Value *NotA, *NotB, *NotC;
1694 bool ElidesXor = false;
1695
1696 // MIN(MIN(~A, ~B), ~C) == ~MAX(MAX(A, B), C)
1697 // MIN(MAX(~A, ~B), ~C) == ~MAX(MIN(A, B), C)
1698 // MAX(MIN(~A, ~B), ~C) == ~MIN(MAX(A, B), C)
1699 // MAX(MAX(~A, ~B), ~C) == ~MIN(MIN(A, B), C)
1700 //
1701 // This transform is performance neutral if we can elide at least one xor from
1702 // the set of three operands, since we'll be tacking on an xor at the very
1703 // end.
1704 if (SelectPatternResult::isMinOrMax(SPF1) &&
1705 SelectPatternResult::isMinOrMax(SPF2) &&
1706 IsFreeOrProfitableToInvert(A, NotA, ElidesXor) &&
1707 IsFreeOrProfitableToInvert(B, NotB, ElidesXor) &&
1708 IsFreeOrProfitableToInvert(C, NotC, ElidesXor) && ElidesXor) {
1709 if (!NotA)
1710 NotA = Builder.CreateNot(A);
1711 if (!NotB)
1712 NotB = Builder.CreateNot(B);
1713 if (!NotC)
1714 NotC = Builder.CreateNot(C);
1715
1716 Value *NewInner = createMinMax(Builder, getInverseMinMaxFlavor(SPF1), NotA,
1717 NotB);
1718 Value *NewOuter = Builder.CreateNot(
1719 createMinMax(Builder, getInverseMinMaxFlavor(SPF2), NewInner, NotC));
1720 return replaceInstUsesWith(Outer, NewOuter);
1721 }
1722
1723 return nullptr;
1724}
1725
1726/// Turn select C, (X + Y), (X - Y) --> (X + (select C, Y, (-Y))).
1727/// This is even legal for FP.
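/// For example (an illustrative sketch; the value names are hypothetical):
///   %a = add i32 %x, %y
///   %s = sub i32 %x, %y
///   %r = select i1 %c, i32 %a, i32 %s
/// becomes
///   %neg = sub i32 0, %y
///   %sel = select i1 %c, i32 %y, i32 %neg
///   %r   = add i32 %x, %sel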
1728static Instruction *foldAddSubSelect(SelectInst &SI,
1729 InstCombiner::BuilderTy &Builder) {
1730 Value *CondVal = SI.getCondition();
1731 Value *TrueVal = SI.getTrueValue();
1732 Value *FalseVal = SI.getFalseValue();
1733 auto *TI = dyn_cast<Instruction>(TrueVal);
1734 auto *FI = dyn_cast<Instruction>(FalseVal);
1735 if (!TI || !FI || !TI->hasOneUse() || !FI->hasOneUse())
1736 return nullptr;
1737
1738 Instruction *AddOp = nullptr, *SubOp = nullptr;
1739 if ((TI->getOpcode() == Instruction::Sub &&
1740 FI->getOpcode() == Instruction::Add) ||
1741 (TI->getOpcode() == Instruction::FSub &&
1742 FI->getOpcode() == Instruction::FAdd)) {
1743 AddOp = FI;
1744 SubOp = TI;
1745 } else if ((FI->getOpcode() == Instruction::Sub &&
1746 TI->getOpcode() == Instruction::Add) ||
1747 (FI->getOpcode() == Instruction::FSub &&
1748 TI->getOpcode() == Instruction::FAdd)) {
1749 AddOp = TI;
1750 SubOp = FI;
1751 }
1752
1753 if (AddOp) {
1754 Value *OtherAddOp = nullptr;
1755 if (SubOp->getOperand(0) == AddOp->getOperand(0)) {
1756 OtherAddOp = AddOp->getOperand(1);
1757 } else if (SubOp->getOperand(0) == AddOp->getOperand(1)) {
1758 OtherAddOp = AddOp->getOperand(0);
1759 }
1760
1761 if (OtherAddOp) {
1762 // So at this point we know we have (Y -> OtherAddOp):
1763 // select C, (add X, Y), (sub X, Z)
1764 Value *NegVal; // Compute -Z
1765 if (SI.getType()->isFPOrFPVectorTy()) {
1766 NegVal = Builder.CreateFNeg(SubOp->getOperand(1));
1767 if (Instruction *NegInst = dyn_cast<Instruction>(NegVal)) {
1768 FastMathFlags Flags = AddOp->getFastMathFlags();
1769 Flags &= SubOp->getFastMathFlags();
1770 NegInst->setFastMathFlags(Flags);
1771 }
1772 } else {
1773 NegVal = Builder.CreateNeg(SubOp->getOperand(1));
1774 }
1775
1776 Value *NewTrueOp = OtherAddOp;
1777 Value *NewFalseOp = NegVal;
1778 if (AddOp != TI)
1779 std::swap(NewTrueOp, NewFalseOp);
1780 Value *NewSel = Builder.CreateSelect(CondVal, NewTrueOp, NewFalseOp,
1781 SI.getName() + ".p", &SI);
1782
1783 if (SI.getType()->isFPOrFPVectorTy()) {
1784 Instruction *RI =
1785 BinaryOperator::CreateFAdd(SubOp->getOperand(0), NewSel);
1786
1787 FastMathFlags Flags = AddOp->getFastMathFlags();
1788 Flags &= SubOp->getFastMathFlags();
1789 RI->setFastMathFlags(Flags);
1790 return RI;
1791 } else
1792 return BinaryOperator::CreateAdd(SubOp->getOperand(0), NewSel);
1793 }
1794 }
1795 return nullptr;
1796}
1797
1798/// Turn X + Y overflows ? -1 : X + Y -> uadd_sat X, Y
1799/// And X - Y overflows ? 0 : X - Y -> usub_sat X, Y
1800/// Along with a number of patterns similar to:
1801/// X + Y overflows ? (X < 0 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
1802/// X - Y overflows ? (X > 0 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
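/// For example, the unsigned-add case in IR (a sketch; names are hypothetical):
///   %agg = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
///   %ov  = extractvalue { i32, i1 } %agg, 1
///   %val = extractvalue { i32, i1 } %agg, 0
///   %r   = select i1 %ov, i32 -1, i32 %val
/// becomes
///   %r = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y)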
1803static Instruction *
1804foldOverflowingAddSubSelect(SelectInst &SI, InstCombiner::BuilderTy &Builder) {
1805 Value *CondVal = SI.getCondition();
1806 Value *TrueVal = SI.getTrueValue();
1807 Value *FalseVal = SI.getFalseValue();
1808
1809 WithOverflowInst *II;
1810 if (!match(CondVal, m_ExtractValue<1>(m_WithOverflowInst(II))) ||
1811 !match(FalseVal, m_ExtractValue<0>(m_Specific(II))))
1812 return nullptr;
1813
1814 Value *X = II->getLHS();
1815 Value *Y = II->getRHS();
1816
1817 auto IsSignedSaturateLimit = [&](Value *Limit, bool IsAdd) {
1818 Type *Ty = Limit->getType();
1819
1820 ICmpInst::Predicate Pred;
1821 Value *TrueVal, *FalseVal, *Op;
1822 const APInt *C;
1823 if (!match(Limit, m_Select(m_ICmp(Pred, m_Value(Op), m_APInt(C)),
1824 m_Value(TrueVal), m_Value(FalseVal))))
1825 return false;
1826
1827 auto IsZeroOrOne = [](const APInt &C) {
1828 return C.isNullValue() || C.isOneValue();
1829 };
1830 auto IsMinMax = [&](Value *Min, Value *Max) {
1831 APInt MinVal = APInt::getSignedMinValue(Ty->getScalarSizeInBits());
1832 APInt MaxVal = APInt::getSignedMaxValue(Ty->getScalarSizeInBits());
1833 return match(Min, m_SpecificInt(MinVal)) &&
1834 match(Max, m_SpecificInt(MaxVal));
1835 };
1836
1837 if (Op != X && Op != Y)
1838 return false;
1839
1840 if (IsAdd) {
1841 // X + Y overflows ? (X <s 0 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
1842 // X + Y overflows ? (X <s 1 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
1843 // X + Y overflows ? (Y <s 0 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
1844 // X + Y overflows ? (Y <s 1 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
1845 if (Pred == ICmpInst::ICMP_SLT && IsZeroOrOne(*C) &&
1846 IsMinMax(TrueVal, FalseVal))
1847 return true;
1848 // X + Y overflows ? (X >s 0 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
1849 // X + Y overflows ? (X >s -1 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
1850 // X + Y overflows ? (Y >s 0 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
1851 // X + Y overflows ? (Y >s -1 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
1852 if (Pred == ICmpInst::ICMP_SGT && IsZeroOrOne(*C + 1) &&
1853 IsMinMax(FalseVal, TrueVal))
1854 return true;
1855 } else {
1856 // X - Y overflows ? (X <s 0 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
1857 // X - Y overflows ? (X <s -1 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
1858 if (Op == X && Pred == ICmpInst::ICMP_SLT && IsZeroOrOne(*C + 1) &&
1859 IsMinMax(TrueVal, FalseVal))
1860 return true;
1861 // X - Y overflows ? (X >s -1 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
1862 // X - Y overflows ? (X >s -2 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
1863 if (Op == X && Pred == ICmpInst::ICMP_SGT && IsZeroOrOne(*C + 2) &&
1864 IsMinMax(FalseVal, TrueVal))
1865 return true;
1866 // X - Y overflows ? (Y <s 0 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
1867 // X - Y overflows ? (Y <s 1 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
1868 if (Op == Y && Pred == ICmpInst::ICMP_SLT && IsZeroOrOne(*C) &&
1869 IsMinMax(FalseVal, TrueVal))
1870 return true;
1871 // X - Y overflows ? (Y >s 0 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
1872 // X - Y overflows ? (Y >s -1 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
1873 if (Op == Y && Pred == ICmpInst::ICMP_SGT && IsZeroOrOne(*C + 1) &&
1874 IsMinMax(TrueVal, FalseVal))
1875 return true;
1876 }
1877
1878 return false;
1879 };
1880
1881 Intrinsic::ID NewIntrinsicID;
1882 if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow &&
1883 match(TrueVal, m_AllOnes()))
1884 // X + Y overflows ? -1 : X + Y -> uadd_sat X, Y
1885 NewIntrinsicID = Intrinsic::uadd_sat;
1886 else if (II->getIntrinsicID() == Intrinsic::usub_with_overflow &&
1887 match(TrueVal, m_Zero()))
1888 // X - Y overflows ? 0 : X - Y -> usub_sat X, Y
1889 NewIntrinsicID = Intrinsic::usub_sat;
1890 else if (II->getIntrinsicID() == Intrinsic::sadd_with_overflow &&
1891 IsSignedSaturateLimit(TrueVal, /*IsAdd=*/true))
1892 // X + Y overflows ? (X <s 0 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
1893 // X + Y overflows ? (X <s 1 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
1894 // X + Y overflows ? (X >s 0 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
1895 // X + Y overflows ? (X >s -1 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
1896 // X + Y overflows ? (Y <s 0 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
1897 // X + Y overflows ? (Y <s 1 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
1898 // X + Y overflows ? (Y >s 0 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
1899 // X + Y overflows ? (Y >s -1 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
1900 NewIntrinsicID = Intrinsic::sadd_sat;
1901 else if (II->getIntrinsicID() == Intrinsic::ssub_with_overflow &&
1902 IsSignedSaturateLimit(TrueVal, /*IsAdd=*/false))
1903 // X - Y overflows ? (X <s 0 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
1904 // X - Y overflows ? (X <s -1 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
1905 // X - Y overflows ? (X >s -1 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
1906 // X - Y overflows ? (X >s -2 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
1907 // X - Y overflows ? (Y <s 0 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
1908 // X - Y overflows ? (Y <s 1 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
1909 // X - Y overflows ? (Y >s 0 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
1910 // X - Y overflows ? (Y >s -1 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
1911 NewIntrinsicID = Intrinsic::ssub_sat;
1912 else
1913 return nullptr;
1914
1915 Function *F =
1916 Intrinsic::getDeclaration(SI.getModule(), NewIntrinsicID, SI.getType());
1917 return CallInst::Create(F, {X, Y});
1918}
1919
1920Instruction *InstCombinerImpl::foldSelectExtConst(SelectInst &Sel) {
1921 Constant *C;
1922 if (!match(Sel.getTrueValue(), m_Constant(C)) &&
1923 !match(Sel.getFalseValue(), m_Constant(C)))
1924 return nullptr;
1925
1926 Instruction *ExtInst;
1927 if (!match(Sel.getTrueValue(), m_Instruction(ExtInst)) &&
1928 !match(Sel.getFalseValue(), m_Instruction(ExtInst)))
1929 return nullptr;
1930
1931 auto ExtOpcode = ExtInst->getOpcode();
1932 if (ExtOpcode != Instruction::ZExt && ExtOpcode != Instruction::SExt)
1933 return nullptr;
1934
1935 // If we are extending from a boolean type or if we can create a select that
1936 // has the same size operands as its condition, try to narrow the select.
1937 Value *X = ExtInst->getOperand(0);
1938 Type *SmallType = X->getType();
1939 Value *Cond = Sel.getCondition();
1940 auto *Cmp = dyn_cast<CmpInst>(Cond);
1941 if (!SmallType->isIntOrIntVectorTy(1) &&
1942 (!Cmp || Cmp->getOperand(0)->getType() != SmallType))
1943 return nullptr;
1944
1945 // If the constant is the same after truncation to the smaller type and
1946 // extension to the original type, we can narrow the select.
1947 Type *SelType = Sel.getType();
1948 Constant *TruncC = ConstantExpr::getTrunc(C, SmallType);
1949 Constant *ExtC = ConstantExpr::getCast(ExtOpcode, TruncC, SelType);
1950 if (ExtC == C && ExtInst->hasOneUse()) {
1951 Value *TruncCVal = cast<Value>(TruncC);
1952 if (ExtInst == Sel.getFalseValue())
1953 std::swap(X, TruncCVal);
1954
1955 // select Cond, (ext X), C --> ext(select Cond, X, C')
1956 // select Cond, C, (ext X) --> ext(select Cond, C', X)
1957 Value *NewSel = Builder.CreateSelect(Cond, X, TruncCVal, "narrow", &Sel);
1958 return CastInst::Create(Instruction::CastOps(ExtOpcode), NewSel, SelType);
1959 }
1960
1961 // If one arm of the select is the extend of the condition, replace that arm
1962 // with the extension of the appropriate known bool value.
1963 if (Cond == X) {
1964 if (ExtInst == Sel.getTrueValue()) {
1965 // select X, (sext X), C --> select X, -1, C
1966 // select X, (zext X), C --> select X, 1, C
1967 Constant *One = ConstantInt::getTrue(SmallType);
1968 Constant *AllOnesOrOne = ConstantExpr::getCast(ExtOpcode, One, SelType);
1969 return SelectInst::Create(Cond, AllOnesOrOne, C, "", nullptr, &Sel);
1970 } else {
1971 // select X, C, (sext X) --> select X, C, 0
1972 // select X, C, (zext X) --> select X, C, 0
1973 Constant *Zero = ConstantInt::getNullValue(SelType);
1974 return SelectInst::Create(Cond, C, Zero, "", nullptr, &Sel);
1975 }
1976 }
1977
1978 return nullptr;
1979}
1980
1981/// Try to transform a vector select with a constant condition vector into a
1982/// shuffle for easier combining with other shuffles and insert/extract.
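/// For example (a small sketch with hypothetical operands):
///   %r = select <2 x i1> <i1 true, i1 false>, <2 x i8> %t, <2 x i8> %f
/// becomes
///   %r = shufflevector <2 x i8> %t, <2 x i8> %f, <2 x i32> <i32 0, i32 3>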
1983static Instruction *canonicalizeSelectToShuffle(SelectInst &SI) {
1984 Value *CondVal = SI.getCondition();
1985 Constant *CondC;
1986 auto *CondValTy = dyn_cast<FixedVectorType>(CondVal->getType());
1987 if (!CondValTy || !match(CondVal, m_Constant(CondC)))
1988 return nullptr;
1989
1990 unsigned NumElts = CondValTy->getNumElements();
1991 SmallVector<int, 16> Mask;
1992 Mask.reserve(NumElts);
1993 for (unsigned i = 0; i != NumElts; ++i) {
1994 Constant *Elt = CondC->getAggregateElement(i);
1995 if (!Elt)
1996 return nullptr;
1997
1998 if (Elt->isOneValue()) {
1999 // If the select condition element is true, choose from the 1st vector.
2000 Mask.push_back(i);
2001 } else if (Elt->isNullValue()) {
2002 // If the select condition element is false, choose from the 2nd vector.
2003 Mask.push_back(i + NumElts);
2004 } else if (isa<UndefValue>(Elt)) {
2005 // Undef in a select condition (choose one of the operands) does not mean
2006 // the same thing as undef in a shuffle mask (any value is acceptable), so
2007 // give up.
2008 return nullptr;
2009 } else {
2010 // Bail out on a constant expression.
2011 return nullptr;
2012 }
2013 }
2014
2015 return new ShuffleVectorInst(SI.getTrueValue(), SI.getFalseValue(), Mask);
2016}
2017
2018/// If we have a select of vectors with a scalar condition, try to convert that
2019/// to a vector select by splatting the condition. A splat may get folded with
2020/// other operations in IR and having all operands of a select be vector types
2021/// is likely better for vector codegen.
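/// For example (a sketch; the element index and value names are hypothetical):
///   %c = extractelement <4 x i1> %v, i64 0
///   %r = select i1 %c, <4 x i32> %t, <4 x i32> %f
/// is rewritten so the condition is a splat of %c, i.e. roughly
///   %r = select <4 x i1> %c.splat, <4 x i32> %t, <4 x i32> %f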
2022static Instruction *canonicalizeScalarSelectOfVecs(SelectInst &Sel,
2023 InstCombinerImpl &IC) {
2024 auto *Ty = dyn_cast<VectorType>(Sel.getType());
2025 if (!Ty)
2026 return nullptr;
2027
2028 // We can replace a single-use extract with constant index.
2029 Value *Cond = Sel.getCondition();
2030 if (!match(Cond, m_OneUse(m_ExtractElt(m_Value(), m_ConstantInt()))))
2031 return nullptr;
2032
2033 // select (extelt V, Index), T, F --> select (splat V, Index), T, F
2034 // Splatting the extracted condition reduces code (we could directly create a
2035 // splat shuffle of the source vector to eliminate the intermediate step).
2036 return IC.replaceOperand(
2037 Sel, 0, IC.Builder.CreateVectorSplat(Ty->getElementCount(), Cond));
2038}
2039
2040/// Reuse bitcasted operands between a compare and select:
2041/// select (cmp (bitcast C), (bitcast D)), (bitcast' C), (bitcast' D) -->
2042/// bitcast (select (cmp (bitcast C), (bitcast D)), (bitcast C), (bitcast D))
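/// An illustrative instance (hypothetical types and value names):
///   %a   = bitcast <4 x i8> %c to i32
///   %b   = bitcast <4 x i8> %d to i32
///   %cmp = icmp ult i32 %a, %b
///   %ta  = bitcast <4 x i8> %c to float
///   %fa  = bitcast <4 x i8> %d to float
///   %r   = select i1 %cmp, float %ta, float %fa
/// becomes
///   %s = select i1 %cmp, i32 %a, i32 %b
///   %r = bitcast i32 %s to float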
2043static Instruction *foldSelectCmpBitcasts(SelectInst &Sel,
2044 InstCombiner::BuilderTy &Builder) {
2045 Value *Cond = Sel.getCondition();
2046 Value *TVal = Sel.getTrueValue();
2047 Value *FVal = Sel.getFalseValue();
2048
2049 CmpInst::Predicate Pred;
2050 Value *A, *B;
2051 if (!match(Cond, m_Cmp(Pred, m_Value(A), m_Value(B))))
2052 return nullptr;
2053
2054 // The select condition is a compare instruction. If the select's true/false
2055 // values are already the same as the compare operands, there's nothing to do.
2056 if (TVal == A || TVal == B || FVal == A || FVal == B)
2057 return nullptr;
2058
2059 Value *C, *D;
2060 if (!match(A, m_BitCast(m_Value(C))) || !match(B, m_BitCast(m_Value(D))))
2061 return nullptr;
2062
2063 // select (cmp (bitcast C), (bitcast D)), (bitcast TSrc), (bitcast FSrc)
2064 Value *TSrc, *FSrc;
2065 if (!match(TVal, m_BitCast(m_Value(TSrc))) ||
2066 !match(FVal, m_BitCast(m_Value(FSrc))))
2067 return nullptr;
2068
2069 // If the select true/false values are *different bitcasts* of the same source
2070 // operands, make the select operands the same as the compare operands and
2071 // cast the result. This is the canonical select form for min/max.
2072 Value *NewSel;
2073 if (TSrc == C && FSrc == D) {
2074 // select (cmp (bitcast C), (bitcast D)), (bitcast' C), (bitcast' D) -->
2075 // bitcast (select (cmp A, B), A, B)
2076 NewSel = Builder.CreateSelect(Cond, A, B, "", &Sel);
2077 } else if (TSrc == D && FSrc == C) {
2078 // select (cmp (bitcast C), (bitcast D)), (bitcast' D), (bitcast' C) -->
2079 // bitcast (select (cmp A, B), B, A)
2080 NewSel = Builder.CreateSelect(Cond, B, A, "", &Sel);
2081 } else {
2082 return nullptr;
2083 }
2084 return CastInst::CreateBitOrPointerCast(NewSel, Sel.getType());
2085}
2086
2087/// Try to eliminate select instructions that test the returned flag of cmpxchg
2088/// instructions.
2089///
2090/// If a select instruction tests the returned flag of a cmpxchg instruction and
2091/// selects between the returned value of the cmpxchg instruction and its compare
2092/// operand, the result of the select will always be equal to its false value.
2093/// For example:
2094///
2095/// %0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
2096/// %1 = extractvalue { i64, i1 } %0, 1
2097/// %2 = extractvalue { i64, i1 } %0, 0
2098/// %3 = select i1 %1, i64 %compare, i64 %2
2099/// ret i64 %3
2100///
2101/// The returned value of the cmpxchg instruction (%2) is the original value
2102/// located at %ptr prior to any update. If the cmpxchg operation succeeds, %2
2103/// must have been equal to %compare. Thus, the result of the select is always
2104/// equal to %2, and the code can be simplified to:
2105///
2106/// %0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
2107/// %1 = extractvalue { i64, i1 } %0, 0
2108/// ret i64 %1
2109///
2110static Value *foldSelectCmpXchg(SelectInst &SI) {
2111 // A helper that determines if V is an extractvalue instruction whose
2112 // aggregate operand is a cmpxchg instruction and whose single index is equal
2113 // to I. If such conditions are true, the helper returns the cmpxchg
2114 // instruction; otherwise, a nullptr is returned.
2115 auto isExtractFromCmpXchg = [](Value *V, unsigned I) -> AtomicCmpXchgInst * {
2116 auto *Extract = dyn_cast<ExtractValueInst>(V);
2117 if (!Extract)
2118 return nullptr;
2119 if (Extract->getIndices()[0] != I)
2120 return nullptr;
2121 return dyn_cast<AtomicCmpXchgInst>(Extract->getAggregateOperand());
2122 };
2123
2124 // If the select has a single user, and this user is a select instruction that
2125 // we can simplify, skip the cmpxchg simplification for now.
2126 if (SI.hasOneUse())
2127 if (auto *Select = dyn_cast<SelectInst>(SI.user_back()))
2128 if (Select->getCondition() == SI.getCondition())
2129 if (Select->getFalseValue() == SI.getTrueValue() ||
2130 Select->getTrueValue() == SI.getFalseValue())
2131 return nullptr;
2132
2133 // Ensure the select condition is the returned flag of a cmpxchg instruction.
2134 auto *CmpXchg = isExtractFromCmpXchg(SI.getCondition(), 1);
2135 if (!CmpXchg)
2136 return nullptr;
2137
2138 // Check the true value case: The true value of the select is the returned
2139 // value of the same cmpxchg used by the condition, and the false value is the
2140 // cmpxchg instruction's compare operand.
2141 if (auto *X = isExtractFromCmpXchg(SI.getTrueValue(), 0))
2142 if (X == CmpXchg && X->getCompareOperand() == SI.getFalseValue())
2143 return SI.getFalseValue();
2144
2145 // Check the false value case: The false value of the select is the returned
2146 // value of the same cmpxchg used by the condition, and the true value is the
2147 // cmpxchg instruction's compare operand.
2148 if (auto *X = isExtractFromCmpXchg(SI.getFalseValue(), 0))
2149 if (X == CmpXchg && X->getCompareOperand() == SI.getTrueValue())
2150 return SI.getFalseValue();
2151
2152 return nullptr;
2153}
2154
2155static Instruction *moveAddAfterMinMax(SelectPatternFlavor SPF, Value *X,
2156 Value *Y,
2157 InstCombiner::BuilderTy &Builder) {
2158   assert(SelectPatternResult::isMinOrMax(SPF) && "Expected min/max pattern");
2159 bool IsUnsigned = SPF == SelectPatternFlavor::SPF_UMIN ||
2160 SPF == SelectPatternFlavor::SPF_UMAX;
2161 // TODO: If InstSimplify could fold all cases where C2 <= C1, we could change
2162 // the constant value check to an assert.
2163 Value *A;
2164 const APInt *C1, *C2;
2165 if (IsUnsigned && match(X, m_NUWAdd(m_Value(A), m_APInt(C1))) &&
2166 match(Y, m_APInt(C2)) && C2->uge(*C1) && X->hasNUses(2)) {
2167 // umin (add nuw A, C1), C2 --> add nuw (umin A, C2 - C1), C1
2168 // umax (add nuw A, C1), C2 --> add nuw (umax A, C2 - C1), C1
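    // A concrete instance (hypothetical constants): with C1 = 5 and C2 = 10,
    // umin (add nuw %a, 5), 10 becomes add nuw (umin %a, 5), 5, i.e. the new
    // min/max constant is C2 - C1.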
2169 Value *NewMinMax = createMinMax(Builder, SPF, A,
2170 ConstantInt::get(X->getType(), *C2 - *C1));
2171 return BinaryOperator::CreateNUW(BinaryOperator::Add, NewMinMax,
2172 ConstantInt::get(X->getType(), *C1));
2173 }
2174
2175 if (!IsUnsigned && match(X, m_NSWAdd(m_Value(A), m_APInt(C1))) &&
2176 match(Y, m_APInt(C2)) && X->hasNUses(2)) {
2177 bool Overflow;
2178 APInt Diff = C2->ssub_ov(*C1, Overflow);
2179 if (!Overflow) {
2180 // smin (add nsw A, C1), C2 --> add nsw (smin A, C2 - C1), C1
2181 // smax (add nsw A, C1), C2 --> add nsw (smax A, C2 - C1), C1
2182 Value *NewMinMax = createMinMax(Builder, SPF, A,
2183 ConstantInt::get(X->getType(), Diff));
2184 return BinaryOperator::CreateNSW(BinaryOperator::Add, NewMinMax,
2185 ConstantInt::get(X->getType(), *C1));
2186 }
2187 }
2188
2189 return nullptr;
2190}
2191
2192/// Match a sadd_sat or ssub_sat which is using min/max to clamp the value.
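/// For example (an i8-in-i16 sketch; the values and the intrinsic form of the
/// min/max are illustrative):
///   %ax  = sext i8 %a to i16
///   %bx  = sext i8 %b to i16
///   %add = add i16 %ax, %bx
///   %min = call i16 @llvm.smin.i16(i16 %add, i16 127)
///   %res = call i16 @llvm.smax.i16(i16 %min, i16 -128)
/// can become
///   %sat = call i8 @llvm.sadd.sat.i8(i8 %a, i8 %b)
///   %res = sext i8 %sat to i16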
2193Instruction *InstCombinerImpl::matchSAddSubSat(Instruction &MinMax1) {
2194 Type *Ty = MinMax1.getType();
2195
2196 // We are looking for a tree of:
2197 // max(INT_MIN, min(INT_MAX, add(sext(A), sext(B))))
2198 // Where the min and max could be reversed
2199 Instruction *MinMax2;
2200 BinaryOperator *AddSub;
2201 const APInt *MinValue, *MaxValue;
2202 if (match(&MinMax1, m_SMin(m_Instruction(MinMax2), m_APInt(MaxValue)))) {
2203 if (!match(MinMax2, m_SMax(m_BinOp(AddSub), m_APInt(MinValue))))
2204 return nullptr;
2205 } else if (match(&MinMax1,
2206 m_SMax(m_Instruction(MinMax2), m_APInt(MinValue)))) {
2207 if (!match(MinMax2, m_SMin(m_BinOp(AddSub), m_APInt(MaxValue))))
2208 return nullptr;
2209 } else
2210 return nullptr;
2211
2212 // Check that the constants clamp a saturate, and that the new type would be
2213 // sensible to convert to.
2214 if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)
2215 return nullptr;
2216 // In what bitwidth can this be treated as saturating arithmetics?
2217 unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;
2218 // FIXME: This isn't quite right for vectors, but using the scalar type is a
2219 // good first approximation for what should be done there.
2220 if (!shouldChangeType(Ty->getScalarType()->getIntegerBitWidth(), NewBitWidth))
2221 return nullptr;
2222
2223 // Also make sure that the number of uses is as expected. The 3 is for the
2224  // two items of the compare and the select, or 2 from a min/max.
2225 unsigned ExpUses = isa<IntrinsicInst>(MinMax1) ? 2 : 3;
2226 if (MinMax2->hasNUsesOrMore(ExpUses) || AddSub->hasNUsesOrMore(ExpUses))
2227 return nullptr;
2228
2229 // Create the new type (which can be a vector type)
2230 Type *NewTy = Ty->getWithNewBitWidth(NewBitWidth);
2231 // Match the two extends from the add/sub
2232 Value *A, *B;
2233  if (!match(AddSub, m_BinOp(m_SExt(m_Value(A)), m_SExt(m_Value(B)))))
2234 return nullptr;
2235 // And check the incoming values are of a type smaller than or equal to the
2236 // size of the saturation. Otherwise the higher bits can cause different
2237 // results.
2238 if (A->getType()->getScalarSizeInBits() > NewBitWidth ||
2239 B->getType()->getScalarSizeInBits() > NewBitWidth)
2240 return nullptr;
2241
2242 Intrinsic::ID IntrinsicID;
2243 if (AddSub->getOpcode() == Instruction::Add)
2244 IntrinsicID = Intrinsic::sadd_sat;
2245 else if (AddSub->getOpcode() == Instruction::Sub)
2246 IntrinsicID = Intrinsic::ssub_sat;
2247 else
2248 return nullptr;
2249
2250 // Finally create and return the sat intrinsic, truncated to the new type
2251 Function *F = Intrinsic::getDeclaration(MinMax1.getModule(), IntrinsicID, NewTy);
2252 Value *AT = Builder.CreateSExt(A, NewTy);
2253 Value *BT = Builder.CreateSExt(B, NewTy);
2254 Value *Sat = Builder.CreateCall(F, {AT, BT});
2255 return CastInst::Create(Instruction::SExt, Sat, Ty);
2256}
2257
2258/// Reduce a sequence of min/max with a common operand.
2259static Instruction *factorizeMinMaxTree(SelectPatternFlavor SPF, Value *LHS,
2260 Value *RHS,
2261 InstCombiner::BuilderTy &Builder) {
2262   assert(SelectPatternResult::isMinOrMax(SPF) && "Expected a min/max");
2263 // TODO: Allow FP min/max with nnan/nsz.
2264 if (!LHS->getType()->isIntOrIntVectorTy())
2265 return nullptr;
2266
2267 // Match 3 of the same min/max ops. Example: umin(umin(), umin()).
2268 Value *A, *B, *C, *D;
2269 SelectPatternResult L = matchSelectPattern(LHS, A, B);
2270 SelectPatternResult R = matchSelectPattern(RHS, C, D);
2271 if (SPF != L.Flavor || L.Flavor != R.Flavor)
2272 return nullptr;
2273
2274 // Look for a common operand. The use checks are different than usual because
2275 // a min/max pattern typically has 2 uses of each op: 1 by the cmp and 1 by
2276 // the select.
2277 Value *MinMaxOp = nullptr;
2278 Value *ThirdOp = nullptr;
2279 if (!LHS->hasNUsesOrMore(3) && RHS->hasNUsesOrMore(3)) {
2280 // If the LHS is only used in this chain and the RHS is used outside of it,
2281 // reuse the RHS min/max because that will eliminate the LHS.
2282 if (D == A || C == A) {
2283 // min(min(a, b), min(c, a)) --> min(min(c, a), b)
2284 // min(min(a, b), min(a, d)) --> min(min(a, d), b)
2285 MinMaxOp = RHS;
2286 ThirdOp = B;
2287 } else if (D == B || C == B) {
2288 // min(min(a, b), min(c, b)) --> min(min(c, b), a)
2289 // min(min(a, b), min(b, d)) --> min(min(b, d), a)
2290 MinMaxOp = RHS;
2291 ThirdOp = A;
2292 }
2293 } else if (!RHS->hasNUsesOrMore(3)) {
2294 // Reuse the LHS. This will eliminate the RHS.
2295 if (D == A || D == B) {
2296 // min(min(a, b), min(c, a)) --> min(min(a, b), c)
2297 // min(min(a, b), min(c, b)) --> min(min(a, b), c)
2298 MinMaxOp = LHS;
2299 ThirdOp = C;
2300 } else if (C == A || C == B) {
2301 // min(min(a, b), min(b, d)) --> min(min(a, b), d)
2302       // min(min(a, b), min(a, d)) --> min(min(a, b), d)
2303 MinMaxOp = LHS;
2304 ThirdOp = D;
2305 }
2306 }
2307 if (!MinMaxOp || !ThirdOp)
2308 return nullptr;
2309
2310 CmpInst::Predicate P = getMinMaxPred(SPF);
2311 Value *CmpABC = Builder.CreateICmp(P, MinMaxOp, ThirdOp);
2312 return SelectInst::Create(CmpABC, MinMaxOp, ThirdOp);
2313}
2314
2315/// Try to reduce a funnel/rotate pattern that includes a compare and select
2316/// into a funnel shift intrinsic. Example:
2317/// rotl32(a, b) --> (b == 0 ? a : ((a >> (32 - b)) | (a << b)))
2318/// --> call llvm.fshl.i32(a, a, b)
2319/// fshl32(a, b, c) --> (c == 0 ? a : ((b >> (32 - c)) | (a << c)))
2320/// --> call llvm.fshl.i32(a, b, c)
2321/// fshr32(a, b, c) --> (c == 0 ? b : ((a >> (32 - c)) | (b << c)))
2322/// --> call llvm.fshr.i32(a, b, c)
2323static Instruction *foldSelectFunnelShift(SelectInst &Sel,
2324 InstCombiner::BuilderTy &Builder) {
2325 // This must be a power-of-2 type for a bitmasking transform to be valid.
2326 unsigned Width = Sel.getType()->getScalarSizeInBits();
2327 if (!isPowerOf2_32(Width))
2328 return nullptr;
2329
2330 BinaryOperator *Or0, *Or1;
2331 if (!match(Sel.getFalseValue(), m_OneUse(m_Or(m_BinOp(Or0), m_BinOp(Or1)))))
2332 return nullptr;
2333
2334 Value *SV0, *SV1, *SA0, *SA1;
2335 if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(SV0),
2336 m_ZExtOrSelf(m_Value(SA0))))) ||
2337 !match(Or1, m_OneUse(m_LogicalShift(m_Value(SV1),
2338 m_ZExtOrSelf(m_Value(SA1))))) ||
2339 Or0->getOpcode() == Or1->getOpcode())
2340 return nullptr;
2341
2342 // Canonicalize to or(shl(SV0, SA0), lshr(SV1, SA1)).
2343 if (Or0->getOpcode() == BinaryOperator::LShr) {
2344 std::swap(Or0, Or1);
2345 std::swap(SV0, SV1);
2346 std::swap(SA0, SA1);
2347 }
2348   assert(Or0->getOpcode() == BinaryOperator::Shl &&
2349          Or1->getOpcode() == BinaryOperator::LShr &&
2350          "Illegal or(shift,shift) pair");
2351
2352 // Check the shift amounts to see if they are an opposite pair.
2353 Value *ShAmt;
2354 if (match(SA1, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(SA0)))))
2355 ShAmt = SA0;
2356 else if (match(SA0, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(SA1)))))
2357 ShAmt = SA1;
2358 else
2359 return nullptr;
2360
2361 // We should now have this pattern:
2362 // select ?, TVal, (or (shl SV0, SA0), (lshr SV1, SA1))
2363 // The false value of the select must be a funnel-shift of the true value:
2364 // IsFShl -> TVal must be SV0 else TVal must be SV1.
2365 bool IsFshl = (ShAmt == SA0);
2366 Value *TVal = Sel.getTrueValue();
2367 if ((IsFshl && TVal != SV0) || (!IsFshl && TVal != SV1))
2368 return nullptr;
2369
2370 // Finally, see if the select is filtering out a shift-by-zero.
2371 Value *Cond = Sel.getCondition();
2372 ICmpInst::Predicate Pred;
2373 if (!match(Cond, m_OneUse(m_ICmp(Pred, m_Specific(ShAmt), m_ZeroInt()))) ||
2374 Pred != ICmpInst::ICMP_EQ)
2375 return nullptr;
2376
2377 // If this is not a rotate then the select was blocking poison from the
2378 // 'shift-by-zero' non-TVal, but a funnel shift won't - so freeze it.
2379 if (SV0 != SV1) {
2380 if (IsFshl && !llvm::isGuaranteedNotToBePoison(SV1))
2381 SV1 = Builder.CreateFreeze(SV1);
2382 else if (!IsFshl && !llvm::isGuaranteedNotToBePoison(SV0))
2383 SV0 = Builder.CreateFreeze(SV0);
2384 }
2385
2386 // This is a funnel/rotate that avoids shift-by-bitwidth UB in a suboptimal way.
2387 // Convert to funnel shift intrinsic.
2388 Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
2389 Function *F = Intrinsic::getDeclaration(Sel.getModule(), IID, Sel.getType());
2390 ShAmt = Builder.CreateZExt(ShAmt, Sel.getType());
2391 return CallInst::Create(F, { SV0, SV1, ShAmt });
2392}
2393
2394static Instruction *foldSelectToCopysign(SelectInst &Sel,
2395 InstCombiner::BuilderTy &Builder) {
2396 Value *Cond = Sel.getCondition();
2397 Value *TVal = Sel.getTrueValue();
2398 Value *FVal = Sel.getFalseValue();
2399 Type *SelType = Sel.getType();
2400
2401 // Match select ?, TC, FC where the constants are equal but negated.
2402 // TODO: Generalize to handle a negated variable operand?
2403 const APFloat *TC, *FC;
2404 if (!match(TVal, m_APFloat(TC)) || !match(FVal, m_APFloat(FC)) ||
2405 !abs(*TC).bitwiseIsEqual(abs(*FC)))
2406 return nullptr;
2407
2408   assert(TC != FC && "Expected equal select arms to simplify");
2409
2410 Value *X;
2411 const APInt *C;
2412 bool IsTrueIfSignSet;
2413 ICmpInst::Predicate Pred;
2414 if (!match(Cond, m_OneUse(m_ICmp(Pred, m_BitCast(m_Value(X)), m_APInt(C)))) ||
2415 !InstCombiner::isSignBitCheck(Pred, *C, IsTrueIfSignSet) ||
2416 X->getType() != SelType)
2417 return nullptr;
2418
2419 // If needed, negate the value that will be the sign argument of the copysign:
2420 // (bitcast X) < 0 ? -TC : TC --> copysign(TC, X)
2421 // (bitcast X) < 0 ? TC : -TC --> copysign(TC, -X)
2422 // (bitcast X) >= 0 ? -TC : TC --> copysign(TC, -X)
2423 // (bitcast X) >= 0 ? TC : -TC --> copysign(TC, X)
2424 if (IsTrueIfSignSet ^ TC->isNegative())
2425 X = Builder.CreateFNegFMF(X, &Sel);
2426
2427 // Canonicalize the magnitude argument as the positive constant since we do
2428 // not care about its sign.
2429 Value *MagArg = TC->isNegative() ? FVal : TVal;
2430 Function *F = Intrinsic::getDeclaration(Sel.getModule(), Intrinsic::copysign,
2431 Sel.getType());
2432 Instruction *CopySign = CallInst::Create(F, { MagArg, X });
2433 CopySign->setFastMathFlags(Sel.getFastMathFlags());
2434 return CopySign;
2435}
2436
2437Instruction *InstCombinerImpl::foldVectorSelect(SelectInst &Sel) {
2438 auto *VecTy = dyn_cast<FixedVectorType>(Sel.getType());
2439 if (!VecTy)
2440 return nullptr;
2441
2442 unsigned NumElts = VecTy->getNumElements();
2443 APInt UndefElts(NumElts, 0);
2444 APInt AllOnesEltMask(APInt::getAllOnesValue(NumElts));
2445 if (Value *V = SimplifyDemandedVectorElts(&Sel, AllOnesEltMask, UndefElts)) {
2446 if (V != &Sel)
2447 return replaceInstUsesWith(Sel, V);
2448 return &Sel;
2449 }
2450
2451 // A select of a "select shuffle" with a common operand can be rearranged
2452 // to select followed by "select shuffle". Because of poison, this only works
2453 // in the case of a shuffle with no undefined mask elements.
2454 Value *Cond = Sel.getCondition();
2455 Value *TVal = Sel.getTrueValue();
2456 Value *FVal = Sel.getFalseValue();
2457 Value *X, *Y;
2458 ArrayRef<int> Mask;
2459 if (match(TVal, m_OneUse(m_Shuffle(m_Value(X), m_Value(Y), m_Mask(Mask)))) &&
2460 !is_contained(Mask, UndefMaskElem) &&
2461 cast<ShuffleVectorInst>(TVal)->isSelect()) {
2462 if (X == FVal) {
2463 // select Cond, (shuf_sel X, Y), X --> shuf_sel X, (select Cond, Y, X)
2464 Value *NewSel = Builder.CreateSelect(Cond, Y, X, "sel", &Sel);
2465 return new ShuffleVectorInst(X, NewSel, Mask);
2466 }
2467 if (Y == FVal) {
2468 // select Cond, (shuf_sel X, Y), Y --> shuf_sel (select Cond, X, Y), Y
2469 Value *NewSel = Builder.CreateSelect(Cond, X, Y, "sel", &Sel);
2470 return new ShuffleVectorInst(NewSel, Y, Mask);
2471 }
2472 }
2473 if (match(FVal, m_OneUse(m_Shuffle(m_Value(X), m_Value(Y), m_Mask(Mask)))) &&
2474 !is_contained(Mask, UndefMaskElem) &&
2475 cast<ShuffleVectorInst>(FVal)->isSelect()) {
2476 if (X == TVal) {
2477 // select Cond, X, (shuf_sel X, Y) --> shuf_sel X, (select Cond, X, Y)
2478 Value *NewSel = Builder.CreateSelect(Cond, X, Y, "sel", &Sel);
2479 return new ShuffleVectorInst(X, NewSel, Mask);
2480 }
2481 if (Y == TVal) {
2482 // select Cond, Y, (shuf_sel X, Y) --> shuf_sel (select Cond, Y, X), Y
2483 Value *NewSel = Builder.CreateSelect(Cond, Y, X, "sel", &Sel);
2484 return new ShuffleVectorInst(NewSel, Y, Mask);
2485 }
2486 }
2487
2488 return nullptr;
2489}
2490
2491static Instruction *foldSelectToPhiImpl(SelectInst &Sel, BasicBlock *BB,
2492 const DominatorTree &DT,
2493 InstCombiner::BuilderTy &Builder) {
2494 // Find the block's immediate dominator that ends with a conditional branch
2495 // that matches select's condition (maybe inverted).
2496 auto *IDomNode = DT[BB]->getIDom();
2497 if (!IDomNode)
2498 return nullptr;
2499 BasicBlock *IDom = IDomNode->getBlock();
2500
2501 Value *Cond = Sel.getCondition();
2502 Value *IfTrue, *IfFalse;
2503 BasicBlock *TrueSucc, *FalseSucc;
2504 if (match(IDom->getTerminator(),
2505 m_Br(m_Specific(Cond), m_BasicBlock(TrueSucc),
2506 m_BasicBlock(FalseSucc)))) {
2507 IfTrue = Sel.getTrueValue();
2508 IfFalse = Sel.getFalseValue();
2509 } else if (match(IDom->getTerminator(),
2510 m_Br(m_Not(m_Specific(Cond)), m_BasicBlock(TrueSucc),
2511 m_BasicBlock(FalseSucc)))) {
2512 IfTrue = Sel.getFalseValue();
2513 IfFalse = Sel.getTrueValue();
2514 } else
2515 return nullptr;
2516
2517 // Make sure the branches are actually different.
2518 if (TrueSucc == FalseSucc)
2519 return nullptr;
2520
2521 // We want to replace select %cond, %a, %b with a phi that takes value %a
2522 // for all incoming edges that are dominated by condition `%cond == true`,
2523 // and value %b for edges dominated by condition `%cond == false`. If %a
2524 // or %b are also phis from the same basic block, we can go further and take
2525 // their incoming values from the corresponding blocks.
2526 BasicBlockEdge TrueEdge(IDom, TrueSucc);
2527 BasicBlockEdge FalseEdge(IDom, FalseSucc);
2528 DenseMap<BasicBlock *, Value *> Inputs;
2529 for (auto *Pred : predecessors(BB)) {
2530 // Check implication.
2531 BasicBlockEdge Incoming(Pred, BB);
2532 if (DT.dominates(TrueEdge, Incoming))
2533 Inputs[Pred] = IfTrue->DoPHITranslation(BB, Pred);
2534 else if (DT.dominates(FalseEdge, Incoming))
2535 Inputs[Pred] = IfFalse->DoPHITranslation(BB, Pred);
2536 else
2537 return nullptr;
2538 // Check availability.
2539 if (auto *Insn = dyn_cast<Instruction>(Inputs[Pred]))
2540 if (!DT.dominates(Insn, Pred->getTerminator()))
2541 return nullptr;
2542 }
2543
2544 Builder.SetInsertPoint(&*BB->begin());
2545 auto *PN = Builder.CreatePHI(Sel.getType(), Inputs.size());
2546 for (auto *Pred : predecessors(BB))
2547 PN->addIncoming(Inputs[Pred], Pred);
2548 PN->takeName(&Sel);
2549 return PN;
2550}
2551
2552static Instruction *foldSelectToPhi(SelectInst &Sel, const DominatorTree &DT,
2553 InstCombiner::BuilderTy &Builder) {
2554 // Try to replace this select with Phi in one of these blocks.
2555 SmallSetVector<BasicBlock *, 4> CandidateBlocks;
2556 CandidateBlocks.insert(Sel.getParent());
2557 for (Value *V : Sel.operands())
2558 if (auto *I = dyn_cast<Instruction>(V))
2559 CandidateBlocks.insert(I->getParent());
2560
2561 for (BasicBlock *BB : CandidateBlocks)
2562 if (auto *PN = foldSelectToPhiImpl(Sel, BB, DT, Builder))
2563 return PN;
2564 return nullptr;
2565}
2566
2567static Value *foldSelectWithFrozenICmp(SelectInst &Sel, InstCombiner::BuilderTy &Builder) {
2568 FreezeInst *FI = dyn_cast<FreezeInst>(Sel.getCondition());
2569 if (!FI)
2570 return nullptr;
2571
2572 Value *Cond = FI->getOperand(0);
2573 Value *TrueVal = Sel.getTrueValue(), *FalseVal = Sel.getFalseValue();
2574
2575 // select (freeze(x == y)), x, y --> y
2576 // select (freeze(x != y)), x, y --> x
2577 // The freeze should be only used by this select. Otherwise, remaining uses of
2578 // the freeze can observe a contradictory value.
2579 // c = freeze(x == y) ; Let's assume that y = poison & x = 42; c is 0 or 1
2580 // a = select c, x, y ;
2581 // f(a, c) ; f(poison, 1) cannot happen, but if a is folded
2582 // ; to y, this can happen.
2583 CmpInst::Predicate Pred;
2584 if (FI->hasOneUse() &&
2585 match(Cond, m_c_ICmp(Pred, m_Specific(TrueVal), m_Specific(FalseVal))) &&
2586 (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE)) {
2587 return Pred == ICmpInst::ICMP_EQ ? FalseVal : TrueVal;
2588 }
2589
2590 return nullptr;
2591}
2592
2593Instruction *InstCombinerImpl::foldAndOrOfSelectUsingImpliedCond(Value *Op,
2594 SelectInst &SI,
2595 bool IsAnd) {
2596 Value *CondVal = SI.getCondition();
2597 Value *A = SI.getTrueValue();
2598 Value *B = SI.getFalseValue();
2599
2600   assert(Op->getType()->isIntOrIntVectorTy(1) &&
2601          "Op must be either i1 or vector of i1.");
2602
2603 Optional<bool> Res = isImpliedCondition(Op, CondVal, DL, IsAnd);
2604 if (!Res)
2605 return nullptr;
2606
2607 Value *Zero = Constant::getNullValue(A->getType());
2608 Value *One = Constant::getAllOnesValue(A->getType());
2609
2610 if (*Res == true) {
2611 if (IsAnd)
2612 // select op, (select cond, A, B), false => select op, A, false
2613 // and op, (select cond, A, B) => select op, A, false
2614 // if op = true implies condval = true.
2615 return SelectInst::Create(Op, A, Zero);
2616 else
2617 // select op, true, (select cond, A, B) => select op, true, A
2618 // or op, (select cond, A, B) => select op, true, A
2619 // if op = false implies condval = true.
2620 return SelectInst::Create(Op, One, A);
2621 } else {
2622 if (IsAnd)
2623 // select op, (select cond, A, B), false => select op, B, false
2624 // and op, (select cond, A, B) => select op, B, false
2625 // if op = true implies condval = false.
2626 return SelectInst::Create(Op, B, Zero);
2627 else
2628 // select op, true, (select cond, A, B) => select op, true, B
2629 // or op, (select cond, A, B) => select op, true, B
2630 // if op = false implies condval = false.
2631 return SelectInst::Create(Op, One, B);
2632 }
2633}
2634
2635Instruction *InstCombinerImpl::visitSelectInst(SelectInst &SI) {
2636 Value *CondVal = SI.getCondition();
2637 Value *TrueVal = SI.getTrueValue();
2638 Value *FalseVal = SI.getFalseValue();
2639 Type *SelType = SI.getType();
2640
2641 // FIXME: Remove this workaround when freeze related patches are done.
2642 // For select with undef operand which feeds into an equality comparison,
2643 // don't simplify it so loop unswitch can know the equality comparison
2644 // may have an undef operand. This is a workaround for PR31652 caused by
2645  // a discrepancy about branch on undef between LoopUnswitch and GVN.
2646 if (match(TrueVal, m_Undef()) || match(FalseVal, m_Undef())) {
2647 if (llvm::any_of(SI.users(), [&](User *U) {
2648 ICmpInst *CI = dyn_cast<ICmpInst>(U);
2649 if (CI && CI->isEquality())
2650 return true;
2651 return false;
2652 })) {
2653 return nullptr;
2654 }
2655 }
2656
2657 if (Value *V = SimplifySelectInst(CondVal, TrueVal, FalseVal,
2658 SQ.getWithInstruction(&SI)))
2659 return replaceInstUsesWith(SI, V);
2660
2661 if (Instruction *I = canonicalizeSelectToShuffle(SI))
2662 return I;
2663
2664 if (Instruction *I = canonicalizeScalarSelectOfVecs(SI, *this))
2665 return I;
2666
2667 CmpInst::Predicate Pred;
2668
2669 // Avoid potential infinite loops by checking for non-constant condition.
2670 // TODO: Can we assert instead by improving canonicalizeSelectToShuffle()?
2671 // Scalar select must have simplified?
2672 if (SelType->isIntOrIntVectorTy(1) && !isa<Constant>(CondVal) &&
2673 TrueVal->getType() == CondVal->getType()) {
2674 // Folding select to and/or i1 isn't poison safe in general. impliesPoison
2675 // checks whether folding it does not convert a well-defined value into
2676 // poison.
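    // A sketch of why: for select i1 %c, i1 true, i1 %p with %c == true and
    // %p == poison, the select is true, but the folded 'or i1 %c, %p' would
    // be poison, introducing poison where there was none.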
2677 if (match(TrueVal, m_One()) && impliesPoison(FalseVal, CondVal)) {
2678 // Change: A = select B, true, C --> A = or B, C
2679 return BinaryOperator::CreateOr(CondVal, FalseVal);
2680 }
2681 if (match(FalseVal, m_Zero()) && impliesPoison(TrueVal, CondVal)) {
2682 // Change: A = select B, C, false --> A = and B, C
2683 return BinaryOperator::CreateAnd(CondVal, TrueVal);
2684 }
2685
2686 auto *One = ConstantInt::getTrue(SelType);
2687 auto *Zero = ConstantInt::getFalse(SelType);
2688
2689 // We match the "full" 0 or 1 constant here to avoid a potential infinite
2690 // loop with vectors that may have undefined/poison elements.
2691 // select a, false, b -> select !a, b, false
2692 if (match(TrueVal, m_Specific(Zero))) {
2693 Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
2694 return SelectInst::Create(NotCond, FalseVal, Zero);
2695 }
2696 // select a, b, true -> select !a, true, b
2697 if (match(FalseVal, m_Specific(One))) {
2698 Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
2699 return SelectInst::Create(NotCond, One, TrueVal);
2700 }
2701
2702 // select a, a, b -> select a, true, b
2703 if (CondVal == TrueVal)
2704 return replaceOperand(SI, 1, One);
2705 // select a, b, a -> select a, b, false
2706 if (CondVal == FalseVal)
2707 return replaceOperand(SI, 2, Zero);
2708
2709 // select a, !a, b -> select !a, b, false
2710 if (match(TrueVal, m_Not(m_Specific(CondVal))))
2711 return SelectInst::Create(TrueVal, FalseVal, Zero);
2712 // select a, b, !a -> select !a, true, b
2713 if (match(FalseVal, m_Not(m_Specific(CondVal))))
2714 return SelectInst::Create(FalseVal, One, TrueVal);
2715
2716 Value *A, *B;
2717
2718 // DeMorgan in select form: !a && !b --> !(a || b)
2719 // select !a, !b, false --> not (select a, true, b)
2720 if (match(&SI, m_LogicalAnd(m_Not(m_Value(A)), m_Not(m_Value(B)))) &&
2721 (CondVal->hasOneUse() || TrueVal->hasOneUse()) &&
2722 !match(A, m_ConstantExpr()) && !match(B, m_ConstantExpr()))
2723 return BinaryOperator::CreateNot(Builder.CreateSelect(A, One, B));
2724
2725 // DeMorgan in select form: !a || !b --> !(a && b)
2726 // select !a, true, !b --> not (select a, b, false)
2727 if (match(&SI, m_LogicalOr(m_Not(m_Value(A)), m_Not(m_Value(B)))) &&
2728 (CondVal->hasOneUse() || FalseVal->hasOneUse()) &&
2729 !match(A, m_ConstantExpr()) && !match(B, m_ConstantExpr()))
2730 return BinaryOperator::CreateNot(Builder.CreateSelect(A, B, Zero));
2731
2732 // select (select a, true, b), true, b -> select a, true, b
2733 if (match(CondVal, m_Select(m_Value(A), m_One(), m_Value(B))) &&
2734 match(TrueVal, m_One()) && match(FalseVal, m_Specific(B)))
2735 return replaceOperand(SI, 0, A);
2736 // select (select a, b, false), b, false -> select a, b, false
2737 if (match(CondVal, m_Select(m_Value(A), m_Value(B), m_Zero())) &&
2738 match(TrueVal, m_Specific(B)) && match(FalseVal, m_Zero()))
2739 return replaceOperand(SI, 0, A);
2740
2741 if (!SelType->isVectorTy()) {
2742 if (Value *S = simplifyWithOpReplaced(TrueVal, CondVal, One, SQ,
2743 /* AllowRefinement */ true))
2744 return replaceOperand(SI, 1, S);
2745 if (Value *S = simplifyWithOpReplaced(FalseVal, CondVal, Zero, SQ,
2746 /* AllowRefinement */ true))
2747 return replaceOperand(SI, 2, S);
2748 }
2749
2750 if (match(FalseVal, m_Zero()) || match(TrueVal, m_One())) {
2751 Use *Y = nullptr;
2752 bool IsAnd = match(FalseVal, m_Zero());
2753 Value *Op1 = IsAnd ? TrueVal : FalseVal;
2754 if (isCheckForZeroAndMulWithOverflow(CondVal, Op1, IsAnd, Y)) {
2755 auto *FI = new FreezeInst(*Y, (*Y)->getName() + ".fr");
2756 InsertNewInstBefore(FI, *cast<Instruction>(Y->getUser()));
2757 replaceUse(*Y, FI);
2758 return replaceInstUsesWith(SI, Op1);
2759 }
2760
2761 if (auto *Op1SI = dyn_cast<SelectInst>(Op1))
2762 if (auto *I = foldAndOrOfSelectUsingImpliedCond(CondVal, *Op1SI,
2763 /* IsAnd */ IsAnd))
2764 return I;
2765
2766 if (auto *ICmp0 = dyn_cast<ICmpInst>(CondVal)) {
2767 if (auto *ICmp1 = dyn_cast<ICmpInst>(Op1)) {
2768 if (auto *V = foldAndOrOfICmpsOfAndWithPow2(ICmp0, ICmp1, &SI, IsAnd,
2769 /* IsLogical */ true))
2770 return replaceInstUsesWith(SI, V);
2771
2772 if (auto *V = foldEqOfParts(ICmp0, ICmp1, IsAnd))
2773 return replaceInstUsesWith(SI, V);
2774 }
2775 }
2776 }
2777
2778 // select (select a, true, b), c, false -> select a, c, false
2779 // select c, (select a, true, b), false -> select c, a, false
2780 // if c implies that b is false.
2781 if (match(CondVal, m_Select(m_Value(A), m_One(), m_Value(B))) &&
2782 match(FalseVal, m_Zero())) {
2783 Optional<bool> Res = isImpliedCondition(TrueVal, B, DL);
2784 if (Res && !*Res)
2785 return replaceOperand(SI, 0, A);
2786 }
2787 if (match(TrueVal, m_Select(m_Value(A), m_One(), m_Value(B))) &&
2788 match(FalseVal, m_Zero())) {
2789 Optional<bool> Res = isImpliedCondition(CondVal, B, DL);
2790 if (Res && !*Res)
2791 return replaceOperand(SI, 1, A);
2792 }
2793 // select c, true, (select a, b, false) -> select c, true, a
2794 // select (select a, b, false), true, c -> select a, true, c
2795 // if c = false implies that b = true
2796 if (match(TrueVal, m_One()) &&
2797 match(FalseVal, m_Select(m_Value(A), m_Value(B), m_Zero()))) {
2798 Optional<bool> Res = isImpliedCondition(CondVal, B, DL, false);
2799 if (Res && *Res)
2800 return replaceOperand(SI, 2, A);
2801 }
2802 if (match(CondVal, m_Select(m_Value(A), m_Value(B), m_Zero())) &&
2803 match(TrueVal, m_One())) {
2804 Optional<bool> Res = isImpliedCondition(FalseVal, B, DL, false);
2805 if (Res && *Res)
2806 return replaceOperand(SI, 0, A);
2807 }
2808
2809 // sel (sel c, a, false), true, (sel !c, b, false) -> sel c, a, b
2810 // sel (sel !c, a, false), true, (sel c, b, false) -> sel c, b, a
2811 Value *C1, *C2;
2812 if (match(CondVal, m_Select(m_Value(C1), m_Value(A), m_Zero())) &&
2813 match(TrueVal, m_One()) &&
2814 match(FalseVal, m_Select(m_Value(C2), m_Value(B), m_Zero()))) {
2815 if (match(C2, m_Not(m_Specific(C1)))) // first case
2816 return SelectInst::Create(C1, A, B);
2817 else if (match(C1, m_Not(m_Specific(C2)))) // second case
2818 return SelectInst::Create(C2, B, A);
2819 }
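// For illustration (placeholder i1 values %c, %a and %b), the first case:
//   %nc = xor i1 %c, true
//   %t  = select i1 %c,  i1 %a, i1 false   ; c && a
//   %f  = select i1 %nc, i1 %b, i1 false   ; !c && b
//   %s  = select i1 %t,  i1 true, i1 %f    ; (c && a) || (!c && b)
// collapses to a single select: %s = select i1 %c, i1 %a, i1 %b.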
2820 }
2821
2822 // Selecting between two integer or vector splat integer constants?
2823 //
2824 // Note that we don't handle a scalar select of vectors:
2825 // select i1 %c, <2 x i8> <1, 1>, <2 x i8> <0, 0>
2826 // because that may need 3 instructions to splat the condition value:
2827 // extend, insertelement, shufflevector.
2828 //
2829 // Do not handle i1 TrueVal and FalseVal; otherwise this would result in a
2830 // zext/sext from i1 to i1.
2831 if (SelType->isIntOrIntVectorTy() && !SelType->isIntOrIntVectorTy(1) &&
2832 CondVal->getType()->isVectorTy() == SelType->isVectorTy()) {
2833 // select C, 1, 0 -> zext C to int
2834 if (match(TrueVal, m_One()) && match(FalseVal, m_Zero()))
2835 return new ZExtInst(CondVal, SelType);
2836
2837 // select C, -1, 0 -> sext C to int
2838 if (match(TrueVal, m_AllOnes()) && match(FalseVal, m_Zero()))
2839 return new SExtInst(CondVal, SelType);
2840
2841 // select C, 0, 1 -> zext !C to int
2842 if (match(TrueVal, m_Zero()) && match(FalseVal, m_One())) {
2843 Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
2844 return new ZExtInst(NotCond, SelType);
2845 }
2846
2847 // select C, 0, -1 -> sext !C to int
2848 if (match(TrueVal, m_Zero()) && match(FalseVal, m_AllOnes())) {
2849 Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
2850 return new SExtInst(NotCond, SelType);
2851 }
2852 }
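// For illustration (placeholder condition %c):
//   select i1 %c, i32 1,  i32 0   -->  zext i1 %c to i32
//   select i1 %c, i32 -1, i32 0   -->  sext i1 %c to i32
//   select i1 %c, i32 0,  i32 1   -->  %n = xor i1 %c, true
//                                      zext i1 %n to i32
//   select i1 %c, i32 0,  i32 -1  -->  %n = xor i1 %c, true
//                                      sext i1 %n to i32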
2853
2854 if (auto *FCmp = dyn_cast<FCmpInst>(CondVal)) {
2855 Value *Cmp0 = FCmp->getOperand(0), *Cmp1 = FCmp->getOperand(1);
2856 // Are we selecting a value based on a comparison of the two values?
2857 if ((Cmp0 == TrueVal && Cmp1 == FalseVal) ||
2858 (Cmp0 == FalseVal && Cmp1 == TrueVal)) {
2859 // Canonicalize to use ordered comparisons by swapping the select
2860 // operands.
2861 //
2862 // e.g.
2863 // (X ugt Y) ? X : Y -> (X ole Y) ? Y : X
2864 if (FCmp->hasOneUse() && FCmpInst::isUnordered(FCmp->getPredicate())) {
2865 FCmpInst::Predicate InvPred = FCmp->getInversePredicate();
2866 IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2867 // FIXME: The FMF should propagate from the select, not the fcmp.
2868 Builder.setFastMathFlags(FCmp->getFastMathFlags());
2869 Value *NewCond = Builder.CreateFCmp(InvPred, Cmp0, Cmp1,
2870 FCmp->getName() + ".inv");
2871 Value *NewSel = Builder.CreateSelect(NewCond, FalseVal, TrueVal);
2872 return replaceInstUsesWith(SI, NewSel);
2873 }
2874
2875 // NOTE: if we wanted to, this is where to detect MIN/MAX
2876 }
2877 }
2878
2879 // Canonicalize select with fcmp to fabs(). -0.0 makes this tricky. We need
2880 // fast-math-flags (nsz) or fsub with +0.0 (not fneg) for this to work.
2881 // (X <= +/-0.0) ? (0.0 - X) : X --> fabs(X)
2882 Instruction *FSub;
2883 if (match(CondVal, m_FCmp(Pred, m_Specific(FalseVal), m_AnyZeroFP())) &&
2884 match(TrueVal, m_FSub(m_PosZeroFP(), m_Specific(FalseVal))) &&
2885 match(TrueVal, m_Instruction(FSub)) &&
2886 (Pred == FCmpInst::FCMP_OLE || Pred == FCmpInst::FCMP_ULE)) {
2887 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FalseVal, &SI);
2888 return replaceInstUsesWith(SI, Fabs);
2889 }
2890 // (X > +/-0.0) ? X : (0.0 - X) --> fabs(X)
2891 if (match(CondVal, m_FCmp(Pred, m_Specific(TrueVal), m_AnyZeroFP())) &&
2892 match(FalseVal, m_FSub(m_PosZeroFP(), m_Specific(TrueVal))) &&
2893 match(FalseVal, m_Instruction(FSub)) &&
2894 (Pred == FCmpInst::FCMP_OGT || Pred == FCmpInst::FCMP_UGT)) {
2895 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, TrueVal, &SI);
2896 return replaceInstUsesWith(SI, Fabs);
2897 }
2898 // With nnan and nsz:
2899 // (X < +/-0.0) ? -X : X --> fabs(X)
2900 // (X <= +/-0.0) ? -X : X --> fabs(X)
2901 Instruction *FNeg;
2902 if (match(CondVal, m_FCmp(Pred, m_Specific(FalseVal), m_AnyZeroFP())) &&
2903 match(TrueVal, m_FNeg(m_Specific(FalseVal))) &&
2904 match(TrueVal, m_Instruction(FNeg)) && SI.hasNoSignedZeros() &&
2905 (Pred == FCmpInst::FCMP_OLT || Pred == FCmpInst::FCMP_OLE ||
2906 Pred == FCmpInst::FCMP_ULT || Pred == FCmpInst::FCMP_ULE)) {
2907 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FalseVal, &SI);
2908 return replaceInstUsesWith(SI, Fabs);
2909 }
2910 // With nnan and nsz:
2911 // (X > +/-0.0) ? X : -X --> fabs(X)
2912 // (X >= +/-0.0) ? X : -X --> fabs(X)
2913 if (match(CondVal, m_FCmp(Pred, m_Specific(TrueVal), m_AnyZeroFP())) &&
2914 match(FalseVal, m_FNeg(m_Specific(TrueVal))) &&
2915 match(FalseVal, m_Instruction(FNeg)) && SI.hasNoSignedZeros() &&
2916 (Pred == FCmpInst::FCMP_OGT || Pred == FCmpInst::FCMP_OGE ||
2917 Pred == FCmpInst::FCMP_UGT || Pred == FCmpInst::FCMP_UGE)) {
2918 Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, TrueVal, &SI);
2919 return replaceInstUsesWith(SI, Fabs);
2920 }
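// For illustration (placeholder float %x, with the flags required above):
//   %cmp = fcmp ole float %x, 0.000000e+00
//   %sub = fsub float 0.000000e+00, %x                 ; +0.0 - x, not fneg
//   %sel = select i1 %cmp, float %sub, float %x
// becomes %sel = call float @llvm.fabs.f32(float %x); requiring the fsub with
// +0.0 (or nsz on the select for the fneg forms) keeps -0.0 handled correctly.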
2921
2922 // See if we are selecting two values based on a comparison of the two values.
2923 if (ICmpInst *ICI = dyn_cast<ICmpInst>(CondVal))
2924 if (Instruction *Result = foldSelectInstWithICmp(SI, ICI))
2925 return Result;
2926
2927 if (Instruction *Add = foldAddSubSelect(SI, Builder))
2928 return Add;
2929 if (Instruction *Add = foldOverflowingAddSubSelect(SI, Builder))
2930 return Add;
2931 if (Instruction *Or = foldSetClearBits(SI, Builder))
2932 return Or;
2933
2934 // Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z))
2935 auto *TI = dyn_cast<Instruction>(TrueVal);
2936 auto *FI = dyn_cast<Instruction>(FalseVal);
2937 if (TI && FI && TI->getOpcode() == FI->getOpcode())
2938 if (Instruction *IV = foldSelectOpOp(SI, TI, FI))
2939 return IV;
2940
2941 if (Instruction *I = foldSelectExtConst(SI))
2942 return I;
2943
2944 // Fold (select C, (gep Ptr, Idx), Ptr) -> (gep Ptr, (select C, Idx, 0))
2945 // Fold (select C, Ptr, (gep Ptr, Idx)) -> (gep Ptr, (select C, 0, Idx))
2946 auto SelectGepWithBase = [&](GetElementPtrInst *Gep, Value *Base,
2947 bool Swap) -> GetElementPtrInst * {
2948 Value *Ptr = Gep->getPointerOperand();
2949 if (Gep->getNumOperands() != 2 || Gep->getPointerOperand() != Base ||
2950 !Gep->hasOneUse())
2951 return nullptr;
2952 Type *ElementType = Gep->getResultElementType();
2953 Value *Idx = Gep->getOperand(1);
2954 Value *NewT = Idx;
2955 Value *NewF = Constant::getNullValue(Idx->getType());
2956 if (Swap)
2957 std::swap(NewT, NewF);
2958 Value *NewSI =
2959 Builder.CreateSelect(CondVal, NewT, NewF, SI.getName() + ".idx", &SI);
2960 return GetElementPtrInst::Create(ElementType, Ptr, {NewSI});
2961 };
2962 if (auto *TrueGep = dyn_cast<GetElementPtrInst>(TrueVal))
2963 if (auto *NewGep = SelectGepWithBase(TrueGep, FalseVal, false))
2964 return NewGep;
2965 if (auto *FalseGep = dyn_cast<GetElementPtrInst>(FalseVal))
2966 if (auto *NewGep = SelectGepWithBase(FalseGep, TrueVal, true))
2967 return NewGep;
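// For illustration (placeholder pointer %p, index %i and condition %c, using
// typed pointers as in this snapshot):
//   %gep = getelementptr i32, i32* %p, i64 %i
//   %sel = select i1 %c, i32* %gep, i32* %p
// becomes
//   %idx = select i1 %c, i64 %i, i64 0
//   %sel = getelementptr i32, i32* %p, i64 %idx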
2968
2969 // See if we can fold the select into one of our operands.
2970 if (SelType->isIntOrIntVectorTy() || SelType->isFPOrFPVectorTy()) {
2971 if (Instruction *FoldI = foldSelectIntoOp(SI, TrueVal, FalseVal))
2972 return FoldI;
2973
2974 Value *LHS, *RHS;
2975 Instruction::CastOps CastOp;
2976 SelectPatternResult SPR = matchSelectPattern(&SI, LHS, RHS, &CastOp);
2977 auto SPF = SPR.Flavor;
2978 if (SPF) {
2979 Value *LHS2, *RHS2;
2980 if (SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor)
2981 if (Instruction *R = foldSPFofSPF(cast<Instruction>(LHS), SPF2, LHS2,
2982 RHS2, SI, SPF, RHS))
2983 return R;
2984 if (SelectPatternFlavor SPF2 = matchSelectPattern(RHS, LHS2, RHS2).Flavor)
2985 if (Instruction *R = foldSPFofSPF(cast<Instruction>(RHS), SPF2, LHS2,
2986 RHS2, SI, SPF, LHS))
2987 return R;
2988 // TODO.
2989 // ABS(-X) -> ABS(X)
2990 }
2991
2992 if (SelectPatternResult::isMinOrMax(SPF)) {
2993 // Canonicalize so that
2994 // - type casts are outside select patterns.
2995 // - float clamp is transformed to min/max pattern
2996
2997 bool IsCastNeeded = LHS->getType() != SelType;
2998 Value *CmpLHS = cast<CmpInst>(CondVal)->getOperand(0);
2999 Value *CmpRHS = cast<CmpInst>(CondVal)->getOperand(1);
3000 if (IsCastNeeded ||
3001 (LHS->getType()->isFPOrFPVectorTy() &&
3002 ((CmpLHS != LHS && CmpLHS != RHS) ||
3003 (CmpRHS != LHS && CmpRHS != RHS)))) {
3004 CmpInst::Predicate MinMaxPred = getMinMaxPred(SPF, SPR.Ordered);
3005
3006 Value *Cmp;
3007 if (CmpInst::isIntPredicate(MinMaxPred)) {
3008 Cmp = Builder.CreateICmp(MinMaxPred, LHS, RHS);
3009 } else {
3010 IRBuilder<>::FastMathFlagGuard FMFG(Builder);
3011 auto FMF =
3012 cast<FPMathOperator>(SI.getCondition())->getFastMathFlags();
3013 Builder.setFastMathFlags(FMF);
3014 Cmp = Builder.CreateFCmp(MinMaxPred, LHS, RHS);
3015 }
3016
3017 Value *NewSI = Builder.CreateSelect(Cmp, LHS, RHS, SI.getName(), &SI);
3018 if (!IsCastNeeded)
3019 return replaceInstUsesWith(SI, NewSI);
3020
3021 Value *NewCast = Builder.CreateCast(CastOp, NewSI, SelType);
3022 return replaceInstUsesWith(SI, NewCast);
3023 }
3024
3025 // MAX(~a, ~b) -> ~MIN(a, b)
3026 // MAX(~a, C) -> ~MIN(a, ~C)
3027 // MIN(~a, ~b) -> ~MAX(a, b)
3028 // MIN(~a, C) -> ~MAX(a, ~C)
3029 auto moveNotAfterMinMax = [&](Value *X, Value *Y) -> Instruction * {
3030 Value *A;
3031 if (match(X, m_Not(m_Value(A))) && !X->hasNUsesOrMore(3) &&
3032 !isFreeToInvert(A, A->hasOneUse()) &&
3033 // Passing false to only consider m_Not and constants.
3034 isFreeToInvert(Y, false)) {
3035 Value *B = Builder.CreateNot(Y);
3036 Value *NewMinMax = createMinMax(Builder, getInverseMinMaxFlavor(SPF),
3037 A, B);
3038 // Copy the profile metadata.
3039 if (MDNode *MD = SI.getMetadata(LLVMContext::MD_prof)) {
3040 cast<SelectInst>(NewMinMax)->setMetadata(LLVMContext::MD_prof, MD);
3041 // Swap the metadata if the operands are swapped.
3042 if (X == SI.getFalseValue() && Y == SI.getTrueValue())
3043 cast<SelectInst>(NewMinMax)->swapProfMetadata();
3044 }
3045
3046 return BinaryOperator::CreateNot(NewMinMax);
3047 }
3048
3049 return nullptr;
3050 };
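// For illustration (placeholder i32 values %a, %b), MAX(~a, ~b) -> ~MIN(a, b):
//   %na  = xor i32 %a, -1
//   %nb  = xor i32 %b, -1
//   %cmp = icmp sgt i32 %na, %nb
//   %max = select i1 %cmp, i32 %na, i32 %nb    ; smax(~a, ~b)
// becomes
//   %cmp2 = icmp slt i32 %a, %b
//   %min  = select i1 %cmp2, i32 %a, i32 %b    ; smin(a, b)
//   %res  = xor i32 %min, -1                   ; ~smin(a, b)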
3051
3052 if (Instruction *I = moveNotAfterMinMax(LHS, RHS))
3053 return I;
3054 if (Instruction *I = moveNotAfterMinMax(RHS, LHS))
3055 return I;
3056
3057 if (Instruction *I = moveAddAfterMinMax(SPF, LHS, RHS, Builder))
3058 return I;
3059
3060 if (Instruction *I = factorizeMinMaxTree(SPF, LHS, RHS, Builder))
3061 return I;
3062 if (Instruction *I = matchSAddSubSat(SI))
3063 return I;
3064 }
3065 }
3066
3067 // Canonicalize select of FP values where NaN and -0.0 are not valid as
3068 // minnum/maxnum intrinsics.
3069 if (isa<FPMathOperator>(SI) && SI.hasNoNaNs() && SI.hasNoSignedZeros()) {
3070 Value *X, *Y;
3071 if (match(&SI, m_OrdFMax(m_Value(X), m_Value(Y))))
3072 return replaceInstUsesWith(
3073 SI, Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, X, Y, &SI));
3074
3075 if (match(&SI, m_OrdFMin(m_Value(X), m_Value(Y))))
3076 return replaceInstUsesWith(
3077 SI, Builder.CreateBinaryIntrinsic(Intrinsic::minnum, X, Y, &SI));
3078 }
3079
3080 // See if we can fold the select into a phi node if the condition is a select.
3081 if (auto *PN = dyn_cast<PHINode>(SI.getCondition()))
3082 // The true/false values have to be live in the PHI predecessor's blocks.
3083 if (canSelectOperandBeMappingIntoPredBlock(TrueVal, SI) &&
3084 canSelectOperandBeMappingIntoPredBlock(FalseVal, SI))
3085 if (Instruction *NV = foldOpIntoPhi(SI, PN))
3086 return NV;
3087
3088 if (SelectInst *TrueSI = dyn_cast<SelectInst>(TrueVal)) {
3089 if (TrueSI->getCondition()->getType() == CondVal->getType()) {
3090 // select(C, select(C, a, b), c) -> select(C, a, c)
3091 if (TrueSI->getCondition() == CondVal) {
3092 if (SI.getTrueValue() == TrueSI->getTrueValue())
3093 return nullptr;
3094 return replaceOperand(SI, 1, TrueSI->getTrueValue());
3095 }
3096 // select(C0, select(C1, a, b), b) -> select(C0&C1, a, b)
3097 // We choose this as the normal form to enable folding on the And and to
3098 // shorten the paths to the values (this helps getUnderlyingObjects(), for
3099 // example).
3100 if (TrueSI->getFalseValue() == FalseVal && TrueSI->hasOneUse()) {
3101 Value *And = Builder.CreateLogicalAnd(CondVal, TrueSI->getCondition());
3102 replaceOperand(SI, 0, And);
3103 replaceOperand(SI, 1, TrueSI->getTrueValue());
3104 return &SI;
3105 }
3106 }
3107 }
3108 if (SelectInst *FalseSI = dyn_cast<SelectInst>(FalseVal)) {
3109 if (FalseSI->getCondition()->getType() == CondVal->getType()) {
3110 // select(C, a, select(C, b, c)) -> select(C, a, c)
3111 if (FalseSI->getCondition() == CondVal) {
3112 if (SI.getFalseValue() == FalseSI->getFalseValue())
3113 return nullptr;
3114 return replaceOperand(SI, 2, FalseSI->getFalseValue());
3115 }
3116 // select(C0, a, select(C1, a, b)) -> select(C0|C1, a, b)
3117 if (FalseSI->getTrueValue() == TrueVal && FalseSI->hasOneUse()) {
3118 Value *Or = Builder.CreateLogicalOr(CondVal, FalseSI->getCondition());
3119 replaceOperand(SI, 0, Or);
3120 replaceOperand(SI, 2, FalseSI->getFalseValue());
3121 return &SI;
3122 }
3123 }
3124 }
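// For illustration (placeholder i1 conditions %c0, %c1 and i32 values %a, %b),
// the select(C0, a, select(C1, a, b)) case:
//   %inner = select i1 %c1, i32 %a, i32 %b
//   %outer = select i1 %c0, i32 %a, i32 %inner
// becomes
//   %or    = select i1 %c0, i1 true, i1 %c1    ; logical C0 | C1
//   %outer = select i1 %or, i32 %a, i32 %b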
3125
3126 auto canMergeSelectThroughBinop = [](BinaryOperator *BO) {
3127 // The select might be preventing a division by 0.
3128 switch (BO->getOpcode()) {
3129 default:
3130 return true;
3131 case Instruction::SRem:
3132 case Instruction::URem:
3133 case Instruction::SDiv:
3134 case Instruction::UDiv:
3135 return false;
3136 }
3137 };
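// For illustration (placeholder i1 condition %c and i32 values): with a
// non-trapping binop such as add,
//   %inner = select i1 %c, i32 %x, i32 %y
//   %bo    = add i32 %inner, %w
//   %outer = select i1 %c, i32 %bo, i32 %z
// becomes
//   %bo    = add i32 %x, %w
//   %outer = select i1 %c, i32 %bo, i32 %z
// since %bo is only used when %c is true. Division and remainder are rejected
// above because the outer select may be all that guards a division by zero.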
3138
3139 // Try to simplify a binop sandwiched between 2 selects with the same
3140 // condition.
3141 // select(C, binop(select(C, X, Y), W), Z) -> select(C, binop(X, W), Z)
3142 BinaryOperator *TrueBO;
3143 if (match(TrueVal, m_OneUse(m_BinOp(TrueBO))) &&
3144 canMergeSelectThroughBinop(TrueBO)) {
3145 if (auto *TrueBOSI = dyn_cast<SelectInst>(TrueBO->getOperand(0))) {
3146 if (TrueBOSI->getCondition() == CondVal) {
3147 replaceOperand(*TrueBO, 0, TrueBOSI->getTrueValue());
3148 Worklist.push(TrueBO);
3149 return &SI;
3150 }
3151 }
3152 if (auto *TrueBOSI = dyn_cast<SelectInst>(TrueBO->getOperand(1))) {
3153 if (TrueBOSI->getCondition() == CondVal) {
3154 replaceOperand(*TrueBO, 1, TrueBOSI->getTrueValue());
3155 Worklist.push(TrueBO);
3156 return &SI;
3157 }
3158 }
3159 }
3160
3161 // select(C, Z, binop(select(C, X, Y), W)) -> select(C, Z, binop(Y, W))
3162 BinaryOperator *FalseBO;
3163 if (match(FalseVal, m_OneUse(m_BinOp(FalseBO))) &&
3164 canMergeSelectThroughBinop(FalseBO)) {
3165 if (auto *FalseBOSI = dyn_cast<SelectInst>(FalseBO->getOperand(0))) {
3166 if (FalseBOSI->getCondition() == CondVal) {
3167 replaceOperand(*FalseBO, 0, FalseBOSI->getFalseValue());
3168 Worklist.push(FalseBO);
3169 return &SI;
3170 }
3171 }
3172 if (auto *FalseBOSI = dyn_cast<SelectInst>(FalseBO->getOperand(1))) {
3173 if (FalseBOSI->getCondition() == CondVal) {
3174 replaceOperand(*FalseBO, 1, FalseBOSI->getFalseValue());
3175 Worklist.push(FalseBO);
3176 return &SI;
3177 }
3178 }
3179 }
3180
3181 Value *NotCond;
3182 if (match(CondVal, m_Not(m_Value(NotCond))) &&
3183 !InstCombiner::shouldAvoidAbsorbingNotIntoSelect(SI)) {
3184 replaceOperand(SI, 0, NotCond);
3185 SI.swapValues();
3186 SI.swapProfMetadata();
3187 return &SI;
3188 }
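// For illustration (placeholder values):
//   %nc = xor i1 %c, true
//   %s  = select i1 %nc, i32 %a, i32 %b
// is canonicalized (subject to shouldAvoidAbsorbingNotIntoSelect) to
//   %s  = select i1 %c, i32 %b, i32 %a
// with any branch-weight (prof) metadata swapped along with the operands.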
3189
3190 if (Instruction *I = foldVectorSelect(SI))
3191 return I;
3192
3193 // If we can compute the condition, there's no need for a select.
3194 // Like the above fold, we are attempting to reduce compile-time cost by
3195 // putting this fold here with limitations rather than in InstSimplify.
3196 // The motivation for this call into value tracking is to take advantage of
3197 // the assumption cache, so make sure that is populated.
3198 if (!CondVal->getType()->isVectorTy() && !AC.assumptions().empty()) {
3199 KnownBits Known(1);
3200 computeKnownBits(CondVal, Known, 0, &SI);
3201 if (Known.One.isOneValue())
3202 return replaceInstUsesWith(SI, TrueVal);
3203 if (Known.Zero.isOneValue())
3204 return replaceInstUsesWith(SI, FalseVal);
3205 }
3206
3207 if (Instruction *BitCastSel = foldSelectCmpBitcasts(SI, Builder))
3208 return BitCastSel;
3209
3210 // Simplify selects that test the returned flag of cmpxchg instructions.
3211 if (Value *V = foldSelectCmpXchg(SI))
3212 return replaceInstUsesWith(SI, V);
3213
3214 if (Instruction *Select = foldSelectBinOpIdentity(SI, TLI, *this))
3215 return Select;
3216
3217 if (Instruction *Funnel = foldSelectFunnelShift(SI, Builder))
3218 return Funnel;
3219
3220 if (Instruction *Copysign = foldSelectToCopysign(SI, Builder))
3221 return Copysign;
3222
3223 if (Instruction *PN = foldSelectToPhi(SI, DT, Builder))
3224 return replaceInstUsesWith(SI, PN);
3225
3226 if (Value *Fr = foldSelectWithFrozenICmp(SI, Builder))
3227 return replaceInstUsesWith(SI, Fr);
3228
3229 // select(mask, mload(,,mask,0), 0) -> mload(,,mask,0)
3230 // Load inst is intentionally not checked for hasOneUse()
3231 if (match(FalseVal, m_Zero()) &&
3232 match(TrueVal, m_MaskedLoad(m_Value(), m_Value(), m_Specific(CondVal),
3233 m_CombineOr(m_Undef(), m_Zero())))) {
3234 auto *MaskedLoad = cast<IntrinsicInst>(TrueVal);
3235 if (isa<UndefValue>(MaskedLoad->getArgOperand(3)))
3236 MaskedLoad->setArgOperand(3, FalseVal /* Zero */);
3237 return replaceInstUsesWith(SI, MaskedLoad);
3238 }
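// For illustration (placeholder mask %m and pointer %p):
//   %ld = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %p, i32 4,
//                                                        <4 x i1> %m,
//                                                        <4 x i32> undef)
//   %s  = select <4 x i1> %m, <4 x i32> %ld, <4 x i32> zeroinitializer
// becomes just the masked load with its passthru set to zeroinitializer, since
// the masked-off lanes are exactly the lanes the select would have zeroed.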
3239
3240 Value *Mask;
3241 if (match(TrueVal, m_Zero()) &&
3242 match(FalseVal, m_MaskedLoad(m_Value(), m_Value(), m_Value(Mask),
3243 m_CombineOr(m_Undef(), m_Zero()))) &&
3244 (CondVal->getType() == Mask->getType())) {
3245 // We can remove the select by ensuring the load zeros all the lanes that
3246 // the select would have zeroed. We determine this by proving there is no
3247 // overlap between the load and select masks,
3248 // i.e. (load_mask & select_mask) == 0 means no overlap.
3249 bool CanMergeSelectIntoLoad = false;
3250 if (Value *V = SimplifyAndInst(CondVal, Mask, SQ.getWithInstruction(&SI)))
3251 CanMergeSelectIntoLoad = match(V, m_Zero());
3252
3253 if (CanMergeSelectIntoLoad) {
3254 auto *MaskedLoad = cast<IntrinsicInst>(FalseVal);
3255 if (isa<UndefValue>(MaskedLoad->getArgOperand(3)))
3256 MaskedLoad->setArgOperand(3, TrueVal /* Zero */);
3257 return replaceInstUsesWith(SI, MaskedLoad);
3258 }
3259 }
3260
3261 return nullptr;
3262}