//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));
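
// Being cl::Hidden, this knob is omitted from -help, but it can still be set
// explicitly when experimenting, e.g. by passing -dom-conditions-max-uses=40
// to opt (illustrative invocation).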

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getIndexTypeSizeInBits(Ty);
}
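
// For example, a <4 x i32> type yields 32 (the element width), while a
// pointer type reports 0 from getScalarSizeInBits(), so the DataLayout's
// index width for that pointer is used instead.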

namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
  /// (all of which can call computeKnownBits), and so on.
  std::array<const Value *, MaxDepth> Excluded;

  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ;

  unsigned NumExcluded = 0;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, bool UseInstrInfo,
        OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};

} // end anonymous namespace
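
// Note how the second Query constructor is used: when an assume is consulted
// for one value, the recursive query for the other side of assume(x == y)
// carries that assume in Excluded, so the walk from y back to x cannot
// revisit it and recurse forever.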

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static void computeKnownBits(const Value *V, KnownBits &Known,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
  Value *M;
  if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(RHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(LHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}
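
// Example: with X = A & 0xF0 and Y = B & 0x0F, every bit is known zero in at
// least one operand, so this returns true and a caller may rewrite X + Y as
// the equivalent X | Y.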

bool llvm::isOnlyUsedInZeroEqualityComparison(const Value *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2,
                           Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownOut.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  KnownBits LHSKnown(BitWidth);
  computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
  computeKnownBits(Op1, Known2, Depth + 1, Q);

  KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
}
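
// Example: adding a value whose low two bits are known to be 10 to one whose
// low two bits are known to be 01 produces low bits 11 with no carry, so
// computeForAddSub reports the low two bits of the sum as known.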

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                KnownBits &Known, KnownBits &Known2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(Op1, Known, Depth + 1, Q);
  computeKnownBits(Op0, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  assert(!Known.hasConflict() && !Known2.hasConflict());
  // Compute a conservative estimate for high known-0 bits.
  unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                                Known2.countMinLeadingZeros(),
                            BitWidth) - BitWidth;
  LeadZ = std::min(LeadZ, BitWidth);

  // The result of the bottom bits of an integer multiply can be
  // inferred by looking at the bottom bits of both operands and
  // multiplying them together.
  // We can infer at least the minimum number of known trailing bits
  // of both operands. Depending on number of trailing zeros, we can
  // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
  // a and b are divisible by m and n respectively.
  // We then calculate how many of those bits are inferrable and set
  // the output. For example, the i8 mul:
  //  a = XXXX1100 (12)
  //  b = XXXX1110 (14)
  // We know the bottom 3 bits are zero since the first can be divided by
  // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
  // Applying the multiplication to the trimmed arguments gets:
  //    XX11 (3)
  //    X111 (7)
  // -------
  //    XX11
  //   XX11
  //  XX11
  // XX11
  // -------
  // XXXXX01
  // Which allows us to infer the 2 LSBs. Since we're multiplying the result
  // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
  // The proof for this can be described as:
  // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
  //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
  //                    umin(countTrailingZeros(C2), C6) +
  //                    umin(C5 - umin(countTrailingZeros(C1), C5),
  //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
  // %aa = shl i8 %a, C5
  // %bb = shl i8 %b, C6
  // %aaa = or i8 %aa, C1
  // %bbb = or i8 %bb, C2
  // %mul = mul i8 %aaa, %bbb
  // %mask = and i8 %mul, C7
  //   =>
  // %mask = i8 ((C1*C2)&C7)
  // Where C5, C6 describe the known bits of %a, %b
  // C1, C2 describe the known bottom bits of %a, %b.
  // C7 describes the mask of the known bits of the result.
  APInt Bottom0 = Known.One;
  APInt Bottom1 = Known2.One;

  // How many times we'd be able to divide each argument by 2 (shr by 1).
  // This gives us the number of trailing zeros on the multiplication result.
  unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
  unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
  unsigned TrailZero0 = Known.countMinTrailingZeros();
  unsigned TrailZero1 = Known2.countMinTrailingZeros();
  unsigned TrailZ = TrailZero0 + TrailZero1;

  // Figure out the fewest known-bits operand.
  unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
                                      TrailBitsKnown1 - TrailZero1);
  unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);

  APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
                      Bottom1.getLoBits(TrailBitsKnown1);

  Known.resetAll();
  Known.Zero.setHighBits(LeadZ);
  Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
  Known.One |= BottomKnown.getLoBits(ResultBitsKnown);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

static void computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                              KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    Known.One &= Range.getUnsignedMax() & Mask;
    Known.Zero &= ~Range.getUnsignedMax() & Mask;
  }
}
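
// Example: !range metadata of [0, 32) on an i8 load leaves the top three bits
// known zero, since 0 and 31 (and everything in between) share that prefix.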

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
                       return EphValues.count(U);
                     })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
               J != JE; ++J)
            WorkSet.push_back(*J);
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::sideeffect:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::dbg_label:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
              IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
           std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}
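
// For instance, an assume in the entry block is valid for any context in the
// function (it dominates everything), while an assume that follows a call
// which may throw cannot be used for a context before that call, since the
// loop above treats the call as a possible interruption of control flow.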

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));

    CmpInst::Predicate Pred;
    uint64_t C;
    switch (Cmp->getPredicate()) {
    default:
      break;
    case ICmpInst::ICMP_EQ:
      // assume(v = a)
      if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        Known.Zero |= RHSKnown.Zero;
        Known.One  |= RHSKnown.One;
      // assume(v & b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        KnownBits MaskKnown(BitWidth);
        computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

        // For those bits in the mask that are known to be one, we can propagate
        // known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & MaskKnown.One;
        Known.One  |= RHSKnown.One  & MaskKnown.One;
      // assume(~(v & b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        KnownBits MaskKnown(BitWidth);
        computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

        // For those bits in the mask that are known to be one, we can propagate
        // inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One  & MaskKnown.One;
        Known.One  |= RHSKnown.Zero & MaskKnown.One;
      // assume(v | b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        KnownBits BKnown(BitWidth);
        computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

        // For those bits in B that are known to be zero, we can propagate known
        // bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One  |= RHSKnown.One  & BKnown.Zero;
      // assume(~(v | b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        KnownBits BKnown(BitWidth);
        computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One  & BKnown.Zero;
        Known.One  |= RHSKnown.Zero & BKnown.Zero;
      // assume(v ^ b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        KnownBits BKnown(BitWidth);
        computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

        // For those bits in B that are known to be zero, we can propagate known
        // bits from the RHS to V. For those bits in B that are known to be one,
        // we can propagate inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One  |= RHSKnown.One  & BKnown.Zero;
        Known.Zero |= RHSKnown.One  & BKnown.One;
        Known.One  |= RHSKnown.Zero & BKnown.One;
      // assume(~(v ^ b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        KnownBits BKnown(BitWidth);
        computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V. For those bits in B that are
        // known to be one, we can propagate known bits from the RHS to V.
        Known.Zero |= RHSKnown.One  & BKnown.Zero;
        Known.One  |= RHSKnown.Zero & BKnown.Zero;
        Known.Zero |= RHSKnown.Zero & BKnown.One;
        Known.One  |= RHSKnown.One  & BKnown.One;
      // assume(v << c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the right by C.
        RHSKnown.Zero.lshrInPlace(C);
        Known.Zero |= RHSKnown.Zero;
        RHSKnown.One.lshrInPlace(C);
        Known.One  |= RHSKnown.One;
      // assume(~(v << c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the right by C.
        RHSKnown.One.lshrInPlace(C);
        Known.Zero |= RHSKnown.One;
        RHSKnown.Zero.lshrInPlace(C);
        Known.One  |= RHSKnown.Zero;
      // assume(v >> c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.Zero << C;
        Known.One  |= RHSKnown.One  << C;
      // assume(~(v >> c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.One  << C;
        Known.One  |= RHSKnown.Zero << C;
      }
      break;
    case ICmpInst::ICMP_SGE:
      // assume(v >=_s c) where c is non-negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));

        if (RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SGT:
      // assume(v >_s c) where c is at least -1.
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));

        if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLE:
      // assume(v <=_s c) where c is negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));

        if (RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLT:
      // assume(v <_s c) where c is non-positive
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

        if (RHSKnown.isZero() || RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_ULE:
      // assume(v <=_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

        // Whatever high bits in c are zero are known to be zero.
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    case ICmpInst::ICMP_ULT:
      // assume(v <_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

        // If the RHS is known zero, then this assumption must be wrong (nothing
        // is unsigned less than zero). Signal a conflict and get out of here.
        if (RHSKnown.isZero()) {
          Known.Zero.setAllBits();
          Known.One.setAllBits();
          break;
        }

        // Whatever high bits in c are zero are known to be zero (if c is a power
        // of 2, then one more).
        if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
        else
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}
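
// Worked example of the ICMP_EQ and-mask pattern above:
//   %m = and i8 %v, 15
//   %cmp = icmp eq i8 %m, 4
//   call void @llvm.assume(i1 %cmp)
// Here MaskKnown.One covers the low four bits and RHSKnown is exactly 4, so
// %v's low four bits become known: Known.One |= 0b0100, Known.Zero |= 0b1011.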

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
/// operator-specific functions that, given the known-zero or known-one bits
/// respectively, and a shift amount, compute the implied known-zero or
/// known-one bits of the shift operator's result respectively for that shift
/// amount. The results from calling KZF and KOF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, KnownBits &Known, KnownBits &Known2,
    unsigned Depth, const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = Known.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known.Zero = KZF(Known.Zero, ShiftAmt);
    Known.One  = KOF(Known.One, ShiftAmt);
    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive. TODO: Should we just carry on?
  if ((~Known.Zero).uge(BitWidth)) {
    Known.resetAll();
    return;
  }

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();

  // It would be more-clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known.Zero &= KZF(Known2.Zero, ShiftAmt);
    Known.One  &= KOF(Known2.One, ShiftAmt);
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}
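
// Example: for "shl i8 %x, %s" with %s known to match 00000?0?, the loop
// above folds in KZF/KOF results only for the compatible shift amounts
// 0, 1, 4 and 5 (and skips 0 as well when %s is also known to be nonzero).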

static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
                                         unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(Known);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    Known.Zero |= Known2.Zero;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form add(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *X = nullptr, *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = std::move(KnownZeroOut);
    break;
  }
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
                        Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2.resetAll();
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
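  // Example for the udiv case above: on i32, a numerator with 24 known
  // leading zeros divided by a denominator known to be at least 16 (at most
  // 27 leading zeros) gives LeadZ = min(32, 24 + 32 - 27 - 1) = 28.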
  case Instruction::Select: {
    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (Known.isNegative() && Known2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (Known.isNonNegative() || Known2.isNonNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (Known.isNonNegative() && Known2.isNonNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(Known.countMinLeadingZeros(),
                                Known2.countMinLeadingZeros());
      // If either side is negative, the result is negative.
      else if (Known.isNegative() || Known2.isNegative())
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    } else if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
        MaxHighZeros = 1;
    }

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    if (MaxHighOnes > 0)
      Known.One.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      Known.Zero.setHighBits(MaxHighZeros);
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getIndexTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.zextOrTrunc(SrcBitWidth, false);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth, true /* ExtendedBitsAreKnownZero */);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isSignBitSet())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isSignBitSet())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::LShr: {
    // (lshr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero.lshr(ShiftAmt);
      // High bits known zero.
      KZResult.setHighBits(ShiftAmt);
      return KZResult;
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0   iff  (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;

        assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    // If it's known zero, our sign bit is also zero.
    if (Known2.isNonNegative())
      Known.makeNonNegative();

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        Known.Zero |= ~LowBits;
        Known.One &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    unsigned Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }
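
  // Example: "urem i32 %x, 8" takes the power-of-two path above, leaving
  // everything except the low three bits of the result known zero.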

  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      Known.Zero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    KnownBits LocalKnown(BitWidth);
    computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
    unsigned TrailZ = LocalKnown.countMinTrailingZeros();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnown, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnown.countMinTrailingZeros()));
      }
    }

    Known.Zero.setLowBits(TrailZ);
    break;
  }
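  // Example: a gep indexing an [N x i32] array adds countTrailingZeros(4) == 2
  // trailing zero bits from the element size, so with an 8-byte-aligned base
  // pointer the low two bits of the result are known zero.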
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          computeKnownBits(R, Known2, Depth + 1, Q);

          // We need to take the minimum number of known bits.
          KnownBits Known3(Known);
          computeKnownBits(L, Known3, Depth + 1, Q);

          Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                         Known3.countMinTrailingZeros()));

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
            // If initial value of recurrence is nonnegative, and we are adding
            // a nonnegative number with nsw, the result can only be nonnegative
            // or poison value regardless of the number of times we execute the
            // add in phi recurrence. If initial value is negative and we are
            // adding a negative number with nsw, the result can only be
            // negative or poison value. Similar arguments apply to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (Known2.isNonNegative() && Known3.isNonNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNegative())
                Known.makeNegative();
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (Known2.isNonNegative() && Known3.isNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNonNegative())
                Known.makeNegative();
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                     Known3.isNonNegative())
              Known.makeNonNegative();
          }

          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value references ourself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        Known2 = KnownBits(BitWidth);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
        Known.Zero &= Known2.Zero;
        Known.One &= Known2.One;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!Known.Zero && !Known.One)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known.Zero |= Known2.Zero;
      Known.One |= Known2.One;
    }
1463  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1464  switch (II->getIntrinsicID()) {
1465  default: break;
1466  case Intrinsic::bitreverse:
1467  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1468  Known.Zero |= Known2.Zero.reverseBits();
1469  Known.One |= Known2.One.reverseBits();
1470  break;
1471  case Intrinsic::bswap:
1472  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1473  Known.Zero |= Known2.Zero.byteSwap();
1474  Known.One |= Known2.One.byteSwap();
1475  break;
1476  case Intrinsic::ctlz: {
1477  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1478  // If we have a known 1, its position is our upper bound.
1479  unsigned PossibleLZ = Known2.One.countLeadingZeros();
1480  // If this call is undefined for 0, the result will be less than 2^n.
1481  if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1482  PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1483  unsigned LowBits = Log2_32(PossibleLZ)+1;
1484  Known.Zero.setBitsFrom(LowBits);
1485  break;
1486  }
1487  case Intrinsic::cttz: {
1488  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1489  // If we have a known 1, its position is our upper bound.
1490  unsigned PossibleTZ = Known2.One.countTrailingZeros();
1491  // If this call is undefined for 0, the result will be less than 2^n.
1492  if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1493  PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1494  unsigned LowBits = Log2_32(PossibleTZ)+1;
1495  Known.Zero.setBitsFrom(LowBits);
1496  break;
1497  }
1498  case Intrinsic::ctpop: {
1499  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1500  // We can bound the space the count needs. Also, bits known to be zero
1501  // can't contribute to the population.
1502  unsigned BitsPossiblySet = Known2.countMaxPopulation();
1503  unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1504  Known.Zero.setBitsFrom(LowBits);
1505  // TODO: we could bound KnownOne using the lower bound on the number
1506  // of bits which might be set provided by popcnt KnownOne2.
1507  break;
1508  }
1509  case Intrinsic::fshr:
1510  case Intrinsic::fshl: {
1511  const APInt *SA;
1512  if (!match(I->getOperand(2), m_APInt(SA)))
1513  break;
1514 
1515  // Normalize to funnel shift left.
1516  uint64_t ShiftAmt = SA->urem(BitWidth);
1517  if (II->getIntrinsicID() == Intrinsic::fshr)
1518  ShiftAmt = BitWidth - ShiftAmt;
1519 
1520  KnownBits Known3(Known);
1521  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1522  computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1523 
1524  Known.Zero =
1525  Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1526  Known.One =
1527  Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1528  break;
1529  }
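  // Worked illustration (ours): for i8 operands, fshr(X, Y, 3) yields the
  // low 3 bits of X (as the high bits of the result) followed by the high
  // 5 bits of Y, which is exactly fshl(X, Y, 8 - 3) = fshl(X, Y, 5). That
  // equivalence is what the normalization above relies on before the
  // shl/lshr recombination assembles the known bits.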
1530  case Intrinsic::uadd_sat:
1531  case Intrinsic::usub_sat: {
1532  bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1533  computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1534  computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1535 
1536  // Add: Leading ones of either operand are preserved.
1537  // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1538  // as leading zeros in the result.
1539  unsigned LeadingKnown;
1540  if (IsAdd)
1541  LeadingKnown = std::max(Known.countMinLeadingOnes(),
1542  Known2.countMinLeadingOnes());
1543  else
1544  LeadingKnown = std::max(Known.countMinLeadingZeros(),
1545  Known2.countMinLeadingOnes());
1546 
1547  Known = KnownBits::computeForAddSub(
1548  IsAdd, /* NSW */ false, Known, Known2);
1549 
1550  // We select between the operation result and all-ones/zero
1551  // respectively, so we can preserve known ones/zeros.
1552  if (IsAdd) {
1553  Known.One.setHighBits(LeadingKnown);
1554  Known.Zero.clearAllBits();
1555  } else {
1556  Known.Zero.setHighBits(LeadingKnown);
1557  Known.One.clearAllBits();
1558  }
1559  break;
1560  }
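  // Hedged example (ours): for i8 uadd.sat, if one operand is known to have
  // its top three bits set (so it is u>= 0xE0), the saturating sum can only
  // be u>= that operand, so the result's top three bits are ones as well.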
1561  case Intrinsic::x86_sse42_crc32_64_64:
1562  Known.Zero.setBitsFrom(32);
1563  break;
1564  }
1565  }
1566  break;
1567  case Instruction::ExtractElement:
1568  // Look through extract element. At the moment we keep this simple and skip
1569  // tracking the specific element. But at least we might find information
1570  // valid for all elements of the vector (for example if the vector is sign
1571  // extended, shifted, etc).
1572  computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1573  break;
1574  case Instruction::ExtractValue:
1575  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1576  const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1577  if (EVI->getNumIndices() != 1) break;
1578  if (EVI->getIndices()[0] == 0) {
1579  switch (II->getIntrinsicID()) {
1580  default: break;
1581  case Intrinsic::uadd_with_overflow:
1582  case Intrinsic::sadd_with_overflow:
1583  computeKnownBitsAddSub(true, II->getArgOperand(0),
1584  II->getArgOperand(1), false, Known, Known2,
1585  Depth, Q);
1586  break;
1587  case Intrinsic::usub_with_overflow:
1588  case Intrinsic::ssub_with_overflow:
1589  computeKnownBitsAddSub(false, II->getArgOperand(0),
1590  II->getArgOperand(1), false, Known, Known2,
1591  Depth, Q);
1592  break;
1593  case Intrinsic::umul_with_overflow:
1594  case Intrinsic::smul_with_overflow:
1595  computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1596  Known, Known2, Depth, Q);
1597  break;
1598  }
1599  }
1600  }
1601  }
1602 }
1603 
1604 /// Determine which bits of V are known to be either zero or one and return
1605 /// them.
1606 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1607  KnownBits Known(getBitWidth(V->getType(), Q.DL));
1608  computeKnownBits(V, Known, Depth, Q);
1609  return Known;
1610 }
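// Hedged usage sketch (ours; the public overload is declared in
// ValueTracking.h): external clients normally call the DataLayout-based
// wrapper rather than these internal Query-based helpers, e.g.
//   KnownBits Known = computeKnownBits(V, DL, /*Depth=*/0, &AC, CxtI, &DT);
//   if (Known.isNonNegative())
//     ; // the sign bit of V is known clear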
1611 
1612 /// Determine which bits of V are known to be either zero or one and return
1613 /// them in the Known bit set.
1614 ///
1615 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1616 /// we cannot optimize based on the assumption that it is zero without changing
1617 /// it to be an explicit zero. If we don't change it to zero, other code could
1618 /// be optimized based on the contradictory assumption that it is non-zero.
1619 /// Because instcombine aggressively folds operations with undef args anyway,
1620 /// this won't lose us code quality.
1621 ///
1622 /// This function is defined on values with integer type, values with pointer
1623 /// type, and vectors of integers. In the case
1624 /// where V is a vector, the known zero and known one values are the
1625 /// same width as the vector element, and a bit is set only if it is true
1626 /// for all of the elements in the vector.
1627 void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
1628  const Query &Q) {
1629  assert(V && "No Value?");
1630  assert(Depth <= MaxDepth && "Limit Search Depth");
1631  unsigned BitWidth = Known.getBitWidth();
1632 
1633  assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
1634  V->getType()->isPtrOrPtrVectorTy()) &&
1635  "Not integer or pointer type!");
1636 
1637  Type *ScalarTy = V->getType()->getScalarType();
1638  unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
1639  Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
1640  assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
1641  (void)BitWidth;
1642  (void)ExpectedWidth;
1643 
1644  const APInt *C;
1645  if (match(V, m_APInt(C))) {
1646  // We know all of the bits for a scalar constant or a splat vector constant!
1647  Known.One = *C;
1648  Known.Zero = ~Known.One;
1649  return;
1650  }
1651  // Null and aggregate-zero are all-zeros.
1652  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1653  Known.setAllZero();
1654  return;
1655  }
1656  // Handle a constant vector by taking the intersection of the known bits of
1657  // each element.
1658  if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1659  // We know that CDS must be a vector of integers. Take the intersection of
1660  // each element.
1661  Known.Zero.setAllBits(); Known.One.setAllBits();
1662  for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1663  APInt Elt = CDS->getElementAsAPInt(i);
1664  Known.Zero &= ~Elt;
1665  Known.One &= Elt;
1666  }
1667  return;
1668  }
1669 
1670  if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1671  // We know that CV must be a vector of integers. Take the intersection of
1672  // each element.
1673  Known.Zero.setAllBits(); Known.One.setAllBits();
1674  for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1675  Constant *Element = CV->getAggregateElement(i);
1676  auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1677  if (!ElementCI) {
1678  Known.resetAll();
1679  return;
1680  }
1681  const APInt &Elt = ElementCI->getValue();
1682  Known.Zero &= ~Elt;
1683  Known.One &= Elt;
1684  }
1685  return;
1686  }
1687 
1688  // Start out not knowing anything.
1689  Known.resetAll();
1690 
1691  // We can't imply anything about undefs.
1692  if (isa<UndefValue>(V))
1693  return;
1694 
1695  // There's no point in looking through other users of ConstantData for
1696  // assumptions. Confirm that we've handled them all.
1697  assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1698 
1699  // Limit search depth.
1700  // All recursive calls that increase depth must come after this.
1701  if (Depth == MaxDepth)
1702  return;
1703 
1704  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1705  // the bits of its aliasee.
1706  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1707  if (!GA->isInterposable())
1708  computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1709  return;
1710  }
1711 
1712  if (const Operator *I = dyn_cast<Operator>(V))
1713  computeKnownBitsFromOperator(I, Known, Depth, Q);
1714 
1715  // Aligned pointers have trailing zeros - refine Known.Zero set
1716  if (V->getType()->isPointerTy()) {
1717  unsigned Align = V->getPointerAlignment(Q.DL);
1718  if (Align)
1719  Known.Zero.setLowBits(countTrailingZeros(Align));
1720  }
1721 
1722  // computeKnownBitsFromAssume strictly refines Known.
1723  // Therefore, we run them after computeKnownBitsFromOperator.
1724 
1725  // Check whether a nearby assume intrinsic can determine some known bits.
1726  computeKnownBitsFromAssume(V, Known, Depth, Q);
1727 
1728  assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1729 }
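// Illustrative flow on assumed IR (ours): for
//   %p = alloca i64, align 8
// the operator walk above learns nothing about %p itself, the pointer
// alignment step then sets the three low bits of Known.Zero, and a
// dominating llvm.assume, if one exists, may strictly refine that further.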
1730 
1731 /// Return true if the given value is known to have exactly one
1732 /// bit set when defined. For vectors return true if every element is known to
1733 /// be a power of two when defined. Supports values with integer or pointer
1734 /// types and vectors of integers.
1735 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1736  const Query &Q) {
1737  assert(Depth <= MaxDepth && "Limit Search Depth");
1738 
1739  // Attempt to match against constants.
1740  if (OrZero && match(V, m_Power2OrZero()))
1741  return true;
1742  if (match(V, m_Power2()))
1743  return true;
1744 
1745  // 1 << X is clearly a power of two if the one is not shifted off the end. If
1746  // it is shifted off the end then the result is undefined.
1747  if (match(V, m_Shl(m_One(), m_Value())))
1748  return true;
1749 
1750  // (signmask) >>l X is clearly a power of two if the one is not shifted off
1751  // the bottom. If it is shifted off the bottom then the result is undefined.
1752  if (match(V, m_LShr(m_SignMask(), m_Value())))
1753  return true;
1754 
1755  // The remaining tests are all recursive, so bail out if we hit the limit.
1756  if (Depth++ == MaxDepth)
1757  return false;
1758 
1759  Value *X = nullptr, *Y = nullptr;
1760  // A shift left or a logical shift right of a power of two is a power of two
1761  // or zero.
1762  if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1763  match(V, m_LShr(m_Value(X), m_Value()))))
1764  return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1765 
1766  if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1767  return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1768 
1769  if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1770  return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1771  isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1772 
1773  if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1774  // A power of two and'd with anything is a power of two or zero.
1775  if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1776  isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1777  return true;
1778  // X & (-X) is always a power of two or zero.
1779  if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1780  return true;
1781  return false;
1782  }
1783 
1784  // Adding a power-of-two or zero to the same power-of-two or zero yields
1785  // either the original power-of-two, a larger power-of-two or zero.
1786  if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1787  const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1788  if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
1789  Q.IIQ.hasNoSignedWrap(VOBO)) {
1790  if (match(X, m_And(m_Specific(Y), m_Value())) ||
1791  match(X, m_And(m_Value(), m_Specific(Y))))
1792  if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1793  return true;
1794  if (match(Y, m_And(m_Specific(X), m_Value())) ||
1795  match(Y, m_And(m_Value(), m_Specific(X))))
1796  if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1797  return true;
1798 
1799  unsigned BitWidth = V->getType()->getScalarSizeInBits();
1800  KnownBits LHSBits(BitWidth);
1801  computeKnownBits(X, LHSBits, Depth, Q);
1802 
1803  KnownBits RHSBits(BitWidth);
1804  computeKnownBits(Y, RHSBits, Depth, Q);
1805  // If i8 V is a power of two or zero:
1806  // ZeroBits: 1 1 1 0 1 1 1 1
1807  // ~ZeroBits: 0 0 0 1 0 0 0 0
1808  if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
1809  // If OrZero isn't set, we cannot give back a zero result.
1810  // Make sure either the LHS or RHS has a bit set.
1811  if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
1812  return true;
1813  }
1814  }
1815 
1816  // An exact divide or right shift can only shift off zero bits, so the result
1817  // is a power of two only if the first operand is a power of two and not
1818  // copying a sign bit (sdiv int_min, 2).
1819  if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1820  match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1821  return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1822  Depth, Q);
1823  }
1824 
1825  return false;
1826 }
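// A few shapes the matcher accepts, written as illustrative IR (ours):
//   %a = shl i32 1, %n            ; power of two, if the 1 is not shifted out
//   %b = lshr i32 -2147483648, %n ; signmask >>l n, likewise a power of two
//   %c = and i32 %x, %a           ; with OrZero: a power of two or zero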
1827 
1828 /// Test whether a GEP's result is known to be non-null.
1829 ///
1830 /// Uses properties inherent in a GEP to try to determine whether it is known
1831 /// to be non-null.
1832 ///
1833 /// Currently this routine does not support vector GEPs.
1834 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1835  const Query &Q) {
1836  const Function *F = nullptr;
1837  if (const Instruction *I = dyn_cast<Instruction>(GEP))
1838  F = I->getFunction();
1839 
1840  if (!GEP->isInBounds() ||
1841  NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
1842  return false;
1843 
1844  // FIXME: Support vector-GEPs.
1845  assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1846 
1847  // If the base pointer is non-null, we cannot walk to a null address with an
1848  // inbounds GEP in address space zero.
1849  if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1850  return true;
1851 
1852  // Walk the GEP operands and see if any operand introduces a non-zero offset.
1853  // If so, then the GEP cannot produce a null pointer, as doing so would
1854  // inherently violate the inbounds contract within address space zero.
1855  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1856  GTI != GTE; ++GTI) {
1857  // Struct types are easy -- they must always be indexed by a constant.
1858  if (StructType *STy = GTI.getStructTypeOrNull()) {
1859  ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1860  unsigned ElementIdx = OpC->getZExtValue();
1861  const StructLayout *SL = Q.DL.getStructLayout(STy);
1862  uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1863  if (ElementOffset > 0)
1864  return true;
1865  continue;
1866  }
1867 
1868  // If we have a zero-sized type, the index doesn't matter. Keep looping.
1869  if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1870  continue;
1871 
1872  // Fast path the constant operand case both for efficiency and so we don't
1873  // increment Depth when just zipping down an all-constant GEP.
1874  if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1875  if (!OpC->isZero())
1876  return true;
1877  continue;
1878  }
1879 
1880  // We post-increment Depth here because while isKnownNonZero increments it
1881  // as well, when we pop back up that increment won't persist. We don't want
1882  // to recurse 10k times just because we have 10k GEP operands. We don't
1883  // bail completely out because we want to handle constant GEPs regardless
1884  // of depth.
1885  if (Depth++ >= MaxDepth)
1886  continue;
1887 
1888  if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1889  return true;
1890  }
1891 
1892  return false;
1893 }
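// Sketch of the reasoning on assumed IR (not taken from this file):
//   %q = getelementptr inbounds { i32, i32 }, { i32, i32 }* %base, i64 0, i32 1
// The struct-field offset is 4 > 0, so in address space zero the inbounds
// contract already rules out a null result even when %base is unknown.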
1894 
1895 static bool isKnownNonNullFromDominatingCondition(const Value *V,
1896  const Instruction *CtxI,
1897  const DominatorTree *DT) {
1898  assert(V->getType()->isPointerTy() && "V must be pointer type");
1899  assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
1900 
1901  if (!CtxI || !DT)
1902  return false;
1903 
1904  unsigned NumUsesExplored = 0;
1905  for (auto *U : V->users()) {
1906  // Avoid massive lists
1907  if (NumUsesExplored >= DomConditionsMaxUses)
1908  break;
1909  NumUsesExplored++;
1910 
1911  // If the value is used as an argument to a call or invoke, then argument
1912  // attributes may provide an answer about null-ness.
1913  if (auto CS = ImmutableCallSite(U))
1914  if (auto *CalledFunc = CS.getCalledFunction())
1915  for (const Argument &Arg : CalledFunc->args())
1916  if (CS.getArgOperand(Arg.getArgNo()) == V &&
1917  Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
1918  return true;
1919 
1920  // Consider only compare instructions uniquely controlling a branch
1921  CmpInst::Predicate Pred;
1922  if (!match(const_cast<User *>(U),
1923  m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
1924  (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
1925  continue;
1926 
1927  SmallVector<const User *, 4> WorkList;
1928  SmallPtrSet<const User *, 4> Visited;
1929  for (auto *CmpU : U->users()) {
1930  assert(WorkList.empty() && "Should be!");
1931  if (Visited.insert(CmpU).second)
1932  WorkList.push_back(CmpU);
1933 
1934  while (!WorkList.empty()) {
1935  auto *Curr = WorkList.pop_back_val();
1936 
1937  // If a user is an AND, add all its users to the work list. We only
1938  // propagate "pred != null" condition through AND because it is only
1939  // correct to assume that all conditions of AND are met in true branch.
1940  // TODO: Support similar logic of OR and EQ predicate?
1941  if (Pred == ICmpInst::ICMP_NE)
1942  if (auto *BO = dyn_cast<BinaryOperator>(Curr))
1943  if (BO->getOpcode() == Instruction::And) {
1944  for (auto *BOU : BO->users())
1945  if (Visited.insert(BOU).second)
1946  WorkList.push_back(BOU);
1947  continue;
1948  }
1949 
1950  if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
1951  assert(BI->isConditional() && "uses a comparison!");
1952 
1953  BasicBlock *NonNullSuccessor =
1954  BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
1955  BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
1956  if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
1957  return true;
1958  } else if (Pred == ICmpInst::ICMP_NE && isGuard(Curr) &&
1959  DT->dominates(cast<Instruction>(Curr), CtxI)) {
1960  return true;
1961  }
1962  }
1963  }
1964  }
1965 
1966  return false;
1967 }
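// Illustrative CFG (ours) for the dominating-condition case:
//   %c = icmp ne i32* %p, null
//   br i1 %c, label %nonnull, label %exit
// When the edge into %nonnull dominates the block containing CtxI, %p is
// reported non-null there; the AND walk above extends this to guards of
// the form `%c & %other`.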
1968 
1969 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1970 /// ensure that the value it's attached to is never equal to 'Value'?
1971 /// 'RangeType' is the type of the value described by the range.
1972 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1973  const unsigned NumRanges = Ranges->getNumOperands() / 2;
1974  assert(NumRanges >= 1);
1975  for (unsigned i = 0; i < NumRanges; ++i) {
1976  ConstantInt *Lower =
1977  mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1978  ConstantInt *Upper =
1979  mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1980  ConstantRange Range(Lower->getValue(), Upper->getValue());
1981  if (Range.contains(Value))
1982  return false;
1983  }
1984  return true;
1985 }
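// For instance (an assumed example, not from this file), a load tagged
//   %v = load i32, i32* %p, !range !0    ; !0 = !{i32 1, i32 256}
// carries the half-open range [1, 256), which does not contain zero, so
// this helper returns true when queried with the value 0.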
1986 
1987 /// Return true if the given value is known to be non-zero when defined. For
1988 /// vectors, return true if every element is known to be non-zero when
1989 /// defined. For pointers, if the context instruction and dominator tree are
1990 /// specified, perform context-sensitive analysis and return true if the
1991 /// pointer couldn't possibly be null at the specified instruction.
1992 /// Supports values with integer or pointer type and vectors of integers.
1993 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1994  if (auto *C = dyn_cast<Constant>(V)) {
1995  if (C->isNullValue())
1996  return false;
1997  if (isa<ConstantInt>(C))
1998  // Must be non-zero due to null test above.
1999  return true;
2000 
2001  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2002  // See the comment for IntToPtr/PtrToInt instructions below.
2003  if (CE->getOpcode() == Instruction::IntToPtr ||
2004  CE->getOpcode() == Instruction::PtrToInt)
2005  if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType()) <=
2006  Q.DL.getTypeSizeInBits(CE->getType()))
2007  return isKnownNonZero(CE->getOperand(0), Depth, Q);
2008  }
2009 
2010  // For constant vectors, check that all elements are undefined or known
2011  // non-zero to determine that the whole vector is known non-zero.
2012  if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
2013  for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2014  Constant *Elt = C->getAggregateElement(i);
2015  if (!Elt || Elt->isNullValue())
2016  return false;
2017  if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2018  return false;
2019  }
2020  return true;
2021  }
2022 
2023  // A global variable in address space 0 is non null unless extern weak
2024  // or an absolute symbol reference. Other address spaces may have null as a
2025  // valid address for a global, so we can't assume anything.
2026  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2027  if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2028  GV->getType()->getAddressSpace() == 0)
2029  return true;
2030  } else
2031  return false;
2032  }
2033 
2034  if (auto *I = dyn_cast<Instruction>(V)) {
2035  if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2036  // If the possible ranges don't contain zero, then the value is
2037  // definitely non-zero.
2038  if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2039  const APInt ZeroValue(Ty->getBitWidth(), 0);
2040  if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2041  return true;
2042  }
2043  }
2044  }
2045 
2046  // Some of the tests below are recursive, so bail out if we hit the limit.
2047  if (Depth++ >= MaxDepth)
2048  return false;
2049 
2050  // Check for pointer simplifications.
2051  if (V->getType()->isPointerTy()) {
2052  // Alloca never returns null, malloc might.
2053  if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2054  return true;
2055 
2056  // A byval, inalloca, or nonnull argument is never null.
2057  if (const Argument *A = dyn_cast<Argument>(V))
2058  if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
2059  return true;
2060 
2061  // A Load tagged with nonnull metadata is never null.
2062  if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2063  if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2064  return true;
2065 
2066  if (const auto *Call = dyn_cast<CallBase>(V)) {
2067  if (Call->isReturnNonNull())
2068  return true;
2069  if (const auto *RP = getArgumentAliasingToReturnedPointer(Call))
2070  return isKnownNonZero(RP, Depth, Q);
2071  }
2072  }
2073 
2074 
2075  // Check for recursive pointer simplifications.
2076  if (V->getType()->isPointerTy()) {
2077  if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2078  return true;
2079 
2080  // Look through bitcast operations, GEPs, and int2ptr instructions as they
2081  // do not alter the value, or at least not the nullness property of the
2082  // value, e.g., int2ptr is allowed to zero/sign extend the value.
2083  //
2084  // Note that we have to take special care to avoid looking through
2085  // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2086  // as casts that can alter the value, e.g., AddrSpaceCasts.
2087  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2088  if (isGEPKnownNonNull(GEP, Depth, Q))
2089  return true;
2090 
2091  if (auto *BCO = dyn_cast<BitCastOperator>(V))
2092  return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2093 
2094  if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2095  if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()) <=
2096  Q.DL.getTypeSizeInBits(I2P->getDestTy()))
2097  return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2098  }
2099 
2100  // Similar to int2ptr above, we can look through ptr2int here if the cast
2101  // is a no-op or an extend and not a truncate.
2102  if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2103  if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()) <=
2104  Q.DL.getTypeSizeInBits(P2I->getDestTy()))
2105  return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2106 
2107  unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2108 
2109  // X | Y != 0 if X != 0 or Y != 0.
2110  Value *X = nullptr, *Y = nullptr;
2111  if (match(V, m_Or(m_Value(X), m_Value(Y))))
2112  return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
2113 
2114  // ext X != 0 if X != 0.
2115  if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2116  return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2117 
2118  // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
2119  // if the lowest bit is shifted off the end.
2120  if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2121  // shl nuw can't remove any non-zero bits.
2122  const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2123  if (Q.IIQ.hasNoUnsignedWrap(BO))
2124  return isKnownNonZero(X, Depth, Q);
2125 
2126  KnownBits Known(BitWidth);
2127  computeKnownBits(X, Known, Depth, Q);
2128  if (Known.One[0])
2129  return true;
2130  }
2131  // shr X, Y != 0 if X is negative. Note that the value of the shift is not
2132  // defined if the sign bit is shifted off the end.
2133  else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2134  // shr exact can only shift out zero bits.
2135  const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2136  if (BO->isExact())
2137  return isKnownNonZero(X, Depth, Q);
2138 
2139  KnownBits Known = computeKnownBits(X, Depth, Q);
2140  if (Known.isNegative())
2141  return true;
2142 
2143  // If the shifter operand is a constant, and all of the bits shifted
2144  // out are known to be zero, and X is known non-zero then at least one
2145  // non-zero bit must remain.
2146  if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2147  auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2148  // Is there a known one in the portion not shifted out?
2149  if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2150  return true;
2151  // Are all the bits to be shifted out known zero?
2152  if (Known.countMinTrailingZeros() >= ShiftVal)
2153  return isKnownNonZero(X, Depth, Q);
2154  }
2155  }
2156  // div exact can only produce a zero if the dividend is zero.
2157  else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2158  return isKnownNonZero(X, Depth, Q);
2159  }
2160  // X + Y.
2161  else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2162  KnownBits XKnown = computeKnownBits(X, Depth, Q);
2163  KnownBits YKnown = computeKnownBits(Y, Depth, Q);
2164 
2165  // If X and Y are both non-negative (as signed values) then their sum is not
2166  // zero unless both X and Y are zero.
2167  if (XKnown.isNonNegative() && YKnown.isNonNegative())
2168  if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
2169  return true;
2170 
2171  // If X and Y are both negative (as signed values) then their sum is not
2172  // zero unless both X and Y equal INT_MIN.
2173  if (XKnown.isNegative() && YKnown.isNegative()) {
2174  APInt Mask = APInt::getSignedMaxValue(BitWidth);
2175  // The sign bit of X is set. If some other bit is set then X is not equal
2176  // to INT_MIN.
2177  if (XKnown.One.intersects(Mask))
2178  return true;
2179  // The sign bit of Y is set. If some other bit is set then Y is not equal
2180  // to INT_MIN.
2181  if (YKnown.One.intersects(Mask))
2182  return true;
2183  }
2184 
2185  // The sum of a non-negative number and a power of two is not zero.
2186  if (XKnown.isNonNegative() &&
2187  isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2188  return true;
2189  if (YKnown.isNonNegative() &&
2190  isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2191  return true;
2192  }
2193  // X * Y.
2194  else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2195  const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2196  // If X and Y are non-zero then so is X * Y as long as the multiplication
2197  // does not overflow.
2198  if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2199  isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
2200  return true;
2201  }
2202  // (C ? X : Y) != 0 if X != 0 and Y != 0.
2203  else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2204  if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
2205  isKnownNonZero(SI->getFalseValue(), Depth, Q))
2206  return true;
2207  }
2208  // PHI
2209  else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2210  // Try and detect a recurrence that monotonically increases from a
2211  // starting value, as these are common as induction variables.
2212  if (PN->getNumIncomingValues() == 2) {
2213  Value *Start = PN->getIncomingValue(0);
2214  Value *Induction = PN->getIncomingValue(1);
2215  if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2216  std::swap(Start, Induction);
2217  if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2218  if (!C->isZero() && !C->isNegative()) {
2219  ConstantInt *X;
2220  if (Q.IIQ.UseInstrInfo &&
2221  (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2222  match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2223  !X->isNegative())
2224  return true;
2225  }
2226  }
2227  }
2228  // Check if all incoming values are non-zero constant.
2229  bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
2230  return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
2231  });
2232  if (AllNonZeroConstants)
2233  return true;
2234  }
2235 
2236  KnownBits Known(BitWidth);
2237  computeKnownBits(V, Known, Depth, Q);
2238  return Known.One != 0;
2239 }
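// Hedged usage sketch (ours): a simplification might use this to discharge
// a guard, e.g.
//   if (isKnownNonZero(Divisor, /*Depth=*/0, Q))
//     ; // a udiv/urem by Divisor cannot divide by zero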
2240 
2241 /// Return true if V2 == V1 + X, where X is known non-zero.
2242 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2243  const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2244  if (!BO || BO->getOpcode() != Instruction::Add)
2245  return false;
2246  Value *Op = nullptr;
2247  if (V2 == BO->getOperand(0))
2248  Op = BO->getOperand(1);
2249  else if (V2 == BO->getOperand(1))
2250  Op = BO->getOperand(0);
2251  else
2252  return false;
2253  return isKnownNonZero(Op, 0, Q);
2254 }
2255 
2256 /// Return true if it is known that V1 != V2.
2257 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2258  if (V1 == V2)
2259  return false;
2260  if (V1->getType() != V2->getType())
2261  // We can't look through casts yet.
2262  return false;
2263  if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2264  return true;
2265 
2266  if (V1->getType()->isIntOrIntVectorTy()) {
2267  // Are any known bits in V1 contradictory to known bits in V2? If V1
2268  // has a known zero where V2 has a known one, they must not be equal.
2269  KnownBits Known1 = computeKnownBits(V1, 0, Q);
2270  KnownBits Known2 = computeKnownBits(V2, 0, Q);
2271 
2272  if (Known1.Zero.intersects(Known2.One) ||
2273  Known2.Zero.intersects(Known1.One))
2274  return true;
2275  }
2276  return false;
2277 }
2278 
2279 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
2280 /// simplify operations downstream. Mask is known to be zero for bits that V
2281 /// cannot have.
2282 ///
2283 /// This function is defined on values with integer type, values with pointer
2284 /// type, and vectors of integers. In the case
2285 /// where V is a vector, the mask, known zero, and known one values are the
2286 /// same width as the vector element, and the bit is set only if it is true
2287 /// for all of the elements in the vector.
2288 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2289  const Query &Q) {
2290  KnownBits Known(Mask.getBitWidth());
2291  computeKnownBits(V, Known, Depth, Q);
2292  return Mask.isSubsetOf(Known.Zero);
2293 }
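// Hedged example (ours): asking whether V is a multiple of 4 amounts to
// checking that its two low bits are zero:
//   MaskedValueIsZero(V, APInt::getLowBitsSet(BitWidth, 2), /*Depth=*/0, Q)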
2294 
2295 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2296 // Returns the input and lower/upper bounds.
2297 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2298  const APInt *&CLow, const APInt *&CHigh) {
2299  assert(isa<Operator>(Select) &&
2300  cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2301  "Input should be a Select!");
2302 
2303  const Value *LHS, *RHS, *LHS2, *RHS2;
2304  SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2305  if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2306  return false;
2307 
2308  if (!match(RHS, m_APInt(CLow)))
2309  return false;
2310 
2311  SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2312  if (getInverseMinMaxFlavor(SPF) != SPF2)
2313  return false;
2314 
2315  if (!match(RHS2, m_APInt(CHigh)))
2316  return false;
2317 
2318  if (SPF == SPF_SMIN)
2319  std::swap(CLow, CHigh);
2320 
2321  In = LHS2;
2322  return CLow->sle(*CHigh);
2323 }
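// Written abstractly (our illustration): for the composition
//   %t = smin(%in, CHigh)  then  %v = smax(%t, CLow)
// the helper reports In = %in with bounds [CLow, CHigh], provided
// CLow <= CHigh as signed values; when the outer node is the SPF_SMIN of
// the mirrored form, CLow and CHigh are swapped above.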
2324 
2325 /// For vector constants, loop over the elements and find the constant with the
2326 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2327 /// or if any element was not analyzed; otherwise, return the count for the
2328 /// element with the minimum number of sign bits.
2329 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2330  unsigned TyBits) {
2331  const auto *CV = dyn_cast<Constant>(V);
2332  if (!CV || !CV->getType()->isVectorTy())
2333  return 0;
2334 
2335  unsigned MinSignBits = TyBits;
2336  unsigned NumElts = CV->getType()->getVectorNumElements();
2337  for (unsigned i = 0; i != NumElts; ++i) {
2338  // If we find a non-ConstantInt, bail out.
2339  auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2340  if (!Elt)
2341  return 0;
2342 
2343  MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2344  }
2345 
2346  return MinSignBits;
2347 }
2348 
2349 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2350  const Query &Q);
2351 
2352 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2353  const Query &Q) {
2354  unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2355  assert(Result > 0 && "At least one sign bit needs to be present!");
2356  return Result;
2357 }
2358 
2359 /// Return the number of times the sign bit of the register is replicated into
2360 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2361 /// (itself), but other cases can give us information. For example, immediately
2362 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2363 /// other, so we return 3. For vectors, return the number of sign bits for the
2364 /// vector element with the minimum number of known sign bits.
2365 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2366  const Query &Q) {
2367  assert(Depth <= MaxDepth && "Limit Search Depth");
2368 
2369  // We return the minimum number of sign bits that are guaranteed to be present
2370  // in V, so for undef we have to conservatively return 1. We don't have the
2371  // same behavior for poison though -- that's a FIXME today.
2372 
2373  Type *ScalarTy = V->getType()->getScalarType();
2374  unsigned TyBits = ScalarTy->isPointerTy() ?
2375  Q.DL.getIndexTypeSizeInBits(ScalarTy) :
2376  Q.DL.getTypeSizeInBits(ScalarTy);
2377 
2378  unsigned Tmp, Tmp2;
2379  unsigned FirstAnswer = 1;
2380 
2381  // Note that ConstantInt is handled by the general computeKnownBits case
2382  // below.
2383 
2384  if (Depth == MaxDepth)
2385  return 1; // Limit search depth.
2386 
2387  const Operator *U = dyn_cast<Operator>(V);
2388  switch (Operator::getOpcode(V)) {
2389  default: break;
2390  case Instruction::SExt:
2391  Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2392  return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2393 
2394  case Instruction::SDiv: {
2395  const APInt *Denominator;
2396  // sdiv X, C -> adds log(C) sign bits.
2397  if (match(U->getOperand(1), m_APInt(Denominator))) {
2398 
2399  // Ignore non-positive denominator.
2400  if (!Denominator->isStrictlyPositive())
2401  break;
2402 
2403  // Calculate the incoming numerator bits.
2404  unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2405 
2406  // Add floor(log(C)) bits to the numerator bits.
2407  return std::min(TyBits, NumBits + Denominator->logBase2());
2408  }
2409  break;
2410  }
2411 
2412  case Instruction::SRem: {
2413  const APInt *Denominator;
2414  // srem X, C -> we know that the result is within [-C+1,C) when C is a
2415  // positive constant. This lets us put a lower bound on the number of sign
2416  // bits.
2417  if (match(U->getOperand(1), m_APInt(Denominator))) {
2418 
2419  // Ignore non-positive denominator.
2420  if (!Denominator->isStrictlyPositive())
2421  break;
2422 
2423  // Calculate the incoming numerator bits. SRem by a positive constant
2424  // can't lower the number of sign bits.
2425  unsigned NumrBits =
2426  ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2427 
2428  // Calculate the leading sign bit constraints by examining the
2429  // denominator. Given that the denominator is positive, there are two
2430  // cases:
2431  //
2432  // 1. the numerator is positive. The result range is [0,C) and [0,C) u<
2433  // (1 << ceilLogBase2(C)).
2434  //
2435  // 2. the numerator is negative. Then the result range is (-C,0] and
2436  // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2437  //
2438  // Thus a lower bound on the number of sign bits is `TyBits -
2439  // ceilLogBase2(C)`.
2440 
2441  unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2442  return std::max(NumrBits, ResBits);
2443  }
2444  break;
2445  }
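  // Worked example (ours): for an i8 srem by the positive constant 10, the
  // result lies in (-10, 10) and ceilLogBase2(10) == 4, so at least
  // 8 - 4 = 4 sign bits are guaranteed regardless of the numerator.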
2446 
2447  case Instruction::AShr: {
2448  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2449  // ashr X, C -> adds C sign bits. Vectors too.
2450  const APInt *ShAmt;
2451  if (match(U->getOperand(1), m_APInt(ShAmt))) {
2452  if (ShAmt->uge(TyBits))
2453  break; // Bad shift.
2454  unsigned ShAmtLimited = ShAmt->getZExtValue();
2455  Tmp += ShAmtLimited;
2456  if (Tmp > TyBits) Tmp = TyBits;
2457  }
2458  return Tmp;
2459  }
2460  case Instruction::Shl: {
2461  const APInt *ShAmt;
2462  if (match(U->getOperand(1), m_APInt(ShAmt))) {
2463  // shl destroys sign bits.
2464  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2465  if (ShAmt->uge(TyBits) || // Bad shift.
2466  ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2467  Tmp2 = ShAmt->getZExtValue();
2468  return Tmp - Tmp2;
2469  }
2470  break;
2471  }
2472  case Instruction::And:
2473  case Instruction::Or:
2474  case Instruction::Xor: // NOT is handled here.
2475  // Logical binary ops preserve the number of sign bits at the worst.
2476  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2477  if (Tmp != 1) {
2478  Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2479  FirstAnswer = std::min(Tmp, Tmp2);
2480  // We computed what we know about the sign bits as our first
2481  // answer. Now proceed to the generic code that uses
2482  // computeKnownBits, and pick whichever answer is better.
2483  }
2484  break;
2485 
2486  case Instruction::Select: {
2487  // If we have a clamp pattern, we know that the number of sign bits will be
2488  // the minimum of the clamp min/max range.
2489  const Value *X;
2490  const APInt *CLow, *CHigh;
2491  if (isSignedMinMaxClamp(U, X, CLow, CHigh))
2492  return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
2493 
2494  Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2495  if (Tmp == 1) break;
2496  Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2497  return std::min(Tmp, Tmp2);
2498  }
2499 
2500  case Instruction::Add:
2501  // Add can have at most one carry bit. Thus we know that the output
2502  // is, at worst, one more bit than the inputs.
2503  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2504  if (Tmp == 1) break;
2505 
2506  // Special case decrementing a value (ADD X, -1):
2507  if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2508  if (CRHS->isAllOnesValue()) {
2509  KnownBits Known(TyBits);
2510  computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2511 
2512  // If the input is known to be 0 or 1, the output is 0/-1, which is all
2513  // sign bits set.
2514  if ((Known.Zero | 1).isAllOnesValue())
2515  return TyBits;
2516 
2517  // If we are subtracting one from a positive number, there is no carry
2518  // out of the result.
2519  if (Known.isNonNegative())
2520  return Tmp;
2521  }
2522 
2523  Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2524  if (Tmp2 == 1) break;
2525  return std::min(Tmp, Tmp2)-1;
2526 
2527  case Instruction::Sub:
2528  Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2529  if (Tmp2 == 1) break;
2530 
2531  // Handle NEG.
2532  if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2533  if (CLHS->isNullValue()) {
2534  KnownBits Known(TyBits);
2535  computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2536  // If the input is known to be 0 or 1, the output is 0/-1, which is all
2537  // sign bits set.
2538  if ((Known.Zero | 1).isAllOnesValue())
2539  return TyBits;
2540 
2541  // If the input is known to be positive (the sign bit is known clear),
2542  // the output of the NEG has the same number of sign bits as the input.
2543  if (Known.isNonNegative())
2544  return Tmp2;
2545 
2546  // Otherwise, we treat this like a SUB.
2547  }
2548 
2549  // Sub can have at most one carry bit. Thus we know that the output
2550  // is, at worst, one more bit than the inputs.
2551  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2552  if (Tmp == 1) break;
2553  return std::min(Tmp, Tmp2)-1;
2554 
2555  case Instruction::Mul: {
2556  // The output of the Mul can be at most twice the valid bits in the inputs.
2557  unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2558  if (SignBitsOp0 == 1) break;
2559  unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2560  if (SignBitsOp1 == 1) break;
2561  unsigned OutValidBits =
2562  (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2563  return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2564  }
2565 
2566  case Instruction::PHI: {
2567  const PHINode *PN = cast<PHINode>(U);
2568  unsigned NumIncomingValues = PN->getNumIncomingValues();
2569  // Don't analyze large in-degree PHIs.
2570  if (NumIncomingValues > 4) break;
2571  // Unreachable blocks may have zero-operand PHI nodes.
2572  if (NumIncomingValues == 0) break;
2573 
2574  // Take the minimum of all incoming values. This can't infinitely loop
2575  // because of our depth threshold.
2576  Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2577  for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2578  if (Tmp == 1) return Tmp;
2579  Tmp = std::min(
2580  Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2581  }
2582  return Tmp;
2583  }
2584 
2585  case Instruction::Trunc:
2586  // FIXME: it's tricky to do anything useful for this, but it is an important
2587  // case for targets like X86.
2588  break;
2589 
2590  case Instruction::ExtractElement:
2591  // Look through extract element. At the moment we keep this simple and skip
2592  // tracking the specific element. But at least we might find information
2593  // valid for all elements of the vector (for example if the vector is sign
2594  // extended, shifted, etc).
2595  return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2596 
2597  case Instruction::ShuffleVector: {
2598  // TODO: This is copied almost directly from the SelectionDAG version of
2599  // ComputeNumSignBits. It would be better if we could share common
2600  // code. If not, make sure that changes are translated to the DAG.
2601 
2602  // Collect the minimum number of sign bits that are shared by every vector
2603  // element referenced by the shuffle.
2604  auto *Shuf = cast<ShuffleVectorInst>(U);
2605  int NumElts = Shuf->getOperand(0)->getType()->getVectorNumElements();
2606  int NumMaskElts = Shuf->getMask()->getType()->getVectorNumElements();
2607  APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2608  for (int i = 0; i != NumMaskElts; ++i) {
2609  int M = Shuf->getMaskValue(i);
2610  assert(M < NumElts * 2 && "Invalid shuffle mask constant");
2611  // For undef elements, we don't know anything about the common state of
2612  // the shuffle result.
2613  if (M == -1)
2614  return 1;
2615  if (M < NumElts)
2616  DemandedLHS.setBit(M % NumElts);
2617  else
2618  DemandedRHS.setBit(M % NumElts);
2619  }
2620  Tmp = std::numeric_limits<unsigned>::max();
2621  if (!!DemandedLHS)
2622  Tmp = ComputeNumSignBits(Shuf->getOperand(0), Depth + 1, Q);
2623  if (!!DemandedRHS) {
2624  Tmp2 = ComputeNumSignBits(Shuf->getOperand(1), Depth + 1, Q);
2625  Tmp = std::min(Tmp, Tmp2);
2626  }
2627  // If we don't know anything, early out and try computeKnownBits fall-back.
2628  if (Tmp == 1)
2629  break;
2630  assert(Tmp <= V->getType()->getScalarSizeInBits() &&
2631  "Failed to determine minimum sign bits");
2632  return Tmp;
2633  }
2634  }
2635 
2636  // Finally, if we can prove that the top bits of the result are 0's or 1's,
2637  // use this information.
2638 
2639  // If we can examine all elements of a vector constant successfully, we're
2640  // done (we can't do any better than that). If not, keep trying.
2641  if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2642  return VecSignBits;
2643 
2644  KnownBits Known(TyBits);
2645  computeKnownBits(V, Known, Depth, Q);
2646 
2647  // If we know that the sign bit is either zero or one, determine the number of
2648  // identical bits in the top of the input value.
2649  return std::max(FirstAnswer, Known.countMinSignBits());
2650 }
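// Worked example (ours): after %a = ashr i32 %x, 2, even if %x carries only
// the single mandatory sign bit, the AShr case above reports 1 + 2 = 3,
// meaning the top three bits of %a all replicate the sign of %x.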
2651 
2652 /// This function computes the integer multiple of Base that equals V.
2653 /// If successful, it returns true and stores the multiple in
2654 /// Multiple. If unsuccessful, it returns false. It looks
2655 /// through SExt instructions only if LookThroughSExt is true.
2656 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2657  bool LookThroughSExt, unsigned Depth) {
2658  const unsigned MaxDepth = 6;
2659 
2660  assert(V && "No Value?");
2661  assert(Depth <= MaxDepth && "Limit Search Depth");
2662  assert(V->getType()->isIntegerTy() && "Not integer or pointer type!");
2663 
2664  Type *T = V->getType();
2665 
2666  ConstantInt *CI = dyn_cast<ConstantInt>(V);
2667 
2668  if (Base == 0)
2669  return false;
2670 
2671  if (Base == 1) {
2672  Multiple = V;
2673  return true;
2674  }
2675 
2676  ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2677  Constant *BaseVal = ConstantInt::get(T, Base);
2678  if (CO && CO == BaseVal) {
2679  // Multiple is 1.
2680  Multiple = ConstantInt::get(T, 1);
2681  return true;
2682  }
2683 
2684  if (CI && CI->getZExtValue() % Base == 0) {
2685  Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2686  return true;
2687  }
2688 
2689  if (Depth == MaxDepth) return false; // Limit search depth.
2690 
2691  Operator *I = dyn_cast<Operator>(V);
2692  if (!I) return false;
2693 
2694  switch (I->getOpcode()) {
2695  default: break;
2696  case Instruction::SExt:
2697  if (!LookThroughSExt) return false;
2698  // otherwise fall through to ZExt
2699  LLVM_FALLTHROUGH;
2700  case Instruction::ZExt:
2701  return ComputeMultiple(I->getOperand(0), Base, Multiple,
2702  LookThroughSExt, Depth+1);
2703  case Instruction::Shl:
2704  case Instruction::Mul: {
2705  Value *Op0 = I->getOperand(0);
2706  Value *Op1 = I->getOperand(1);
2707 
2708  if (I->getOpcode() == Instruction::Shl) {
2709  ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2710  if (!Op1CI) return false;
2711  // Turn Op0 << Op1 into Op0 * 2^Op1
2712  APInt Op1Int = Op1CI->getValue();
2713  uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2714  APInt API(Op1Int.getBitWidth(), 0);
2715  API.setBit(BitToSet);
2716  Op1 = ConstantInt::get(V->getContext(), API);
2717  }
2718 
2719  Value *Mul0 = nullptr;
2720  if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2721  if (Constant *Op1C = dyn_cast<Constant>(Op1))
2722  if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2723  if (Op1C->getType()->getPrimitiveSizeInBits() <
2724  MulC->getType()->getPrimitiveSizeInBits())
2725  Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2726  if (Op1C->getType()->getPrimitiveSizeInBits() >
2727  MulC->getType()->getPrimitiveSizeInBits())
2728  MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2729 
2730  // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2731  Multiple = ConstantExpr::getMul(MulC, Op1C);
2732  return true;
2733  }
2734 
2735  if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2736  if (Mul0CI->getValue() == 1) {
2737  // V == Base * Op1, so return Op1
2738  Multiple = Op1;
2739  return true;
2740  }
2741  }
2742 
2743  Value *Mul1 = nullptr;
2744  if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2745  if (Constant *Op0C = dyn_cast<Constant>(Op0))
2746  if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2747  if (Op0C->getType()->getPrimitiveSizeInBits() <
2748  MulC->getType()->getPrimitiveSizeInBits())
2749  Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2750  if (Op0C->getType()->getPrimitiveSizeInBits() >
2751  MulC->getType()->getPrimitiveSizeInBits())
2752  MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2753 
2754  // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2755  Multiple = ConstantExpr::getMul(MulC, Op0C);
2756  return true;
2757  }
2758 
2759  if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2760  if (Mul1CI->getValue() == 1) {
2761  // V == Base * Op0, so return Op0
2762  Multiple = Op0;
2763  return true;
2764  }
2765  }
2766  }
2767  }
2768 
2769  // We could not determine if V is a multiple of Base.
2770  return false;
2771 }
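// Hypothetical usage (ours):
//   Value *Multiple = nullptr;
//   if (ComputeMultiple(V, 8, Multiple, /*LookThroughSExt=*/false))
//     ; // V == 8 * Multiple, e.g. a size known to be a multiple of 8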
2772 
2773 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2774  const TargetLibraryInfo *TLI) {
2775  const Function *F = ICS.getCalledFunction();
2776  if (!F)
2777  return Intrinsic::not_intrinsic;
2778 
2779  if (F->isIntrinsic())
2780  return F->getIntrinsicID();
2781 
2782  if (!TLI)
2783  return Intrinsic::not_intrinsic;
2784 
2785  LibFunc Func;
2786  // We're going to make assumptions about the semantics of the functions, so
2787  // check that the target knows the function is available in this environment
2788  // and that it does not have local linkage.
2789  if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2790  return Intrinsic::not_intrinsic;
2791 
2792  if (!ICS.onlyReadsMemory())
2793  return Intrinsic::not_intrinsic;
2794 
2795  // Otherwise check if we have a call to a function that can be turned into a
2796  // vector intrinsic.
2797  switch (Func) {
2798  default:
2799  break;
2800  case LibFunc_sin:
2801  case LibFunc_sinf:
2802  case LibFunc_sinl:
2803  return Intrinsic::sin;
2804  case LibFunc_cos:
2805  case LibFunc_cosf:
2806  case LibFunc_cosl:
2807  return Intrinsic::cos;
2808  case LibFunc_exp:
2809  case LibFunc_expf:
2810  case LibFunc_expl:
2811  return Intrinsic::exp;
2812  case LibFunc_exp2:
2813  case LibFunc_exp2f:
2814  case LibFunc_exp2l:
2815  return Intrinsic::exp2;
2816  case LibFunc_log:
2817  case LibFunc_logf:
2818  case LibFunc_logl:
2819  return Intrinsic::log;
2820  case LibFunc_log10:
2821  case LibFunc_log10f:
2822  case LibFunc_log10l:
2823  return Intrinsic::log10;
2824  case LibFunc_log2:
2825  case LibFunc_log2f:
2826  case LibFunc_log2l:
2827  return Intrinsic::log2;
2828  case LibFunc_fabs:
2829  case LibFunc_fabsf:
2830  case LibFunc_fabsl:
2831  return Intrinsic::fabs;
2832  case LibFunc_fmin:
2833  case LibFunc_fminf:
2834  case LibFunc_fminl:
2835  return Intrinsic::minnum;
2836  case LibFunc_fmax:
2837  case LibFunc_fmaxf:
2838  case LibFunc_fmaxl:
2839  return Intrinsic::maxnum;
2840  case LibFunc_copysign:
2841  case LibFunc_copysignf:
2842  case LibFunc_copysignl:
2843  return Intrinsic::copysign;
2844  case LibFunc_floor:
2845  case LibFunc_floorf:
2846  case LibFunc_floorl:
2847  return Intrinsic::floor;
2848  case LibFunc_ceil:
2849  case LibFunc_ceilf:
2850  case LibFunc_ceill:
2851  return Intrinsic::ceil;
2852  case LibFunc_trunc:
2853  case LibFunc_truncf:
2854  case LibFunc_truncl:
2855  return Intrinsic::trunc;
2856  case LibFunc_rint:
2857  case LibFunc_rintf:
2858  case LibFunc_rintl:
2859  return Intrinsic::rint;
2860  case LibFunc_nearbyint:
2861  case LibFunc_nearbyintf:
2862  case LibFunc_nearbyintl:
2863  return Intrinsic::nearbyint;
2864  case LibFunc_round:
2865  case LibFunc_roundf:
2866  case LibFunc_roundl:
2867  return Intrinsic::round;
2868  case LibFunc_pow:
2869  case LibFunc_powf:
2870  case LibFunc_powl:
2871  return Intrinsic::pow;
2872  case LibFunc_sqrt:
2873  case LibFunc_sqrtf:
2874  case LibFunc_sqrtl:
2875  return Intrinsic::sqrt;
2876  }
2877 
2878  return Intrinsic::not_intrinsic;
2879 }
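// For example (our sketch): a read-only call to the C library's sinf, once
// TLI confirms it is available and not locally redefined, is reported as
// Intrinsic::sin, letting FP folds treat the libcall and the intrinsic
// uniformly.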
2880 
2881 /// Return true if we can prove that the specified FP value is never equal to
2882 /// -0.0.
2883 ///
2884 /// NOTE: this function will need to be revisited when we support non-default
2885 /// rounding modes!
2886 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2887  unsigned Depth) {
2888  if (auto *CFP = dyn_cast<ConstantFP>(V))
2889  return !CFP->getValueAPF().isNegZero();
2890 
2891  // Limit search depth.
2892  if (Depth == MaxDepth)
2893  return false;
2894 
2895  auto *Op = dyn_cast<Operator>(V);
2896  if (!Op)
2897  return false;
2898 
2899  // Check if the nsz fast-math flag is set.
2900  if (auto *FPO = dyn_cast<FPMathOperator>(Op))
2901  if (FPO->hasNoSignedZeros())
2902  return true;
2903 
2904  // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
2905  if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
2906  return true;
2907 
2908  // sitofp and uitofp turn into +0.0 for zero.
2909  if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
2910  return true;
2911 
2912  if (auto *Call = dyn_cast<CallInst>(Op)) {
2913  Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI);
2914  switch (IID) {
2915  default:
2916  break;
2917  // sqrt(-0.0) = -0.0, no other negative results are possible.
2918  case Intrinsic::sqrt:
2919  case Intrinsic::canonicalize:
2920  return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
2921  // fabs(x) != -0.0
2922  case Intrinsic::fabs:
2923  return true;
2924  }
2925  }
2926 
2927  return false;
2928 }
2929 
2930 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
2931 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
2932 /// bit despite comparing equal.
2933 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
2934  const TargetLibraryInfo *TLI,
2935  bool SignBitOnly,
2936  unsigned Depth) {
2937  // TODO: This function does not do the right thing when SignBitOnly is true
2938  // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
2939  // which flips the sign bits of NaNs. See
2940  // https://llvm.org/bugs/show_bug.cgi?id=31702.
2941 
2942  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2943  return !CFP->getValueAPF().isNegative() ||
2944  (!SignBitOnly && CFP->getValueAPF().isZero());
2945  }
2946 
2947  // Handle vector of constants.
2948  if (auto *CV = dyn_cast<Constant>(V)) {
2949  if (CV->getType()->isVectorTy()) {
2950  unsigned NumElts = CV->getType()->getVectorNumElements();
2951  for (unsigned i = 0; i != NumElts; ++i) {
2952  auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
2953  if (!CFP)
2954  return false;
2955  if (CFP->getValueAPF().isNegative() &&
2956  (SignBitOnly || !CFP->getValueAPF().isZero()))
2957  return false;
2958  }
2959 
2960  // All non-negative ConstantFPs.
2961  return true;
2962  }
2963  }
2964 
2965  if (Depth == MaxDepth)
2966  return false; // Limit search depth.
2967 
2968  const Operator *I = dyn_cast<Operator>(V);
2969  if (!I)
2970  return false;
2971 
2972  switch (I->getOpcode()) {
2973  default:
2974  break;
2975  // Unsigned integers are always nonnegative.
2976  case Instruction::UIToFP:
2977  return true;
2978  case Instruction::FMul:
2979  // x*x is always non-negative or a NaN.
2980  if (I->getOperand(0) == I->getOperand(1) &&
2981  (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
2982  return true;
2983 
2984  LLVM_FALLTHROUGH;
2985  case Instruction::FAdd:
2986  case Instruction::FDiv:
2987  case Instruction::FRem:
2988  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2989  Depth + 1) &&
2990  cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2991  Depth + 1);
2992  case Instruction::Select:
2993  return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2994  Depth + 1) &&
2995  cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2996  Depth + 1);
2997  case Instruction::FPExt:
2998  case Instruction::FPTrunc:
2999  // Widening/narrowing never change sign.
3000  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3001  Depth + 1);
3002  case Instruction::ExtractElement:
3003  // Look through extract element. At the moment we keep this simple and skip
3004  // tracking the specific element. But at least we might find information
3005  // valid for all elements of the vector.
3006  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3007  Depth + 1);
3008  case Instruction::Call:
3009  const auto *CI = cast<CallInst>(I);
3010  Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
3011  switch (IID) {
3012  default:
3013  break;
3014  case Intrinsic::maxnum:
3015  return (isKnownNeverNaN(I->getOperand(0), TLI) &&
3016  cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
3017  SignBitOnly, Depth + 1)) ||
3018  (isKnownNeverNaN(I->getOperand(1), TLI) &&
3019  cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI,
3020  SignBitOnly, Depth + 1));
3021 
3022  case Intrinsic::maximum:
3023  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3024  Depth + 1) ||
3025  cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3026  Depth + 1);
3027  case Intrinsic::minnum:
3028  case Intrinsic::minimum:
3029  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3030  Depth + 1) &&
3031  cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3032  Depth + 1);
3033  case Intrinsic::exp:
3034  case Intrinsic::exp2:
3035  case Intrinsic::fabs:
3036  return true;
3037 
3038  case Intrinsic::sqrt:
3039  // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
3040  if (!SignBitOnly)
3041  return true;
3042  return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3043  CannotBeNegativeZero(CI->getOperand(0), TLI));
3044 
3045  case Intrinsic::powi:
3046  if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3047  // powi(x,n) is non-negative if n is even.
3048  if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3049  return true;
3050  }
3051  // TODO: This is not correct. Given that exp is an integer, here are the
3052  // ways that pow can return a negative value:
3053  //
3054  // pow(x, exp) --> negative if exp is odd and x is negative.
3055  // pow(-0, exp) --> -inf if exp is negative odd.
3056  // pow(-0, exp) --> -0 if exp is positive odd.
3057  // pow(-inf, exp) --> -0 if exp is negative odd.
3058  // pow(-inf, exp) --> -inf if exp is positive odd.
3059  //
3060  // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3061  // but we must return false if x == -0. Unfortunately we do not currently
3062  // have a way of expressing this constraint. See details in
3063  // https://llvm.org/bugs/show_bug.cgi?id=31702.
3064  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3065  Depth + 1);
3066 
3067  case Intrinsic::fma:
3068  case Intrinsic::fmuladd:
3069  // x*x+y is non-negative if y is non-negative.
3070  return I->getOperand(0) == I->getOperand(1) &&
3071  (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3072  cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3073  Depth + 1);
3074  }
3075  break;
3076  }
3077  return false;
3078 }
3079 
3080 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3081  const TargetLibraryInfo *TLI) {
3082  return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3083 }
3084 
3085 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3086  return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3087 }
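// Editorial example of the distinction: for the constant -0.0,
// CannotBeOrderedLessThanZero returns true (-0.0 is not ordered less than
// +0.0; they compare equal), while SignBitMustBeZero returns false (the sign
// bit is set). Both return true for fabs(%x).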
3088 
3089 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3090  unsigned Depth) {
3091  assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3092 
3093  // If we're told that NaNs won't happen, assume they won't.
3094  if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3095  if (FPMathOp->hasNoNaNs())
3096  return true;
3097 
3098  // Handle scalar constants.
3099  if (auto *CFP = dyn_cast<ConstantFP>(V))
3100  return !CFP->isNaN();
3101 
3102  if (Depth == MaxDepth)
3103  return false;
3104 
3105  if (auto *Inst = dyn_cast<Instruction>(V)) {
3106  switch (Inst->getOpcode()) {
3107  case Instruction::FAdd:
3108  case Instruction::FMul:
3109  case Instruction::FSub:
3110  case Instruction::FDiv:
3111  case Instruction::FRem: {
3112  // TODO: Need isKnownNeverInfinity
3113  return false;
3114  }
3115  case Instruction::Select: {
3116  return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3117  isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3118  }
3119  case Instruction::SIToFP:
3120  case Instruction::UIToFP:
3121  return true;
3122  case Instruction::FPTrunc:
3123  case Instruction::FPExt:
3124  return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3125  default:
3126  break;
3127  }
3128  }
3129 
3130  if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3131  switch (II->getIntrinsicID()) {
3132  case Intrinsic::canonicalize:
3133  case Intrinsic::fabs:
3134  case Intrinsic::copysign:
3135  case Intrinsic::exp:
3136  case Intrinsic::exp2:
3137  case Intrinsic::floor:
3138  case Intrinsic::ceil:
3139  case Intrinsic::trunc:
3140  case Intrinsic::rint:
3141  case Intrinsic::nearbyint:
3142  case Intrinsic::round:
3143  return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3144  case Intrinsic::sqrt:
3145  return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3146  CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3147  case Intrinsic::minnum:
3148  case Intrinsic::maxnum:
3149  // If either operand is not NaN, the result is not NaN.
3150  return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3151  isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3152  default:
3153  return false;
3154  }
3155  }
3156 
3157  // Bail out for constant expressions, but try to handle vector constants.
3158  if (!V->getType()->isVectorTy() || !isa<Constant>(V))
3159  return false;
3160 
3161  // For vectors, verify that each element is not NaN.
3162  unsigned NumElts = V->getType()->getVectorNumElements();
3163  for (unsigned i = 0; i != NumElts; ++i) {
3164  Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3165  if (!Elt)
3166  return false;
3167  if (isa<UndefValue>(Elt))
3168  continue;
3169  auto *CElt = dyn_cast<ConstantFP>(Elt);
3170  if (!CElt || CElt->isNaN())
3171  return false;
3172  }
3173  // All elements were confirmed not-NaN or undefined.
3174  return true;
3175 }
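// Editorial illustration: the vector constant
//   <2 x double> <double 1.0, double undef>
// is reported never-NaN (undef elements are permitted), whereas
//   <2 x double> <double 1.0, double 0x7FF8000000000000>
// is not, because the second element is a NaN.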
3176 
3177 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3178 
3179  // All byte-wide stores are splatable, even of arbitrary variables.
3180  if (V->getType()->isIntegerTy(8))
3181  return V;
3182 
3183  LLVMContext &Ctx = V->getContext();
3184 
3185  // Undef bytes are "don't care" values; any byte works.
3186  auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3187  if (isa<UndefValue>(V))
3188  return UndefInt8;
3189 
3190  const uint64_t Size = DL.getTypeStoreSize(V->getType());
3191  if (!Size)
3192  return UndefInt8;
3193 
3194  Constant *C = dyn_cast<Constant>(V);
3195  if (!C) {
3196  // Conceptually, we could handle things like:
3197  // %a = zext i8 %X to i16
3198  // %b = shl i16 %a, 8
3199  // %c = or i16 %a, %b
3200  // but until there is an example that actually needs this, it doesn't seem
3201  // worth worrying about.
3202  return nullptr;
3203  }
3204 
3205  // Handle 'null' ConstantAggregateZero etc.
3206  if (C->isNullValue())
3207  return Constant::getNullValue(Type::getInt8Ty(Ctx));
3208 
3209  // Constant floating-point values can be handled as integer values if the
3210  // corresponding integer value is "byteable". An important case is 0.0.
3211  if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3212  Type *Ty = nullptr;
3213  if (CFP->getType()->isHalfTy())
3214  Ty = Type::getInt16Ty(Ctx);
3215  else if (CFP->getType()->isFloatTy())
3216  Ty = Type::getInt32Ty(Ctx);
3217  else if (CFP->getType()->isDoubleTy())
3218  Ty = Type::getInt64Ty(Ctx);
3219  // Don't handle long double formats, which have strange constraints.
3220  return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3221  : nullptr;
3222  }
3223 
3224  // We can handle constant integers whose width is a multiple of 8 bits.
3225  if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3226  if (CI->getBitWidth() % 8 == 0) {
3227  assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3228  if (!CI->getValue().isSplat(8))
3229  return nullptr;
3230  return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3231  }
3232  }
3233 
3234  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3235  if (CE->getOpcode() == Instruction::IntToPtr) {
3236  auto PS = DL.getPointerSizeInBits(
3237  cast<PointerType>(CE->getType())->getAddressSpace());
3238  return isBytewiseValue(
3239  ConstantExpr::getIntegerCast(CE->getOperand(0),
3240  Type::getIntNTy(Ctx, PS), false),
3241  DL);
3242  }
3243  }
3244 
3245  auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3246  if (LHS == RHS)
3247  return LHS;
3248  if (!LHS || !RHS)
3249  return nullptr;
3250  if (LHS == UndefInt8)
3251  return RHS;
3252  if (RHS == UndefInt8)
3253  return LHS;
3254  return nullptr;
3255  };
3256 
3257  if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3258  Value *Val = UndefInt8;
3259  for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3260  if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3261  return nullptr;
3262  return Val;
3263  }
3264 
3265  if (isa<ConstantAggregate>(C)) {
3266  Value *Val = UndefInt8;
3267  for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3268  if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3269  return nullptr;
3270  return Val;
3271  }
3272 
3273  // Don't try to handle the handful of other constants.
3274  return nullptr;
3275 }
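// Editorial worked examples: i32 0xAAAAAAAA splats to i8 0xAA; float 0.0
// bitcasts to i32 0 and yields i8 0; i16 0x0102 is not a byte splat and
// yields nullptr; undef yields an undef i8 that merges with any other byte.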
3276 
3277 // This is the recursive version of BuildSubAggregate. It takes a few different
3278 // arguments. Idxs is the index within the nested struct From that we are
3279 // looking at now (which is of type IndexedType). IdxSkip is the number of
3280 // indices from Idxs that should be left out when inserting into the resulting
3281 // struct. To is the result struct built so far, new insertvalue instructions
3282 // build on that.
3283 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3284  SmallVectorImpl<unsigned> &Idxs,
3285  unsigned IdxSkip,
3286  Instruction *InsertBefore) {
3287  StructType *STy = dyn_cast<StructType>(IndexedType);
3288  if (STy) {
3289  // Save the original To argument so we can modify it
3290  Value *OrigTo = To;
3291  // General case, the type indexed by Idxs is a struct
3292  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3293  // Process each struct element recursively
3294  Idxs.push_back(i);
3295  Value *PrevTo = To;
3296  To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3297  InsertBefore);
3298  Idxs.pop_back();
3299  if (!To) {
3300  // Couldn't find any inserted value for this index? Cleanup
3301  while (PrevTo != OrigTo) {
3302  InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3303  PrevTo = Del->getAggregateOperand();
3304  Del->eraseFromParent();
3305  }
3306  // Stop processing elements
3307  break;
3308  }
3309  }
3310  // If we successfully found a value for each of our subaggregates
3311  if (To)
3312  return To;
3313  }
3314  // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3315  // the struct's elements had a value that was inserted directly. In the latter
3316  // case, perhaps we can't determine each of the subelements individually, but
3317  // we might be able to find the complete struct somewhere.
3318 
3319  // Find the value that is at that particular spot
3320  Value *V = FindInsertedValue(From, Idxs);
3321 
3322  if (!V)
3323  return nullptr;
3324 
3325  // Insert the value in the new (sub) aggregate
3326  return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3327  "tmp", InsertBefore);
3328 }
3329 
3330 // This helper takes a nested struct and extracts a part of it (which is again a
3331 // struct) into a new value. For example, given the struct:
3332 // { a, { b, { c, d }, e } }
3333 // and the indices "1, 1" this returns
3334 // { c, d }.
3335 //
3336 // It does this by inserting an insertvalue for each element in the resulting
3337 // struct, as opposed to just inserting a single struct. This will only work if
3338 // each of the elements of the substruct are known (ie, inserted into From by an
3339 // insertvalue instruction somewhere).
3340 //
3341 // All inserted insertvalue instructions are inserted before InsertBefore
3342 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3343  Instruction *InsertBefore) {
3344  assert(InsertBefore && "Must have someplace to insert!");
3345  Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3346  idx_range);
3347  Value *To = UndefValue::get(IndexedType);
3348  SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3349  unsigned IdxSkip = Idxs.size();
3350 
3351  return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3352 }
3353 
3354 /// Given an aggregate and a sequence of indices, see if the scalar value
3355 /// indexed is already around as a register, for example if it was inserted
3356 /// directly into the aggregate.
3357 ///
3358 /// If InsertBefore is not null, this function will duplicate (modified)
3359 /// insertvalues when a part of a nested struct is extracted.
3360 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3361  Instruction *InsertBefore) {
3362  // Nothing to index? Just return V then (this is useful at the end of our
3363  // recursion).
3364  if (idx_range.empty())
3365  return V;
3366  // We have indices, so V should have an indexable type.
3367  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3368  "Not looking at a struct or array?");
3369  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3370  "Invalid indices for type?");
3371 
3372  if (Constant *C = dyn_cast<Constant>(V)) {
3373  C = C->getAggregateElement(idx_range[0]);
3374  if (!C) return nullptr;
3375  return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3376  }
3377 
3378  if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3379  // Loop the indices for the insertvalue instruction in parallel with the
3380  // requested indices
3381  const unsigned *req_idx = idx_range.begin();
3382  for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3383  i != e; ++i, ++req_idx) {
3384  if (req_idx == idx_range.end()) {
3385  // We can't handle this without inserting insertvalues
3386  if (!InsertBefore)
3387  return nullptr;
3388 
3389  // The requested index identifies a part of a nested aggregate. Handle
3390  // this specially. For example,
3391  // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3392  // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3393  // %C = extractvalue {i32, { i32, i32 } } %B, 1
3394  // This can be changed into
3395  // %A = insertvalue {i32, i32 } undef, i32 10, 0
3396  // %C = insertvalue {i32, i32 } %A, i32 11, 1
3397  // which allows the unused 0,0 element from the nested struct to be
3398  // removed.
3399  return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3400  InsertBefore);
3401  }
3402 
3403  // This insertvalue inserts something other than what we are looking for.
3404  // See if the (aggregate) value inserted into has the value we are
3405  // looking for, then.
3406  if (*req_idx != *i)
3407  return FindInsertedValue(I->getAggregateOperand(), idx_range,
3408  InsertBefore);
3409  }
3410  // If we end up here, the indices of the insertvalue match with those
3411  // requested (though possibly only partially). Now we recursively look at
3412  // the inserted value, passing any remaining indices.
3413  return FindInsertedValue(I->getInsertedValueOperand(),
3414  makeArrayRef(req_idx, idx_range.end()),
3415  InsertBefore);
3416  }
3417 
3418  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3419  // If we're extracting a value from an aggregate that was extracted from
3420  // something else, we can extract from that something else directly instead.
3421  // However, we will need to chain I's indices with the requested indices.
3422 
3423  // Calculate the number of indices required
3424  unsigned size = I->getNumIndices() + idx_range.size();
3425  // Allocate some space to put the new indices in
3426  SmallVector<unsigned, 5> Idxs;
3427  Idxs.reserve(size);
3428  // Add indices from the extract value instruction
3429  Idxs.append(I->idx_begin(), I->idx_end());
3430 
3431  // Add requested indices
3432  Idxs.append(idx_range.begin(), idx_range.end());
3433 
3434  assert(Idxs.size() == size
3435  && "Number of indices added not correct?");
3436 
3437  return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3438  }
3439  // Otherwise, we don't know (e.g., when extracting from a function return
3440  // value or a load instruction).
3441  return nullptr;
3442 }
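// Editorial usage sketch (hypothetical values): given
//   %agg = insertvalue {i32, i32} undef, i32 42, 0
// FindInsertedValue(%agg, {0}) returns the i32 42 register directly, with no
// new extractvalue instruction needed.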
3443 
3444 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3445  unsigned CharSize) {
3446  // Make sure the GEP has exactly three arguments.
3447  if (GEP->getNumOperands() != 3)
3448  return false;
3449 
3450  // Make sure the index-ee is a pointer to an array of \p CharSize
3451  // integers.
3452  const ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3453  if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3454  return false;
3455 
3456  // Check to make sure that the first operand of the GEP is an integer and
3457  // has value 0 so that we are sure we're indexing into the initializer.
3458  const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3459  if (!FirstIdx || !FirstIdx->isZero())
3460  return false;
3461 
3462  return true;
3463 }
3464 
3465 bool llvm::getConstantDataArrayInfo(const Value *V,
3466  ConstantDataArraySlice &Slice,
3467  unsigned ElementSize, uint64_t Offset) {
3468  assert(V);
3469 
3470  // Look through bitcast instructions and geps.
3471  V = V->stripPointerCasts();
3472 
3473  // If the value is a GEP instruction or constant expression, treat it as an
3474  // offset.
3475  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3476  // The GEP operator should be based on a pointer to string constant, and is
3477  // indexing into the string constant.
3478  if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3479  return false;
3480 
3481  // If the second index isn't a ConstantInt, then this is a variable index
3482  // into the array. If this occurs, we can't say anything meaningful about
3483  // the string.
3484  uint64_t StartIdx = 0;
3485  if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3486  StartIdx = CI->getZExtValue();
3487  else
3488  return false;
3489  return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3490  StartIdx + Offset);
3491  }
3492 
3493  // The GEP, whether a constant expression or an instruction, must reference
3494  // a global variable that is a constant and is initialized. The referenced
3495  // constant initializer is the array that we'll use for optimization.
3496  const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3497  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3498  return false;
3499 
3500  const ConstantDataArray *Array;
3501  ArrayType *ArrayTy;
3502  if (GV->getInitializer()->isNullValue()) {
3503  Type *GVTy = GV->getValueType();
3504  if ((ArrayTy = dyn_cast<ArrayType>(GVTy))) {
3505  // A zeroinitializer for the array; there is no ConstantDataArray.
3506  Array = nullptr;
3507  } else {
3508  const DataLayout &DL = GV->getParent()->getDataLayout();
3509  uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3510  uint64_t Length = SizeInBytes / (ElementSize / 8);
3511  if (Length <= Offset)
3512  return false;
3513 
3514  Slice.Array = nullptr;
3515  Slice.Offset = 0;
3516  Slice.Length = Length - Offset;
3517  return true;
3518  }
3519  } else {
3520  // This must be a ConstantDataArray.
3521  Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3522  if (!Array)
3523  return false;
3524  ArrayTy = Array->getType();
3525  }
3526  if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3527  return false;
3528 
3529  uint64_t NumElts = ArrayTy->getArrayNumElements();
3530  if (Offset > NumElts)
3531  return false;
3532 
3533  Slice.Array = Array;
3534  Slice.Offset = Offset;
3535  Slice.Length = NumElts - Offset;
3536  return true;
3537 }
3538 
3539 /// This function computes the length of a null-terminated C string pointed to
3540 /// by V. If successful, it returns true and returns the string in Str.
3541 /// If unsuccessful, it returns false.
3542 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3543  uint64_t Offset, bool TrimAtNul) {
3544  ConstantDataArraySlice Slice;
3545  if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3546  return false;
3547 
3548  if (Slice.Array == nullptr) {
3549  if (TrimAtNul) {
3550  Str = StringRef();
3551  return true;
3552  }
3553  if (Slice.Length == 1) {
3554  Str = StringRef("", 1);
3555  return true;
3556  }
3557  // We cannot instantiate a StringRef as we do not have an appropriate string
3558  // of 0s at hand.
3559  return false;
3560  }
3561 
3562  // Start out with the entire array in the StringRef.
3563  Str = Slice.Array->getAsString();
3564  // Skip over 'offset' bytes.
3565  Str = Str.substr(Slice.Offset);
3566 
3567  if (TrimAtNul) {
3568  // Trim off the \0 and anything after it. If the array is not nul
3569  // terminated, we just return the whole end of string. The client may know
3570  // some other way that the string is length-bound.
3571  Str = Str.substr(0, Str.find('\0'));
3572  }
3573  return true;
3574 }
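// Editorial example: for a global
//   @s = private constant [6 x i8] c"hello\00"
// getConstantStringInfo(@s, Str) sets Str to "hello"; the trailing NUL and
// anything after it are trimmed when TrimAtNul is true.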
3575 
3576 // These next two are very similar to the above, but also look through PHI
3577 // nodes.
3578 // TODO: See if we can integrate these two together.
3579 
3580 /// If we can compute the length of the string pointed to by
3581 /// the specified pointer, return 'len+1'. If we can't, return 0.
3582 static uint64_t GetStringLengthH(const Value *V,
3583  SmallPtrSetImpl<const PHINode*> &PHIs,
3584  unsigned CharSize) {
3585  // Look through noop bitcast instructions.
3586  V = V->stripPointerCasts();
3587 
3588  // If this is a PHI node, there are two cases: either we have already seen it
3589  // or we haven't.
3590  if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3591  if (!PHIs.insert(PN).second)
3592  return ~0ULL; // already in the set.
3593 
3594  // If it was new, see if all the input strings are the same length.
3595  uint64_t LenSoFar = ~0ULL;
3596  for (Value *IncValue : PN->incoming_values()) {
3597  uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3598  if (Len == 0) return 0; // Unknown length -> unknown.
3599 
3600  if (Len == ~0ULL) continue;
3601 
3602  if (Len != LenSoFar && LenSoFar != ~0ULL)
3603  return 0; // Disagree -> unknown.
3604  LenSoFar = Len;
3605  }
3606 
3607  // Success, all agree.
3608  return LenSoFar;
3609  }
3610 
3611  // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
3612  if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3613  uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3614  if (Len1 == 0) return 0;
3615  uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3616  if (Len2 == 0) return 0;
3617  if (Len1 == ~0ULL) return Len2;
3618  if (Len2 == ~0ULL) return Len1;
3619  if (Len1 != Len2) return 0;
3620  return Len1;
3621  }
3622 
3623  // Otherwise, see if we can read the string.
3624  ConstantDataArraySlice Slice;
3625  if (!getConstantDataArrayInfo(V, Slice, CharSize))
3626  return 0;
3627 
3628  if (Slice.Array == nullptr)
3629  return 1;
3630 
3631  // Search for nul characters
3632  unsigned NullIndex = 0;
3633  for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3634  if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3635  break;
3636  }
3637 
3638  return NullIndex + 1;
3639 }
3640 
3641 /// If we can compute the length of the string pointed to by
3642 /// the specified pointer, return 'len+1'. If we can't, return 0.
3643 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3644  if (!V->getType()->isPointerTy())
3645  return 0;
3646 
3647  SmallPtrSet<const PHINode*, 32> PHIs;
3648  uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3649  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
3650  // treat it as an empty string and return a length of 1.
3651  return Len == ~0ULL ? 1 : Len;
3652 }
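// Editorial example: for a pointer to a constant "hello\00" array,
// GetStringLength returns 6, i.e. strlen + 1; for a pointer of unknown
// provenance it returns 0.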
3653 
3654 const Value *llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call) {
3655  assert(Call &&
3656  "getArgumentAliasingToReturnedPointer only works on nonnull calls");
3657  if (const Value *RV = Call->getReturnedArgOperand())
3658  return RV;
3659  // This can be used only as an aliasing property.
3660  if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(Call))
3661  return Call->getArgOperand(0);
3662  return nullptr;
3663 }
3664 
3665 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
3666  const CallBase *Call) {
3667  return Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
3668  Call->getIntrinsicID() == Intrinsic::strip_invariant_group ||
3669  Call->getIntrinsicID() == Intrinsic::aarch64_irg ||
3670  Call->getIntrinsicID() == Intrinsic::aarch64_tagp;
3671 }
3672 
3673 /// \p PN defines a loop-variant pointer to an object. Check if the
3674 /// previous iteration of the loop was referring to the same object as \p PN.
3675 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3676  const LoopInfo *LI) {
3677  // Find the loop-defined value.
3678  Loop *L = LI->getLoopFor(PN->getParent());
3679  if (PN->getNumIncomingValues() != 2)
3680  return true;
3681 
3682  // Find the value from previous iteration.
3683  auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3684  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3685  PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3686  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3687  return true;
3688 
3689  // If a new pointer is loaded in the loop, the pointer references a different
3690  // object in every iteration. E.g.:
3691  // for (i)
3692  // int *p = a[i];
3693  // ...
3694  if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3695  if (!L->isLoopInvariant(Load->getPointerOperand()))
3696  return false;
3697  return true;
3698 }
3699 
3700 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3701  unsigned MaxLookup) {
3702  if (!V->getType()->isPointerTy())
3703  return V;
3704  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3705  if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3706  V = GEP->getPointerOperand();
3707  } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3708  Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3709  V = cast<Operator>(V)->getOperand(0);
3710  } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3711  if (GA->isInterposable())
3712  return V;
3713  V = GA->getAliasee();
3714  } else if (isa<AllocaInst>(V)) {
3715  // An alloca can't be further simplified.
3716  return V;
3717  } else {
3718  if (auto *Call = dyn_cast<CallBase>(V)) {
3719  // CaptureTracking can know about special capturing properties of some
3720  // intrinsics like launder.invariant.group, that can't be expressed with
3721  // the attributes, but have properties like returning aliasing pointer.
3722  // Because some analyses may assume that a nocapture pointer is never
3723  // returned from a special intrinsic (the function would otherwise have to
3724  // be marked with the returned attribute), it is crucial to use this
3725  // helper, which stays in sync with CaptureTracking. Not using it may
3726  // cause miscompilations where two aliasing pointers are assumed not to
3727  // alias.
3728  if (auto *RP = getArgumentAliasingToReturnedPointer(Call)) {
3729  V = RP;
3730  continue;
3731  }
3732  }
3733 
3734  // See if InstructionSimplify knows any relevant tricks.
3735  if (Instruction *I = dyn_cast<Instruction>(V))
3736  // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3737  if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3738  V = Simplified;
3739  continue;
3740  }
3741 
3742  return V;
3743  }
3744  assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3745  }
3746  return V;
3747 }
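// Editorial illustration: for
//   %a = alloca [16 x i8]
//   %p = getelementptr [16 x i8], [16 x i8]* %a, i64 0, i64 4
//   %q = bitcast i8* %p to i32*
// GetUnderlyingObject(%q, DL) walks back through the bitcast and the GEP and
// returns the alloca %a.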
3748 
3749 void llvm::GetUnderlyingObjects(const Value *V,
3750  SmallVectorImpl<const Value *> &Objects,
3751  const DataLayout &DL, LoopInfo *LI,
3752  unsigned MaxLookup) {
3753  SmallPtrSet<const Value *, 4> Visited;
3754  SmallVector<const Value *, 4> Worklist;
3755  Worklist.push_back(V);
3756  do {
3757  const Value *P = Worklist.pop_back_val();
3758  P = GetUnderlyingObject(P, DL, MaxLookup);
3759 
3760  if (!Visited.insert(P).second)
3761  continue;
3762 
3763  if (auto *SI = dyn_cast<SelectInst>(P)) {
3764  Worklist.push_back(SI->getTrueValue());
3765  Worklist.push_back(SI->getFalseValue());
3766  continue;
3767  }
3768 
3769  if (auto *PN = dyn_cast<PHINode>(P)) {
3770  // If this PHI changes the underlying object in every iteration of the
3771  // loop, don't look through it. Consider:
3772  // int **A;
3773  // for (i) {
3774  // Prev = Curr; // Prev = PHI (Prev_0, Curr)
3775  // Curr = A[i];
3776  // *Prev, *Curr;
3777  //
3778  // Prev is tracking Curr one iteration behind so they refer to different
3779  // underlying objects.
3780  if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3781  isSameUnderlyingObjectInLoop(PN, LI))
3782  for (Value *IncValue : PN->incoming_values())
3783  Worklist.push_back(IncValue);
3784  continue;
3785  }
3786 
3787  Objects.push_back(P);
3788  } while (!Worklist.empty());
3789 }
3790 
3791 /// This is the function that does the work of looking through basic
3792 /// ptrtoint+arithmetic+inttoptr sequences.
3793 static const Value *getUnderlyingObjectFromInt(const Value *V) {
3794  do {
3795  if (const Operator *U = dyn_cast<Operator>(V)) {
3796  // If we find a ptrtoint, we can transfer control back to the
3797  // regular getUnderlyingObjectFromInt.
3798  if (U->getOpcode() == Instruction::PtrToInt)
3799  return U->getOperand(0);
3800  // If we find an add of a constant, a multiplied value, or a phi, it's
3801  // likely that the other operand will lead us to the base
3802  // object. We don't have to worry about the case where the
3803  // object address is somehow being computed by the multiply,
3804  // because our callers only care when the result is an
3805  // identifiable object.
3806  if (U->getOpcode() != Instruction::Add ||
3807  (!isa<ConstantInt>(U->getOperand(1)) &&
3808  Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3809  !isa<PHINode>(U->getOperand(1))))
3810  return V;
3811  V = U->getOperand(0);
3812  } else {
3813  return V;
3814  }
3815  assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3816  } while (true);
3817 }
3818 
3819 /// This is a wrapper around GetUnderlyingObjects and adds support for basic
3820 /// ptrtoint+arithmetic+inttoptr sequences.
3821 /// It returns false if an unidentified object is found in GetUnderlyingObjects.
3822 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3823  SmallVectorImpl<Value *> &Objects,
3824  const DataLayout &DL) {
3825  SmallPtrSet<const Value *, 4> Visited;
3826  SmallVector<const Value *, 4> Working(1, V);
3827  do {
3828  V = Working.pop_back_val();
3829 
3830  SmallVector<const Value *, 4> Objs;
3831  GetUnderlyingObjects(V, Objs, DL);
3832 
3833  for (const Value *V : Objs) {
3834  if (!Visited.insert(V).second)
3835  continue;
3836  if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3837  const Value *O =
3838  getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3839  if (O->getType()->isPointerTy()) {
3840  Working.push_back(O);
3841  continue;
3842  }
3843  }
3844  // If GetUnderlyingObjects fails to find an identifiable object,
3845  // getUnderlyingObjectsForCodeGen also fails for safety.
3846  if (!isIdentifiedObject(V)) {
3847  Objects.clear();
3848  return false;
3849  }
3850  Objects.push_back(const_cast<Value *>(V));
3851  }
3852  } while (!Working.empty());
3853  return true;
3854 }
3855 
3856 /// Return true if the only users of this pointer are lifetime markers.
3857 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3858  for (const User *U : V->users()) {
3859  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3860  if (!II) return false;
3861 
3862  if (!II->isLifetimeStartOrEnd())
3863  return false;
3864  }
3865  return true;
3866 }
3867 
3868 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3869  const Instruction *CtxI,
3870  const DominatorTree *DT) {
3871  const Operator *Inst = dyn_cast<Operator>(V);
3872  if (!Inst)
3873  return false;
3874 
3875  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3876  if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3877  if (C->canTrap())
3878  return false;
3879 
3880  switch (Inst->getOpcode()) {
3881  default:
3882  return true;
3883  case Instruction::UDiv:
3884  case Instruction::URem: {
3885  // x / y is undefined if y == 0.
3886  const APInt *V;
3887  if (match(Inst->getOperand(1), m_APInt(V)))
3888  return *V != 0;
3889  return false;
3890  }
3891  case Instruction::SDiv:
3892  case Instruction::SRem: {
3893  // x / y is undefined if y == 0 or x == INT_MIN and y == -1
3894  const APInt *Numerator, *Denominator;
3895  if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3896  return false;
3897  // We cannot hoist this division if the denominator is 0.
3898  if (*Denominator == 0)
3899  return false;
3900  // It's safe to hoist if the denominator is not 0 or -1.
3901  if (*Denominator != -1)
3902  return true;
3903  // At this point we know that the denominator is -1. It is safe to hoist as
3904  // long we know that the numerator is not INT_MIN.
3905  if (match(Inst->getOperand(0), m_APInt(Numerator)))
3906  return !Numerator->isMinSignedValue();
3907  // The numerator *might* be MinSignedValue.
3908  return false;
3909  }
3910  case Instruction::Load: {
3911  const LoadInst *LI = cast<LoadInst>(Inst);
3912  if (!LI->isUnordered() ||
3913  // Speculative load may create a race that did not exist in the source.
3914  LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3915  // Speculative load may load data from dirty regions.
3916  LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3917  LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3918  return false;
3919  const DataLayout &DL = LI->getModule()->getDataLayout();
3920  return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3921  LI->getType(), LI->getAlignment(),
3922  DL, CtxI, DT);
3923  }
3924  case Instruction::Call: {
3925  auto *CI = cast<const CallInst>(Inst);
3926  const Function *Callee = CI->getCalledFunction();
3927 
3928  // The called function could have undefined behavior or side-effects, even
3929  // if marked readnone nounwind.
3930  return Callee && Callee->isSpeculatable();
3931  }
3932  case Instruction::VAArg:
3933  case Instruction::Alloca:
3934  case Instruction::Invoke:
3935  case Instruction::CallBr:
3936  case Instruction::PHI:
3937  case Instruction::Store:
3938  case Instruction::Ret:
3939  case Instruction::Br:
3940  case Instruction::IndirectBr:
3941  case Instruction::Switch:
3942  case Instruction::Unreachable:
3943  case Instruction::Fence:
3944  case Instruction::AtomicRMW:
3945  case Instruction::AtomicCmpXchg:
3946  case Instruction::LandingPad:
3947  case Instruction::Resume:
3948  case Instruction::CatchSwitch:
3949  case Instruction::CatchPad:
3950  case Instruction::CatchRet:
3951  case Instruction::CleanupPad:
3952  case Instruction::CleanupRet:
3953  return false; // Misc instructions which have effects
3954  }
3955 }
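// Editorial examples: 'udiv i32 %x, 7' is safe to speculate (the divisor is a
// non-zero constant), while 'udiv i32 %x, %y' is not, since %y might be zero
// on the path where the division was originally guarded.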
3956 
3957 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3958  return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3959 }
3960 
3961 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
3962 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
3963  switch (OR) {
3964  case ConstantRange::OverflowResult::MayOverflow:
3965  return OverflowResult::MayOverflow;
3966  case ConstantRange::OverflowResult::AlwaysOverflowsLow:
3967  return OverflowResult::AlwaysOverflowsLow;
3968  case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
3969  return OverflowResult::AlwaysOverflowsHigh;
3970  case ConstantRange::OverflowResult::NeverOverflows:
3971  return OverflowResult::NeverOverflows;
3972  }
3973  llvm_unreachable("Unknown OverflowResult");
3974 }
3975 
3976 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
3977 static ConstantRange computeConstantRangeIncludingKnownBits(
3978  const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
3979  AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
3980  OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
3981  KnownBits Known = computeKnownBits(
3982  V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
3983  ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
3984  ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
3985  ConstantRange::PreferredRangeType RangeType =
3986  ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
3987  return CR1.intersectWith(CR2, RangeType);
3988 }
3989 
3990 OverflowResult llvm::computeOverflowForUnsignedMul(
3991  const Value *LHS, const Value *RHS, const DataLayout &DL,
3992  AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
3993  bool UseInstrInfo) {
3994  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
3995  nullptr, UseInstrInfo);
3996  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
3997  nullptr, UseInstrInfo);
3998  ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
3999  ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4000  return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4001 }
4002 
4003 OverflowResult
4004 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4005  const DataLayout &DL, AssumptionCache *AC,
4006  const Instruction *CxtI,
4007  const DominatorTree *DT, bool UseInstrInfo) {
4008  // Multiplying n * m significant bits yields a result of n + m significant
4009  // bits. If the total number of significant bits does not exceed the
4010  // result bit width (minus 1), there is no overflow.
4011  // This means if we have enough leading sign bits in the operands
4012  // we can guarantee that the result does not overflow.
4013  // Ref: "Hacker's Delight" by Henry Warren
4014  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4015 
4016  // Note that underestimating the number of sign bits gives a more
4017  // conservative answer.
4018  unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4019  ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4020 
4021  // First handle the easy case: if we have enough sign bits there's
4022  // definitely no overflow.
4023  if (SignBits > BitWidth + 1)
4024  return OverflowResult::NeverOverflows;
4025 
4026  // There are two ambiguous cases where there can be no overflow:
4027  // SignBits == BitWidth + 1 and
4028  // SignBits == BitWidth
4029  // The second case is difficult to check, therefore we only handle the
4030  // first case.
4031  if (SignBits == BitWidth + 1) {
4032  // It overflows only when both arguments are negative and the true
4033  // product is exactly the minimum negative number.
4034  // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4035  // For simplicity we just check if at least one side is not negative.
4036  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4037  nullptr, UseInstrInfo);
4038  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4039  nullptr, UseInstrInfo);
4040  if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4041  return OverflowResult::NeverOverflows;
4042  }
4043  return OverflowResult::MayOverflow;
4044 }
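// Editorial worked example: for 'mul i16 %a, %b' where both operands are
// sign-extended from i8, each side has at least 9 sign bits, so SignBits >=
// 18 > 16 + 1 and the multiply provably never overflows.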
4045 
4046 OverflowResult llvm::computeOverflowForUnsignedAdd(
4047  const Value *LHS, const Value *RHS, const DataLayout &DL,
4048  AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4049  bool UseInstrInfo) {
4050  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4051  LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4052  nullptr, UseInstrInfo);
4053  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4054  RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4055  nullptr, UseInstrInfo);
4056  return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4057 }
4058 
4059 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4060  const Value *RHS,
4061  const AddOperator *Add,
4062  const DataLayout &DL,
4063  AssumptionCache *AC,
4064  const Instruction *CxtI,
4065  const DominatorTree *DT) {
4066  if (Add && Add->hasNoSignedWrap()) {
4067  return OverflowResult::NeverOverflows;
4068  }
4069 
4070  // If LHS and RHS each have at least two sign bits, the addition will look
4071  // like
4072  //
4073  // XX..... +
4074  // YY.....
4075  //
4076  // If the carry into the most significant position is 0, X and Y can't both
4077  // be 1 and therefore the carry out of the addition is also 0.
4078  //
4079  // If the carry into the most significant position is 1, X and Y can't both
4080  // be 0 and therefore the carry out of the addition is also 1.
4081  //
4082  // Since the carry into the most significant position is always equal to
4083  // the carry out of the addition, there is no signed overflow.
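  //
  // Editorial worked example in 4 bits: 0b0011 (3) + 0b0010 (2) = 0b0101 (5).
  // Both operands have two sign bits; the carry into and out of the top bit
  // are both 0, so there is no signed overflow.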
4084  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4085  ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4086  return OverflowResult::NeverOverflows;
4087 
4088  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4089  LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4090  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4091  RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4092  OverflowResult OR =
4093  mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4094  if (OR != OverflowResult::MayOverflow)
4095  return OR;
4096 
4097  // The remaining code needs Add to be available. Return early if it is not.
4098  if (!Add)
4099  return OverflowResult::MayOverflow;
4100 
4101  // If the sign of Add is the same as at least one of the operands, this add
4102  // CANNOT overflow. If this can be determined from the known bits of the
4103  // operands the above signedAddMayOverflow() check will have already done so.
4104  // The only other way to improve on the known bits is from an assumption, so
4105  // call computeKnownBitsFromAssume() directly.
4106  bool LHSOrRHSKnownNonNegative =
4107  (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4108  bool LHSOrRHSKnownNegative =
4109  (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4110  if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4111  KnownBits AddKnown(LHSRange.getBitWidth());
4112  computeKnownBitsFromAssume(
4113  Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4114  if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4115  (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4116  return OverflowResult::NeverOverflows;
4117  }
4118 
4119  return OverflowResult::MayOverflow;
4120 }
4121 
4122 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4123  const Value *RHS,
4124  const DataLayout &DL,
4125  AssumptionCache *AC,
4126  const Instruction *CxtI,
4127  const DominatorTree *DT) {
4128  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4129  LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4130  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4131  RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4132  return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4133 }
4134 
4135 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4136  const Value *RHS,
4137  const DataLayout &DL,
4138  AssumptionCache *AC,
4139  const Instruction *CxtI,
4140  const DominatorTree *DT) {
4141  // If LHS and RHS each have at least two sign bits, the subtraction
4142  // cannot overflow.
4143  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4144  ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4145  return OverflowResult::NeverOverflows;
4146 
4147  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4148  LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4149  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4150  RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4151  return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4152 }
4153 
4154 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4155  const DominatorTree &DT) {
4156  SmallVector<const BranchInst *, 2> GuardingBranches;
4157  SmallVector<const ExtractValueInst *, 2> Results;
4158 
4159  for (const User *U : WO->users()) {
4160  if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4161  assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4162 
4163  if (EVI->getIndices()[0] == 0)
4164  Results.push_back(EVI);
4165  else {
4166  assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4167 
4168  for (const auto *U : EVI->users())
4169  if (const auto *B = dyn_cast<BranchInst>(U)) {
4170  assert(B->isConditional() && "How else is it using an i1?");
4171  GuardingBranches.push_back(B);
4172  }
4173  }
4174  } else {
4175  // We are using the aggregate directly in a way we don't want to analyze
4176  // here (storing it to a global, say).
4177  return false;
4178  }
4179  }
4180 
4181  auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4182  BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4183  if (!NoWrapEdge.isSingleEdge())
4184  return false;
4185 
4186  // Check if all users of the add are provably no-wrap.
4187  for (const auto *Result : Results) {
4188  // If the extractvalue itself is not executed on overflow, then we don't
4189  // need to check each use separately, since domination is transitive.
4190  if (DT.dominates(NoWrapEdge, Result->getParent()))
4191  continue;
4192 
4193  for (auto &RU : Result->uses())
4194  if (!DT.dominates(NoWrapEdge, RU))
4195  return false;
4196  }
4197 
4198  return true;
4199  };
4200 
4201  return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4202 }
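// Editorial sketch of the pattern recognized (hypothetical IR):
//   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %v = extractvalue { i32, i1 } %s, 0
//   %o = extractvalue { i32, i1 } %s, 1
//   br i1 %o, label %trap, label %ok
// If every use of %v is dominated by the edge into %ok, the add provably does
// not wrap.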
4203 
4204 
4205 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
4206  const DataLayout &DL,
4207  AssumptionCache *AC,
4208  const Instruction *CxtI,
4209  const DominatorTree *DT) {
4210  return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
4211  Add, DL, AC, CxtI, DT);
4212 }
4213 
4214 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
4215  const Value *RHS,
4216  const DataLayout &DL,
4217  AssumptionCache *AC,
4218  const Instruction *CxtI,
4219  const DominatorTree *DT) {
4220  return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
4221 }
4222 
4223 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
4224  // A memory operation returns normally if it isn't volatile. A volatile
4225  // operation is allowed to trap.
4226  //
4227  // An atomic operation isn't guaranteed to return in a reasonable amount of
4228  // time because it's possible for another thread to interfere with it for an
4229  // arbitrary length of time, but programs aren't allowed to rely on that.
4230  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
4231  return !LI->isVolatile();
4232  if (const StoreInst *SI = dyn_cast<StoreInst>(I))
4233  return !SI->isVolatile();
4234  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
4235  return !CXI->isVolatile();
4236  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
4237  return !RMWI->isVolatile();
4238  if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
4239  return !MII->isVolatile();
4240 
4241  // If there is no successor, then execution can't transfer to it.
4242  if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
4243  return !CRI->unwindsToCaller();
4244  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
4245  return !CatchSwitch->unwindsToCaller();
4246  if (isa<ResumeInst>(I))
4247  return false;
4248  if (isa<ReturnInst>(I))
4249  return false;
4250  if (isa<UnreachableInst>(I))
4251  return false;
4252 
4253  // Calls can throw, or contain an infinite loop, or kill the process.
4254  if (auto CS = ImmutableCallSite(I)) {
4255  // Call sites that throw have implicit non-local control flow.
4256  if (!CS.doesNotThrow())
4257  return false;
4258 
4259  // A function which doesn't throw and has the "willreturn" attribute will
4260  // always return.
4261  if (CS.hasFnAttr(Attribute::WillReturn))
4262  return true;
4263 
4264  // Non-throwing call sites can loop infinitely, call exit/pthread_exit
4265  // etc. and thus not return. However, LLVM already assumes that
4266  //
4267  // - Thread exiting actions are modeled as writes to memory invisible to
4268  // the program.
4269  //
4270  // - Loops that don't have side effects (side effects are volatile/atomic
4271  // stores and IO) always terminate (see http://llvm.org/PR965).
4272  // Furthermore IO itself is also modeled as writes to memory invisible to
4273  // the program.
4274  //
4275  // We rely on those assumptions here, and use the memory effects of the call
4276  // target as a proxy for checking that it always returns.
4277 
4278  // FIXME: This isn't aggressive enough; a call which only writes to a global
4279  // is guaranteed to return.
4280  return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
4281  match(I, m_Intrinsic<Intrinsic::assume>()) ||
4282  match(I, m_Intrinsic<Intrinsic::sideeffect>()) ||
4283  match(I, m_Intrinsic<Intrinsic::experimental_widenable_condition>());
4284  }
4285 
4286  // Other instructions return normally.
4287  return true;
4288 }
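// Editorial examples: a non-volatile load always transfers execution to its
// successor, while a call to an arbitrary external function does not (it may
// throw, loop forever, or terminate the process) unless it is known nounwind
// and readonly/argmemonly or marked willreturn.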
4289 
4290 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
4291  // TODO: This is slightly conservative for invoke instructions since exiting
4292  // via an exception *is* normal control for them.
4293  for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
4294  if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
4295  return false;
4296  return true;
4297 }
4298 
4299 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
4300  const Loop *L) {
4301  // The loop header is guaranteed to be executed for every iteration.
4302  //
4303  // FIXME: Relax this constraint to cover all basic blocks that are
4304  // guaranteed to be executed at every iteration.
4305  if (I->getParent() != L->getHeader()) return false;
4306 
4307  for (const Instruction &LI : *L->getHeader()) {
4308  if (&LI == I) return true;
4309  if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
4310  }
4311  llvm_unreachable("Instruction not contained in its own parent basic block.");
4312 }
4313 
4314 bool llvm::propagatesFullPoison(const Instruction *I) {
4315  // TODO: This should include all instructions apart from phis, selects and
4316  // call-like instructions.
4317  switch (I->getOpcode()) {
4318  case Instruction::Add:
4319  case Instruction::Sub:
4320  case Instruction::Xor:
4321  case Instruction::Trunc:
4322  case Instruction::BitCast:
4323  case Instruction::AddrSpaceCast:
4324  case Instruction::Mul:
4325  case Instruction::Shl:
4326  case Instruction::GetElementPtr:
4327  // These operations all propagate poison unconditionally. Note that poison
4328  // is not any particular value, so xor or subtraction of poison with
4329  // itself still yields poison, not zero.
4330  return true;
4331 
4332  case Instruction::AShr:
4333  case Instruction::SExt:
4334  // For these operations, one bit of the input is replicated across
4335  // multiple output bits. A replicated poison bit is still poison.
4336  return true;
4337 
4338  case Instruction::ICmp:
4339  // Comparing poison with any value yields poison. This is why, for
4340  // instance, x s< (x +nsw 1) can be folded to true.
4341  return true;
4342 
4343  default:
4344  return false;
4345  }
4346 }
4347 
4348 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
4349  switch (I->getOpcode()) {
4350  case Instruction::Store:
4351  return cast<StoreInst>(I)->getPointerOperand();
4352 
4353  case Instruction::Load:
4354  return cast<LoadInst>(I)->getPointerOperand();
4355 
4356  case Instruction::AtomicCmpXchg:
4357  return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
4358 
4359  case Instruction::AtomicRMW:
4360  return cast<AtomicRMWInst>(I)->getPointerOperand();
4361 
4362  case Instruction::UDiv:
4363  case Instruction::SDiv:
4364  case Instruction::URem:
4365  case Instruction::SRem:
4366  return I->getOperand(1);
4367 
4368  default:
4369  // Note: It's really tempting to think that a conditional branch or
4370  // switch should be listed here, but that's incorrect. It's not
4371  // branching off of poison which is UB, it is executing a side effecting
4372  // instruction which follows the branch.
4373  return nullptr;
4374  }
4375 }
4376 
4377 static bool mustTriggerUB(const Instruction *I,
4378  const SmallSet<const Value *, 16>& KnownPoison) {
4379  auto *NotPoison = getGuaranteedNonFullPoisonOp(I);
4380  return (NotPoison && KnownPoison.count(NotPoison));
4381 }
4382 
4383 
4384 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
4385  // We currently only look for uses of poison values within the same basic
4386  // block, as that makes it easier to guarantee that the uses will be
4387  // executed given that PoisonI is executed.
4388  //
4389  // FIXME: Expand this to consider uses beyond the same basic block. To do
4390  // this, look out for the distinction between post-dominance and strong
4391  // post-dominance.
4392  const BasicBlock *BB = PoisonI->getParent();
4393 
4394  // Set of instructions that we have proved will yield poison if PoisonI
4395  // does.
4396  SmallSet<const Value *, 16> YieldsPoison;
4397  SmallPtrSet<const BasicBlock *, 4> Visited;
4398  YieldsPoison.insert(PoisonI);
4399  Visited.insert(PoisonI->getParent());
4400 
4401  BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
4402 
4403  unsigned Iter = 0;
4404  while (Iter++ < MaxDepth) {
4405  for (auto &I : make_range(Begin, End)) {
4406  if (&I != PoisonI) {
4407  if (mustTriggerUB(&I, YieldsPoison))
4408  return true;
4409  if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4410  return false;
4411  }
4412 
4413  // Mark poison that propagates from I through uses of I.
4414  if (YieldsPoison.count(&I)) {
4415  for (const User *User : I.users()) {
4416  const Instruction *UserI = cast<Instruction>(User);
4417  if (propagatesFullPoison(UserI))
4418  YieldsPoison.insert(User);
4419  }
4420  }
4421  }
4422 
4423  if (auto *NextBB = BB->getSingleSuccessor()) {
4424  if (Visited.insert(NextBB).second) {
4425  BB = NextBB;
4426  Begin = BB->getFirstNonPHI()->getIterator();
4427  End = BB->end();
4428  continue;
4429  }
4430  }
4431 
4432  break;
4433  }
4434  return false;
4435 }
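// Editorial sketch of what this catches: if %p is full poison and the same
// block later executes 'store i32 0, i32* %p', the store's pointer operand is
// a guaranteed-non-poison operand, so undefined behavior is certain and this
// returns true.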
4436 
4437 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4438  if (FMF.noNaNs())
4439  return true;
4440 
4441  if (auto *C = dyn_cast<ConstantFP>(V))
4442  return !C->isNaN();
4443 
4444  if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4445  if (!C->getElementType()->isFloatingPointTy())
4446  return false;
4447  for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4448  if (C->getElementAsAPFloat(I).isNaN())
4449  return false;
4450  }
4451  return true;
4452  }
4453 
4454  return false;
4455 }
4456 
4457 static bool isKnownNonZero(const Value *V) {
4458  if (auto *C = dyn_cast<ConstantFP>(V))
4459  return !C->isZero();
4460 
4461  if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4462  if (!C->getElementType()->isFloatingPointTy())
4463  return false;
4464  for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4465  if (C->getElementAsAPFloat(I).isZero())
4466  return false;
4467  }
4468  return true;
4469  }
4470 
4471  return false;
4472 }
4473 
4474 /// Match clamp pattern for float types without care about NaNs or signed zeros.
4475 /// Given non-min/max outer cmp/select from the clamp pattern this
4476 /// function recognizes if it can be substituted by a "canonical" min/max
4477 /// pattern.
4478 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4479  Value *CmpLHS, Value *CmpRHS,
4480  Value *TrueVal, Value *FalseVal,
4481  Value *&LHS, Value *&RHS) {
4482  // Try to match
4483  // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4484  // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4485  // and return description of the outer Max/Min.
4486 
4487  // First, check if select has inverse order:
4488  if (CmpRHS == FalseVal) {
4489  std::swap(TrueVal, FalseVal);
4490  Pred = CmpInst::getInversePredicate(Pred);
4491  }
4492 
4493  // Assume success now. If there's no match, callers should not use these anyway.
4494  LHS = TrueVal;
4495  RHS = FalseVal;
4496 
4497  const APFloat *FC1;
4498  if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4499  return {SPF_UNKNOWN, SPNB_NA, false};
4500 
4501  const APFloat *FC2;
4502  switch (Pred) {
4503  case CmpInst::FCMP_OLT:
4504  case CmpInst::FCMP_OLE:
4505  case CmpInst::FCMP_ULT:
4506  case CmpInst::FCMP_ULE:
4507  if (match(FalseVal,
4508  m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4509  m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4510  FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4511  return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4512  break;
4513  case CmpInst::FCMP_OGT:
4514  case CmpInst::FCMP_OGE:
4515  case CmpInst::FCMP_UGT:
4516  case CmpInst::FCMP_UGE:
4517  if (match(FalseVal,
4518  m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4519  m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4520  FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4521  return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4522  break;
4523  default:
4524  break;
4525  }
4526 
4527  return {SPF_UNKNOWN, SPNB_NA, false};
4528 }
4529 
4530 /// Recognize variations of:
4531 /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
4532 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
4533  Value *CmpLHS, Value *CmpRHS,
4534  Value *TrueVal, Value *FalseVal) {
4535  // Swap the select operands and predicate to match the patterns below.
4536  if (CmpRHS != TrueVal) {
4537  Pred = ICmpInst::getSwappedPredicate(Pred);
4538  std::swap(TrueVal, FalseVal);
4539  }
4540  const APInt *C1;
4541  if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
4542  const APInt *C2;
4543  // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
4544  if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4545  C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
4546  return {SPF_SMAX, SPNB_NA, false};
4547 
4548  // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
4549  if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4550  C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
4551  return {SPF_SMIN, SPNB_NA, false};
4552 
4553  // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
4554  if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4555  C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
4556  return {SPF_UMAX, SPNB_NA, false};
4557 
4558  // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
4559  if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4560  C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
4561  return {SPF_UMIN, SPNB_NA, false};
4562  }
4563  return {SPF_UNKNOWN, SPNB_NA, false};
4564 }
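// Editorial example of the first pattern (hypothetical %x):
//   %c1  = icmp slt i32 %x, 40
//   %min = select i1 %c1, i32 %x, i32 40   ; SMIN(%x, 40)
//   %c2  = icmp slt i32 %x, 10
//   %r   = select i1 %c2, i32 10, i32 %min ; clamp of %x to [10, 40]
// matchClamp recognizes %r as SPF_SMAX since C1 = 10 <s C2 = 40.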
4565 
4566 /// Recognize variations of:
4567 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
4568 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
4569  Value *CmpLHS, Value *CmpRHS,
4570  Value *TVal, Value *FVal,
4571  unsigned Depth) {
4572  // TODO: Allow FP min/max with nnan/nsz.
4573  assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
4574 
4575  Value *A, *B;
4576  SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
4577  if (!SelectPatternResult::isMinOrMax(L.Flavor))
4578  return {SPF_UNKNOWN, SPNB_NA, false};
4579 
4580  Value *C, *D;
4581  SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
4582  if (L.Flavor != R.Flavor)
4583  return {SPF_UNKNOWN, SPNB_NA, false};
4584 
4585  // We have something like: x Pred y ? min(a, b) : min(c, d).
4586  // Try to match the compare to the min/max operations of the select operands.
4587  // First, make sure we have the right compare predicate.
4588  switch (L.Flavor) {
4589  case SPF_SMIN:
4590  if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
4591  Pred = ICmpInst::getSwappedPredicate(Pred);
4592  std::swap(CmpLHS, CmpRHS);
4593  }
4594  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
4595  break;
4596  return {SPF_UNKNOWN, SPNB_NA, false};
4597  case SPF_SMAX:
4598  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
4599  Pred = ICmpInst::getSwappedPredicate(Pred);
4600  std::swap(CmpLHS, CmpRHS);
4601  }
4602  if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
4603  break;
4604  return {SPF_UNKNOWN, SPNB_NA, false};
4605  case SPF_UMIN:
4606  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
4607  Pred = ICmpInst::getSwappedPredicate(Pred);
4608  std::swap(CmpLHS, CmpRHS);
4609  }
4610  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
4611  break;
4612  return {SPF_UNKNOWN, SPNB_NA, false};
4613  case SPF_UMAX:
4614  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
4615  Pred = ICmpInst::getSwappedPredicate(Pred);
4616  std::swap(CmpLHS, CmpRHS);
4617  }
4618  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
4619  break;
4620  return {SPF_UNKNOWN, SPNB_NA, false};
4621  default:
4622  return {SPF_UNKNOWN, SPNB_NA, false};
4623  }
4624 
4625  // If there is a common operand in the already matched min/max and the other
4626  // min/max operands match the compare operands (either directly or inverted),
4627  // then this is min/max of the same flavor.
4628 
4629  // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4630  // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4631  if (D == B) {
4632  if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4633  match(A, m_Not(m_Specific(CmpRHS)))))
4634  return {L.Flavor, SPNB_NA, false};
4635  }
4636  // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4637  // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4638  if (C == B) {
4639  if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4640  match(A, m_Not(m_Specific(CmpRHS)))))
4641  return {L.Flavor, SPNB_NA, false};
4642  }
4643  // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4644  // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4645  if (D == A) {
4646  if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4647  match(B, m_Not(m_Specific(CmpRHS)))))
4648  return {L.Flavor, SPNB_NA, false};
4649  }
4650  // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4651  // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4652  if (C == A) {
4653  if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4654  match(B, m_Not(m_Specific(CmpRHS)))))
4655  return {L.Flavor, SPNB_NA, false};
4656  }
4657 
4658  return {SPF_UNKNOWN, SPNB_NA, false};
4659 }
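
// Illustrative IR (a sketch, not from the original source; value names are
// hypothetical) for the common-operand case a pred c ? m(a, b) : m(c, b)
// handled above, where D == B and the compare operands match A and C:
//
//   %cmp.ab = icmp slt i32 %a, %b
//   %min.ab = select i1 %cmp.ab, i32 %a, i32 %b      ; SMIN(%a, %b)
//   %cmp.cb = icmp slt i32 %c, %b
//   %min.cb = select i1 %cmp.cb, i32 %c, i32 %b      ; SMIN(%c, %b)
//   %cmp    = icmp slt i32 %a, %c
//   %r      = select i1 %cmp, i32 %min.ab, i32 %min.cb
//
// Both arms are SMIN, so %r is reported as SPF_SMIN (the min of %a, %b, %c).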
4660 
4661 /// Match non-obvious integer minimum and maximum sequences.
4662 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
4663  Value *CmpLHS, Value *CmpRHS,
4664  Value *TrueVal, Value *FalseVal,
4665  Value *&LHS, Value *&RHS,
4666  unsigned Depth) {
4667  // Assume success. If there's no match, callers should not use these anyway.
4668  LHS = TrueVal;
4669  RHS = FalseVal;
4670 
4671  SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
4672  if (SPR.Flavor != SPF_UNKNOWN)
4673  return SPR;
4674 
4675  SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
4676  if (SPR.Flavor != SPF_UNKNOWN)
4677  return SPR;
4678 
4679  if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
4680  return {SPF_UNKNOWN, SPNB_NA, false};
4681 
4682  // Z = X -nsw Y
4683  // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
4684  // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
4685  if (match(TrueVal, m_Zero()) &&
4686  match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4687  return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4688 
4689  // Z = X -nsw Y
4690  // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
4691  // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
4692  if (match(FalseVal, m_Zero()) &&
4693  match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4694  return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4695 
4696  const APInt *C1;
4697  if (!match(CmpRHS, m_APInt(C1)))
4698  return {SPF_UNKNOWN, SPNB_NA, false};
4699 
4700  // An unsigned min/max can be written with a signed compare.
4701  const APInt *C2;
4702  if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
4703  (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
4704  // Is the sign bit set?
4705  // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
4706  // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
4707  if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
4708  C2->isMaxSignedValue())
4709  return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4710 
4711  // Is the sign bit clear?
4712  // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
4713  // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
4714  if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
4715  C2->isMinSignedValue())
4716  return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4717  }
4718 
4719  // Look through 'not' ops to find disguised signed min/max.
4720  // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
4721  // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
4722  if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
4723  match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
4724  return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4725 
4726  // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
4727  // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
4728  if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
4729  match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
4730  return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4731 
4732  return {SPF_UNKNOWN, SPNB_NA, false};
4733 }
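
// Illustrative IR (a sketch, not from the original source; value names are
// hypothetical) for the "sign bit set" rule above, with C1 == 0 and
// C2 == MAXVAL == INT32_MAX:
//
//   %cmp = icmp slt i32 %x, 0
//   %r   = select i1 %cmp, i32 %x, i32 2147483647    ; UMAX(%x, INT32_MAX)
//
// If %x is negative, its unsigned value exceeds INT32_MAX, so selecting %x
// on "slt 0" is exactly an unsigned maximum; matchMinMax reports SPF_UMAX.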
4734 
4735 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
4736  assert(X && Y && "Invalid operand");
4737 
4738  // X = sub (0, Y) || X = sub nsw (0, Y)
4739  if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
4740  (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
4741  return true;
4742 
4743  // Y = sub (0, X) || Y = sub nsw (0, X)
4744  if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
4745  (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
4746  return true;
4747 
4748  // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
4749  Value *A, *B;
4750  return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
4751  match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
4752  (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
4753  match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
4754 }
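
// Illustrative IR (a sketch, not from the original source; value names are
// hypothetical): inputs for which isKnownNegation returns true.
//
//   %neg = sub i32 0, %y          ; isKnownNegation(%neg, %y) == true
//   %ab  = sub nsw i32 %a, %b
//   %ba  = sub nsw i32 %b, %a     ; isKnownNegation(%ab, %ba,
//                                 ;                 /*NeedNSW=*/true) == true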
4755 
4756 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
4757  FastMathFlags FMF,
4758  Value *CmpLHS, Value *CmpRHS,
4759  Value *TrueVal, Value *FalseVal,
4760  Value *&LHS, Value *&RHS,
4761  unsigned Depth) {
4762  if (CmpInst::isFPPredicate(Pred)) {
4763  // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
4764  // 0.0 operand, set the compare's 0.0 operands to that same value for the
4765  // purpose of identifying min/max. Disregard vector constants with undefined
4766  // elements because those cannot be back-propagated for analysis.
4767  Value *OutputZeroVal = nullptr;
4768  if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
4769  !cast<Constant>(TrueVal)->containsUndefElement())
4770  OutputZeroVal = TrueVal;
4771  else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
4772  !cast<Constant>(FalseVal)->containsUndefElement())
4773  OutputZeroVal = FalseVal;
4774 
4775  if (OutputZeroVal) {
4776  if (match(CmpLHS, m_AnyZeroFP()))
4777  CmpLHS = OutputZeroVal;
4778  if (match(CmpRHS, m_AnyZeroFP()))
4779  CmpRHS = OutputZeroVal;
4780  }
4781  }
4782 
4783  LHS = CmpLHS;
4784  RHS = CmpRHS;
4785 
4786  // Operations on signed zero may return inconsistent results between
4786  // implementations.
4787  // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
4788  // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
4789  // Therefore, we behave conservatively and only proceed if at least one of the
4790  // operands is known to not be zero or if we don't care about signed zero.
4791  switch (Pred) {
4792  default: break;
4793  // FIXME: Include OGT/OLT/UGT/ULT.
4794  case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
4795  case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
4796  if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4797  !isKnownNonZero(CmpRHS))
4798  return {SPF_UNKNOWN, SPNB_NA, false};
4799  }
4800 
4801  SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
4802  bool Ordered = false;
4803 
4804  // When given one NaN and one non-NaN input:
4805  // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
4806  // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
4807  // ordered comparison fails), which could be NaN or non-NaN.
4808  // So here we discover exactly what NaN behavior is required/accepted.
4809  if (CmpInst::isFPPredicate(Pred)) {
4810  bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
4811  bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
4812 
4813  if (LHSSafe && RHSSafe) {
4814  // Both operands are known non-NaN.
4815  NaNBehavior = SPNB_RETURNS_ANY;
4816  } else if (CmpInst::isOrdered(Pred)) {
4817  // An ordered comparison will return false when given a NaN, so it
4818  // returns the RHS.
4819  Ordered = true;
4820  if (LHSSafe)
4821  // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
4822  NaNBehavior = SPNB_RETURNS_NAN;
4823  else if (RHSSafe)
4824  NaNBehavior = SPNB_RETURNS_OTHER;
4825  else
4826  // Completely unsafe.
4827  return {SPF_UNKNOWN, SPNB_NA, false};
4828  } else {
4829  Ordered = false;
4830  // An unordered comparison will return true when given a NaN, so it
4831  // returns the LHS.
4832  if (LHSSafe)
4833  // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
4834  NaNBehavior = SPNB_RETURNS_OTHER;
4835  else if (RHSSafe)
4836  NaNBehavior = SPNB_RETURNS_NAN;
4837  else
4838  // Completely unsafe.
4839  return {SPF_UNKNOWN, SPNB_NA, false};
4840  }
4841  }
4842 
4843  if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
4844  std::swap(CmpLHS, CmpRHS);
4845  Pred = CmpInst::getSwappedPredicate(Pred);
4846  if (NaNBehavior == SPNB_RETURNS_NAN)
4847  NaNBehavior = SPNB_RETURNS_OTHER;
4848  else if (NaNBehavior == SPNB_RETURNS_OTHER)
4849  NaNBehavior = SPNB_RETURNS_NAN;
4850  Ordered = !Ordered;
4851  }
4852 
4853  // ([if]cmp X, Y) ? X : Y
4854  if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
4855  switch (Pred) {
4856  default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
4857  case ICmpInst::ICMP_UGT:
4858  case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
4859  case ICmpInst::ICMP_SGT:
4860  case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
4861  case ICmpInst::ICMP_ULT:
4862  case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
4863  case ICmpInst::ICMP_SLT:
4864  case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
4865  case FCmpInst::FCMP_UGT:
4866  case FCmpInst::FCMP_UGE:
4867  case FCmpInst::FCMP_OGT:
4868  case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
4869  case FCmpInst::FCMP_ULT:
4870  case FCmpInst::FCMP_ULE:
4871  case FCmpInst::FCMP_OLT:
4872  case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
4873  }
4874  }
4875 
4876  if (isKnownNegation(TrueVal, FalseVal)) {
4877  // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
4878  // match against either LHS or sext(LHS).
4879  auto MaybeSExtCmpLHS =
4880  m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
4881  auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
4882  auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
4883  if (match(TrueVal, MaybeSExtCmpLHS)) {
4884  // Set the return values. If the compare uses the negated value (-X >s 0),
4885  // swap the return values because the negated value is always 'RHS'.
4886  LHS = TrueVal;
4887  RHS = FalseVal;
4888  if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
4889  std::swap(LHS, RHS);
4890 
4891  // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
4892  // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
4893  if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
4894  return {SPF_ABS, SPNB_NA, false};
4895 
4896  // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
4897  if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
4898  return {SPF_ABS, SPNB_NA, false};
4899 
4900  // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
4901  // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
4902  if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
4903  return {SPF_NABS, SPNB_NA, false};
4904  }
4905  else if (match(FalseVal, MaybeSExtCmpLHS)) {
4906  // Set the return values. If the compare uses the negated value (-X >s 0),
4907  // swap the return values because the negated value is always 'RHS'.
4908  LHS = FalseVal;
4909  RHS = TrueVal;
4910  if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
4911  std::swap(LHS, RHS);
4912 
4913  // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
4914  // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
4915  if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
4916  return {SPF_NABS, SPNB_NA, false};
4917 
4918  // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
4919  // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
4920  if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
4921  return {SPF_ABS, SPNB_NA, false};
4922  }
4923  }
4924 
4925  if (CmpInst::isIntPredicate(Pred))
4926  return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
4927 
4928  // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
4929  // may return either -0.0 or 0.0, so an fcmp/select pair has stricter
4930  // semantics than minNum. Be conservative in such cases.
4931  if (NaNBehavior != SPNB_RETURNS_ANY ||
4932  (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4933  !isKnownNonZero(CmpRHS)))
4934  return {SPF_UNKNOWN, SPNB_NA, false};
4935 
4936  return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4937 }
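
// Illustrative IR (a sketch, not from the original source; value names are
// hypothetical) for the abs(X) idiom recognized above via isKnownNegation.
// TrueVal matches CmpLHS and CmpRHS is -1, so (X >s -1) ? X : -X --> ABS(X):
//
//   %neg = sub i32 0, %x
//   %cmp = icmp sgt i32 %x, -1
//   %abs = select i1 %cmp, i32 %x, i32 %neg          ; SPF_ABS, LHS = %x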
4938 
4939 /// Helps to match a select pattern in case of a type mismatch.
4940 ///
4941 /// The function handles the case where the types of the true and false values
4942 /// of a select instruction differ from the types of the cmp instruction's
4943 /// operands because of a cast instruction. It checks whether it is legal to
4944 /// move the cast after the "select". If so, it returns the new second value of
4945 /// the "select" (with the assumption that the cast has been moved):
4946 /// 1. The operand of the cast instruction, when both values of the "select"
4947 /// are the same kind of cast instruction.
4948 /// 2. The restored constant (obtained by applying the reverse cast operation),
4949 /// when the first value of the "select" is a cast operation and the second
4950 /// value is a constant.
4951 /// NOTE: We return only the new second value because the first value can be
4952 /// accessed as an operand of the cast instruction.
4953 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
4954  Instruction::CastOps *CastOp) {
4955  auto *Cast1 = dyn_cast<CastInst>(V1);
4956  if (!Cast1)
4957  return nullptr;
4958 
4959  *CastOp = Cast1->getOpcode();
4960  Type *SrcTy = Cast1->getSrcTy();
4961  if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
4962  // If V1 and V2 are both the same cast from the same type, look through V1.
4963  if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
4964  return Cast2->getOperand(0);
4965  return nullptr;
4966  }
4967 
4968  auto *C = dyn_cast<Constant>(V2);
4969  if (!C)
4970  return nullptr;
4971 
4972  Constant *CastedTo = nullptr;
4973  switch (*CastOp) {
4974  case Instruction::ZExt:
4975  if (CmpI->isUnsigned())
4976  CastedTo = ConstantExpr::getTrunc(C, SrcTy);
4977  break;
4978  case Instruction::SExt:
4979  if (CmpI->isSigned())
4980  CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
4981  break;
4982  case Instruction::Trunc:
4983  Constant *CmpConst;
4984  if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
4985  CmpConst->getType() == SrcTy) {
4986  // Here we have the following case:
4987  //
4988  // %cond = cmp iN %x, CmpConst
4989  // %tr = trunc iN %x to iK
4990  // %narrowsel = select i1 %cond, iK %tr, iK C
4991  //
4992  // We can always move the trunc after the select operation:
4993  //
4994  // %cond = cmp iN %x, CmpConst
4995  // %widesel = select i1 %cond, iN %x, iN CmpConst
4996  // %tr = trunc iN %widesel to iK
4997  //
4998  // Note that C could be extended in any way because we don't care about
4999  // the upper bits after truncation. It can't be an abs pattern, because
5000  // that would look like:
5001  //
5002  // select i1 %cond, x, -x.
5003  //
5004  // So only a min/max pattern could be matched. Such a match requires the
5005  // widened C == CmpConst. That is why we set the widened C to CmpConst;
5006  // the condition trunc(CmpConst) == C is checked below.
5007  CastedTo = CmpConst;
5008  } else {
5009  CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
5010  }
5011  break;
5012  case Instruction::FPTrunc:
5013  CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
5014  break;
5015  case Instruction::FPExt:
5016  CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
5017  break;
5018  case Instruction::FPToUI:
5019  CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
5020  break;
5021  case Instruction::FPToSI:
5022  CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
5023  break;
5024  case Instruction::UIToFP:
5025  CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
5026  break;
5027  case Instruction::SIToFP:
5028  CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
5029  break;
5030  default:
5031  break;
5032  }
5033 
5034  if (!CastedTo)
5035  return nullptr;
5036 
5037  // Make sure the cast doesn't lose any information.
5038  Constant *CastedBack =
5039  ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
5040  if (CastedBack != C)
5041  return nullptr;
5042 
5043  return CastedTo;
5044 }
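
// Illustrative IR (a sketch, not from the original source; value names are
// hypothetical) for the Trunc case above. The select is narrower than the
// compare, and the constant arm round-trips through the cast:
//
//   %cond = icmp slt i32 %x, 42
//   %tr   = trunc i32 %x to i8
//   %r    = select i1 %cond, i8 %tr, i8 42
//
// lookThroughCast returns i32 42, and the recursive match then sees
// (%x <s 42) ? %x : 42, i.e. SPF_SMIN, with the trunc moved below the select.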
5045 
5046 SelectPatternResult llvm::matchSelectPattern(const Value *V, Value *&LHS, Value *&RHS,
5047  Instruction::CastOps *CastOp,
5048  unsigned Depth) {
5049  if (Depth >= MaxDepth)
5050  return {SPF_UNKNOWN, SPNB_NA, false};
5051 
5052  SelectInst *SI = dyn_cast<SelectInst>(const_cast<Value *>(V));
5053  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
5054 
5055  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
5056  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
5057 
5058  Value *TrueVal = SI->getTrueValue();
5059  Value *FalseVal = SI->getFalseValue();
5060 
5061  return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
5062  CastOp, Depth);
5063 }
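
// Usage sketch (not from the original source): how a caller might classify a
// possible min/max select. `V` is assumed to be some `Value *` already in
// hand; CastOp and Depth take their defaults here.
//
//   Value *LHS, *RHS;
//   SelectPatternResult SPR = matchSelectPattern(V, LHS, RHS);
//   if (SelectPatternResult::isMinOrMax(SPR.Flavor)) {
//     // SPR.Flavor is one of SPF_SMIN/SPF_SMAX/SPF_UMIN/SPF_UMAX or an FP
//     // min/max flavor; LHS/RHS are the operands of that min/max.
//   }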
5064 
5065 SelectPatternResult llvm::matchDecomposedSelectPattern(
5066  CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
5067  Instruction::CastOps *CastOp, unsigned Depth) {
5068  CmpInst::Predicate Pred = CmpI->getPredicate();
5069  Value *CmpLHS = CmpI->getOperand(0);
5070  Value *CmpRHS = CmpI->getOperand(1);
5071  FastMathFlags FMF;
5072  if (isa<FPMathOperator>(CmpI))
5073  FMF = CmpI->getFastMathFlags();
5074 
5075  // Bail out early.
5076  if (CmpI->isEquality())
5077  return {SPF_UNKNOWN, SPNB_NA, false};
5078 
5079  // Deal with type mismatches.
5080  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
5081  if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
5082  // If this is a potential fmin/fmax with a cast to integer, then ignore
5083  // -0.0 because there is no corresponding integer value.
5084  if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5085  FMF.setNoSignedZeros();
5086  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5087  cast<CastInst>(TrueVal)->getOperand(0), C,
5088  LHS, RHS, Depth);
5089  }
5090  if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
5091  // If this is a potential fmin/fmax with a cast to integer, then ignore
5092  // -0.0 because there is no corresponding integer value.
5093  if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5094  FMF.setNoSignedZeros();
5095  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5096  C, cast<CastInst>(FalseVal)->getOperand(0),
5097  LHS, RHS, Depth);
5098  }
5099  }
5100  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
5101  LHS, RHS, Depth);
5102 }
5103 
5104 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
5105  if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
5106  if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
5107  if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
5108  if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
5109  if (SPF == SPF_FMINNUM)
5110  return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
5111  if (SPF == SPF_FMAXNUM)
5112  return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
5113  llvm_unreachable("unhandled!");
5114 }
5115 
5116 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
5117  if (SPF == SPF_SMIN) return SPF_SMAX;
5118  if (SPF == SPF_UMIN) return SPF_UMAX;
5119  if (SPF == SPF_SMAX) return SPF_SMIN;
5120  if (SPF == SPF_UMAX) return SPF_UMIN;
5121  llvm_unreachable("unhandled!");
5122 }
5123 
5124 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
5125  return getMinMaxPred(getInverseMinMaxFlavor(SPF));
5126 }
5127 
5128 /// Return true if "icmp Pred LHS RHS" is always true.
5129 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
5130  const Value *RHS, const DataLayout &DL,
5131  unsigned Depth) {
5132  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
5133  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
5134  return true;
5135 
5136  switch (Pred) {
5137  default:
5138  return false;
5139 
5140  case CmpInst::ICMP_SLE: {
5141  const APInt *C;
5142 
5143  // LHS s<= LHS +_{nsw} C if C >= 0
5144  if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
5145  return !C->isNegative();
5146  return false;
5147  }
5148 
5149  case CmpInst::ICMP_ULE: {
5150  const APInt *C;
5151 
5152  // LHS u<= LHS +_{nuw} C for any C
5153  if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
5154  return true;
5155 
5156  // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
5157  auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
5158  const Value *&X,
5159  const APInt *&CA, const APInt *&CB) {
5160  if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&