//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getIndexTypeSizeInBits(Ty);
}

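// Example for getBitWidth above (illustrative): for i32 this returns 32, for
// <4 x i16> it returns 16 (the element width), and for a pointer type
// getScalarSizeInBits() is 0, so the DataLayout's index width for that
// pointer type is used instead (typically 64 on a 64-bit target).
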
namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
  /// (all of which can call computeKnownBits), and so on.
  std::array<const Value *, MaxDepth> Excluded;

  unsigned NumExcluded = 0;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static void computeKnownBits(const Value *V, KnownBits &Known,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE) {
  return ::computeKnownBits(V, Depth,
                            Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL,
                               AssumptionCache *AC, const Instruction *CxtI,
                               const DominatorTree *DT) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
  Value *M;
  if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(RHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(LHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT);
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}

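// Worked example for haveNoCommonBitsSet above (illustrative): with i8 values
// and M = 0x0F,
//   LHS = X & ~M can only have bits in 0xF0 set, and
//   RHS = Y &  M can only have bits in 0x0F set,
// so no bit position can be set in both, and callers can rewrite
// LHS + RHS as LHS | RHS.
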
bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                  Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth,
                              AssumptionCache *AC, const Instruction *CxtI,
                              const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  return ::isKnownNonEqual(V1, V2, Query(DL, AC,
                                         safeCxtI(V1, safeCxtI(V2, CxtI)),
                                         DT));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL,
                             unsigned Depth, AssumptionCache *AC,
                             const Instruction *CxtI, const DominatorTree *DT) {
  return ::MaskedValueIsZero(V, Mask, Depth,
                             Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownOut.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  KnownBits LHSKnown(BitWidth);
  computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
  computeKnownBits(Op1, Known2, Depth + 1, Q);

  KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
}

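// Worked example for computeKnownBitsAddSub above (illustrative): for an i8
// add where the LHS is known to be xxxx0000 and the RHS is known to be
// 00000011, no carry can leave the low four bits (the LHS contributes nothing
// there), so computeForAddSub can conclude the sum is known to be xxxx0011.
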
static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                KnownBits &Known, KnownBits &Known2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(Op1, Known, Depth + 1, Q);
  computeKnownBits(Op0, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  assert(!Known.hasConflict() && !Known2.hasConflict());
  // Compute a conservative estimate for high known-0 bits.
  unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                                Known2.countMinLeadingZeros(),
                            BitWidth) - BitWidth;
  LeadZ = std::min(LeadZ, BitWidth);

  // The result of the bottom bits of an integer multiply can be
  // inferred by looking at the bottom bits of both operands and
  // multiplying them together.
  // We can infer at least the minimum number of known trailing bits
  // of both operands. Depending on number of trailing zeros, we can
  // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
  // a and b are divisible by m and n respectively.
  // We then calculate how many of those bits are inferrable and set
  // the output. For example, the i8 mul:
  //  a = XXXX1100 (12)
  //  b = XXXX1110 (14)
  // We know the bottom 3 bits are zero since the first can be divided by
  // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
  // Applying the multiplication to the trimmed arguments gets:
  //    XX11 (3)
  //    X111 (7)
  // -------
  //    XX11
  //   XX11
  //  XX11
  // XX11
  // -------
  // XXXXX01
  // Which allows us to infer the 2 LSBs. Since we're multiplying the result
  // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
  // The proof for this can be described as:
  // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
  //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
  //                    umin(countTrailingZeros(C2), C6) +
  //                    umin(C5 - umin(countTrailingZeros(C1), C5),
  //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
  // %aa = shl i8 %a, C5
  // %bb = shl i8 %b, C6
  // %aaa = or i8 %aa, C1
  // %bbb = or i8 %bb, C2
  // %mul = mul i8 %aaa, %bbb
  // %mask = and i8 %mul, C7
  //   =>
  // %mask = i8 ((C1*C2)&C7)
  // Where C5, C6 describe the known bits of %a, %b
  // C1, C2 describe the known bottom bits of %a, %b.
  // C7 describes the mask of the known bits of the result.
  APInt Bottom0 = Known.One;
  APInt Bottom1 = Known2.One;

  // How many times we'd be able to divide each argument by 2 (shr by 1).
  // This gives us the number of trailing zeros on the multiplication result.
  unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
  unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
  unsigned TrailZero0 = Known.countMinTrailingZeros();
  unsigned TrailZero1 = Known2.countMinTrailingZeros();
  unsigned TrailZ = TrailZero0 + TrailZero1;

  // Figure out the fewest known-bits operand.
  unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
                                      TrailBitsKnown1 - TrailZero1);
  unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);

  APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
                      Bottom1.getLoBits(TrailBitsKnown1);

  Known.resetAll();
  Known.Zero.setHighBits(LeadZ);
  Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
  Known.One |= BottomKnown.getLoBits(ResultBitsKnown);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    Known.One &= Range.getUnsignedMax() & Mask;
    Known.Zero &= ~Range.getUnsignedMax() & Mask;
  }
}

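// Worked example for the function above (illustrative): for !range [32, 64)
// on an i8 value, every possible value has the form 001xxxxx, so
// UnsignedMax (63) ^ UnsignedMin (32) == 00011111 has three leading zeros,
// CommonPrefixBits is 3, and the loop records Known.Zero = 11000000 and
// Known.One = 00100000.
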
static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
                       return EphValues.count(U);
                     })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
               J != JE; ++J)
            WorkSet.push_back(*J);
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::sideeffect:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::dbg_label:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
              IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
         std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}

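// Illustrative IR example for the function above: in
//   %cmp = icmp ne i32 %x, 0
//   call void @llvm.assume(i1 %cmp)
//   %q = udiv i32 %y, %x
// the assume is a valid context for %q (same block, and the assume comes
// first), whereas it would not be valid for an instruction that can be
// reached without passing through the assume.
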
static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                                       m_BitCast(m_Specific(V))));

    CmpInst::Predicate Pred;
    uint64_t C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      Known.Zero |= RHSKnown.Zero;
      Known.One |= RHSKnown.One;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & MaskKnown.One;
      Known.One |= RHSKnown.One & MaskKnown.One;
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & MaskKnown.One;
      Known.One |= RHSKnown.Zero & MaskKnown.One;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      Known.Zero |= RHSKnown.One & BKnown.One;
      Known.One |= RHSKnown.Zero & BKnown.One;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      Known.Zero |= RHSKnown.Zero & BKnown.One;
      Known.One |= RHSKnown.One & BKnown.One;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      RHSKnown.Zero.lshrInPlace(C);
      Known.Zero |= RHSKnown.Zero;
      RHSKnown.One.lshrInPlace(C);
      Known.One |= RHSKnown.One;
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      RHSKnown.One.lshrInPlace(C);
      Known.Zero |= RHSKnown.One;
      RHSKnown.Zero.lshrInPlace(C);
      Known.One |= RHSKnown.Zero;
    // assume(v >> c = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                              m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      Known.Zero |= RHSKnown.Zero << C;
      Known.One |= RHSKnown.One << C;
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      Known.Zero |= RHSKnown.One << C;
      Known.One |= RHSKnown.Zero << C;
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isZero() || RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // If the RHS is known zero, then this assumption must be wrong (nothing
      // is unsigned less than zero). Signal a conflict and get out of here.
      if (RHSKnown.isZero()) {
        Known.Zero.setAllBits();
        Known.One.setAllBits();
        break;
      }

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
      else
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

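// Worked example for the assume(v & b = a) case above (illustrative): if b is
// known to have its low byte all ones and a is known to equal 0x5A in that
// byte, every low-byte bit of v is pinned: Known.One gains 0x5A and
// Known.Zero gains 0xA5. Bits of v outside the known-one bits of the mask
// remain unknown.
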
/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
/// operator-specific functions that, given the known-zero or known-one bits
/// respectively, and a shift amount, compute the implied known-zero or
/// known-one bits of the shift operator's result respectively for that shift
/// amount. The results from calling KZF and KOF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, KnownBits &Known, KnownBits &Known2,
    unsigned Depth, const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = Known.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known.Zero = KZF(Known.Zero, ShiftAmt);
    Known.One = KOF(Known.One, ShiftAmt);
    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive. TODO: Should we just carry on?
  if ((~Known.Zero).uge(BitWidth)) {
    Known.resetAll();
    return;
  }

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();

  // It would be more clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known.Zero &= KZF(Known2.Zero, ShiftAmt);
    Known.One &= KOF(Known2.One, ShiftAmt);
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}

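// Worked example for the function above (illustrative): for %r = shl i8 %x,
// %s where only bit 1 of %s can be set (%s is known 000000?0), the loop
// combines the results for ShiftAmt = 0 and ShiftAmt = 2 only. If %x is known
// xxxx1111, intersecting xxxx1111 (shift by 0) with xx111100 (shift by 2)
// leaves %r known as xxxx11xx.
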
static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
                                         unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(Known);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    Known.Zero |= Known2.Zero;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form add(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *X = nullptr, *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = std::move(KnownZeroOut);
    break;
  }
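  // Worked example for the Xor case above (illustrative): with i8 operands
  // known as xxxx1100 and xxxx1010, the low nibble of the result is known to
  // be 0110; a result bit is known exactly when both operand bits are known,
  // whatever their values.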
  case Instruction::Mul: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
                        Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2.resetAll();
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
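  // Worked example for the UDiv case above (illustrative): if the i8
  // numerator is known to be at most 31 (three leading zeros) and the
  // denominator is known to be at least 8 (at most four leading zeros), the
  // quotient is at most 31/8 = 3, so LeadZ becomes min(8, 3 + 8 - 4 - 1) = 6
  // and the top six bits are known zero.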
  case Instruction::Select: {
    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (Known.isNegative() && Known2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (Known.isNonNegative() || Known2.isNonNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (Known.isNonNegative() && Known2.isNonNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(Known.countMinLeadingZeros(),
                                Known2.countMinLeadingZeros());
      // If either side is negative, the result is negative.
      else if (Known.isNegative() || Known2.isNegative())
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    } else if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (cast<Instruction>(RHS)->hasNoSignedWrap())
        MaxHighZeros = 1;
    }

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    if (MaxHighOnes > 0)
      Known.One.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      Known.Zero.setHighBits(MaxHighZeros);
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getIndexTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.zextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      Known.Zero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isSignBitSet())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isSignBitSet())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::LShr: {
    // (lshr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero.lshr(ShiftAmt);
      // High bits known zero.
      KZResult.setHighBits(ShiftAmt);
      return KZResult;
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;

        assert((Known.Zero & Known.One) == 0 &&
               "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    // If it's known zero, our sign bit is also zero.
    if (Known2.isNonNegative())
      Known.makeNonNegative();

    break;
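  // Worked example for the power-of-two SRem case above (illustrative): for
  // %r = srem i32 %x, 8, LowBits is 7; if %x is known non-negative the result
  // equals %x & 7, so all bits above the low three are known zero.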
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        Known.Zero |= ~LowBits;
        Known.One &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    unsigned Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }

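  // Worked example for the URem case above (illustrative): for
  // %r = urem i8 %x, 16 the result is %x & 15, so the high four bits are
  // known zero and the low four bits are exactly the low four known bits
  // of %x.
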
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      Known.Zero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    KnownBits LocalKnown(BitWidth);
    computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
    unsigned TrailZ = LocalKnown.countMinTrailingZeros();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnown, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnown.countMinTrailingZeros()));
      }
    }

    Known.Zero.setLowBits(TrailZ);
    break;
  }
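  // Worked example for the GetElementPtr case above (illustrative): indexing
  // into [4 x i32] from an 8-byte aligned base pointer (three known trailing
  // zero bits) with an arbitrary index contributes countTrailingZeros(4) = 2
  // trailing zero bits from the i32 element size, so the resulting pointer
  // keeps min(3, 2) = 2 known low zero bits.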
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          computeKnownBits(R, Known2, Depth + 1, Q);

          // We need to take the minimum number of known bits
          KnownBits Known3(Known);
          computeKnownBits(L, Known3, Depth + 1, Q);

          Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                         Known3.countMinTrailingZeros()));

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
            // If initial value of recurrence is nonnegative, and we are adding
            // a nonnegative number with nsw, the result can only be nonnegative
            // or poison value regardless of the number of times we execute the
            // add in phi recurrence. If initial value is negative and we are
            // adding a negative number with nsw, the result can only be
            // negative or poison value. Similar arguments apply to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (Known2.isNonNegative() && Known3.isNonNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNegative())
                Known.makeNegative();
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (Known2.isNonNegative() && Known3.isNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNonNegative())
                Known.makeNegative();
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                     Known3.isNonNegative())
              Known.makeNonNegative();
          }

          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value refers back to the PHI itself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        Known2 = KnownBits(BitWidth);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
        Known.Zero &= Known2.Zero;
        Known.One &= Known2.One;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!Known.Zero && !Known.One)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
    if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known.Zero |= Known2.Zero;
      Known.One |= Known2.One;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.reverseBits();
        Known.One |= Known2.One.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.byteSwap();
        Known.One |= Known2.One.byteSwap();
        break;
      case Intrinsic::ctlz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleLZ = Known2.One.countLeadingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleLZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
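      // Worked example for the ctlz case above (illustrative): for ctlz on an
      // i32 whose bit 20 is known one, the result is at most 11, so
      // PossibleLZ = 11, LowBits = Log2_32(11) + 1 = 4, and bits 4 and above
      // of the result are known zero.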
1476  case Intrinsic::cttz: {
1477  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1478  // If we have a known 1, its position is our upper bound.
1479  unsigned PossibleTZ = Known2.One.countTrailingZeros();
1480  // If this call is undefined for 0, the result will be less than 2^n.
1481  if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1482  PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1483  unsigned LowBits = Log2_32(PossibleTZ)+1;
1484  Known.Zero.setBitsFrom(LowBits);
1485  break;
1486  }
1487  case Intrinsic::ctpop: {
1488  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1489  // We can bound the space the count needs. Also, bits known to be zero
1490  // can't contribute to the population.
1491  unsigned BitsPossiblySet = Known2.countMaxPopulation();
1492  unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1493  Known.Zero.setBitsFrom(LowBits);
1494  // TODO: we could bound Known.One using the lower bound on the number
1495  // of bits which might be set, as given by the popcount of Known2.One.
1496  break;
1497  }
1498  case Intrinsic::x86_sse42_crc32_64_64:
1499  Known.Zero.setBitsFrom(32);
1500  break;
1501  }
1502  }
1503  break;
1504  case Instruction::ExtractElement:
1505  // Look through extract element. At the moment we keep this simple and skip
1506  // tracking the specific element. But at least we might find information
1507  // valid for all elements of the vector (for example if the vector is sign
1508  // extended, shifted, etc.).
1509  computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1510  break;
1511  case Instruction::ExtractValue:
1512  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1513  const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1514  if (EVI->getNumIndices() != 1) break;
1515  if (EVI->getIndices()[0] == 0) {
1516  switch (II->getIntrinsicID()) {
1517  default: break;
1518  case Intrinsic::uadd_with_overflow:
1519  case Intrinsic::sadd_with_overflow:
1520  computeKnownBitsAddSub(true, II->getArgOperand(0),
1521  II->getArgOperand(1), false, Known, Known2,
1522  Depth, Q);
1523  break;
1524  case Intrinsic::usub_with_overflow:
1525  case Intrinsic::ssub_with_overflow:
1526  computeKnownBitsAddSub(false, II->getArgOperand(0),
1527  II->getArgOperand(1), false, Known, Known2,
1528  Depth, Q);
1529  break;
1530  case Intrinsic::umul_with_overflow:
1531  case Intrinsic::smul_with_overflow:
1532  computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1533  Known, Known2, Depth, Q);
1534  break;
1535  }
1536  }
1537  }
1538  }
1539 }
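// Illustrative sketch (not part of the original source): the bswap case above
// byte-swaps the operand's known bits into the result. For example:
//
//   %m = and i32 %v, 255                    ; Known.Zero = 0xFFFFFF00
//   %s = call i32 @llvm.bswap.i32(i32 %m)   ; Known.Zero = 0x00FFFFFF
//
// The low three bytes of %s are known zero because the high three bytes of %m
// were known zero before the swap.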
1540 
1541 /// Determine which bits of V are known to be either zero or one and return
1542 /// them.
1543 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1544  KnownBits Known(getBitWidth(V->getType(), Q.DL));
1545  computeKnownBits(V, Known, Depth, Q);
1546  return Known;
1547 }
1548 
1549 /// Determine which bits of V are known to be either zero or one and return
1550 /// them in the Known bit set.
1551 ///
1552 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1553 /// we cannot optimize based on the assumption that it is zero without changing
1554 /// it to be an explicit zero. If we don't change it to zero, other code could
1555 /// be optimized based on the contradictory assumption that it is non-zero.
1556 /// Because instcombine aggressively folds operations with undef args anyway,
1557 /// this won't lose us code quality.
1558 ///
1559 /// This function is defined on values with integer type, values with pointer
1560 /// type, and vectors of integers. In the case where V is a vector, the known
1561 /// zero and known one values are the same width as the vector element, and a
1562 /// bit is set only if it is true for all of the elements in the vector.
1564 void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
1565  const Query &Q) {
1566  assert(V && "No Value?");
1567  assert(Depth <= MaxDepth && "Limit Search Depth");
1568  unsigned BitWidth = Known.getBitWidth();
1569 
1570  assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
1571  V->getType()->isPtrOrPtrVectorTy()) &&
1572  "Not integer or pointer type!");
1573 
1574  Type *ScalarTy = V->getType()->getScalarType();
1575  unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
1576  Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
1577  assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
1578  (void)BitWidth;
1579  (void)ExpectedWidth;
1580 
1581  const APInt *C;
1582  if (match(V, m_APInt(C))) {
1583  // We know all of the bits for a scalar constant or a splat vector constant!
1584  Known.One = *C;
1585  Known.Zero = ~Known.One;
1586  return;
1587  }
1588  // Null and aggregate-zero are all-zeros.
1589  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1590  Known.setAllZero();
1591  return;
1592  }
1593  // Handle a constant vector by taking the intersection of the known bits of
1594  // each element.
1595  if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1596  // We know that CDS must be a vector of integers. Take the intersection of
1597  // each element.
1598  Known.Zero.setAllBits(); Known.One.setAllBits();
1599  for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1600  APInt Elt = CDS->getElementAsAPInt(i);
1601  Known.Zero &= ~Elt;
1602  Known.One &= Elt;
1603  }
1604  return;
1605  }
1606 
1607  if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1608  // We know that CV must be a vector of integers. Take the intersection of
1609  // each element.
1610  Known.Zero.setAllBits(); Known.One.setAllBits();
1611  for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1612  Constant *Element = CV->getAggregateElement(i);
1613  auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1614  if (!ElementCI) {
1615  Known.resetAll();
1616  return;
1617  }
1618  const APInt &Elt = ElementCI->getValue();
1619  Known.Zero &= ~Elt;
1620  Known.One &= Elt;
1621  }
1622  return;
1623  }
1624 
1625  // Start out not knowing anything.
1626  Known.resetAll();
1627 
1628  // We can't infer anything about undefs.
1629  if (isa<UndefValue>(V))
1630  return;
1631 
1632  // There's no point in looking through other users of ConstantData for
1633  // assumptions. Confirm that we've handled them all.
1634  assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1635 
1636  // Limit search depth.
1637  // All recursive calls that increase depth must come after this.
1638  if (Depth == MaxDepth)
1639  return;
1640 
1641  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1642  // the bits of its aliasee.
1643  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1644  if (!GA->isInterposable())
1645  computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1646  return;
1647  }
1648 
1649  if (const Operator *I = dyn_cast<Operator>(V))
1650  computeKnownBitsFromOperator(I, Known, Depth, Q);
1651 
1652  // Aligned pointers have trailing zeros - refine Known.Zero set
1653  if (V->getType()->isPointerTy()) {
1654  unsigned Align = V->getPointerAlignment(Q.DL);
1655  if (Align)
1656  Known.Zero.setLowBits(countTrailingZeros(Align));
1657  }
1658 
1659  // computeKnownBitsFromAssume strictly refines Known.
1660  // Therefore, we run it after computeKnownBitsFromOperator.
1661 
1662  // Check whether a nearby assume intrinsic can determine some known bits.
1663  computeKnownBitsFromAssume(V, Known, Depth, Q);
1664 
1665  assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1666 }
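// Minimal usage sketch (illustrative; V and DL are placeholders). Clients do
// not build a Query directly; they call the public overloads declared in
// llvm/Analysis/ValueTracking.h, which forward to the helpers above:
//
//   KnownBits KB = computeKnownBits(V, DL);  // optionally pass AC, CxtI, DT
//   if (KB.isNonNegative())
//     ...  // the sign bit of V is known to be zero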
1667 
1668 /// Return true if the given value is known to have exactly one
1669 /// bit set when defined. For vectors return true if every element is known to
1670 /// be a power of two when defined. Supports values with integer or pointer
1671 /// types and vectors of integers.
1672 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1673  const Query &Q) {
1674  assert(Depth <= MaxDepth && "Limit Search Depth");
1675 
1676  // Attempt to match against constants.
1677  if (OrZero && match(V, m_Power2OrZero()))
1678  return true;
1679  if (match(V, m_Power2()))
1680  return true;
1681 
1682  // 1 << X is clearly a power of two if the one is not shifted off the end. If
1683  // it is shifted off the end then the result is undefined.
1684  if (match(V, m_Shl(m_One(), m_Value())))
1685  return true;
1686 
1687  // (signmask) >>l X is clearly a power of two if the one is not shifted off
1688  // the bottom. If it is shifted off the bottom then the result is undefined.
1689  if (match(V, m_LShr(m_SignMask(), m_Value())))
1690  return true;
1691 
1692  // The remaining tests are all recursive, so bail out if we hit the limit.
1693  if (Depth++ == MaxDepth)
1694  return false;
1695 
1696  Value *X = nullptr, *Y = nullptr;
1697  // A shift left or a logical shift right of a power of two is a power of two
1698  // or zero.
1699  if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1700  match(V, m_LShr(m_Value(X), m_Value()))))
1701  return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1702 
1703  if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1704  return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1705 
1706  if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1707  return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1708  isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1709 
1710  if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1711  // A power of two and'd with anything is a power of two or zero.
1712  if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1713  isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1714  return true;
1715  // X & (-X) is always a power of two or zero.
1716  if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1717  return true;
1718  return false;
1719  }
1720 
1721  // Adding a power-of-two or zero to the same power-of-two or zero yields
1722  // either the original power-of-two, a larger power-of-two or zero.
1723  if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1724  const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1725  if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
1726  if (match(X, m_And(m_Specific(Y), m_Value())) ||
1727  match(X, m_And(m_Value(), m_Specific(Y))))
1728  if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1729  return true;
1730  if (match(Y, m_And(m_Specific(X), m_Value())) ||
1731  match(Y, m_And(m_Value(), m_Specific(X))))
1732  if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1733  return true;
1734 
1735  unsigned BitWidth = V->getType()->getScalarSizeInBits();
1736  KnownBits LHSBits(BitWidth);
1737  computeKnownBits(X, LHSBits, Depth, Q);
1738 
1739  KnownBits RHSBits(BitWidth);
1740  computeKnownBits(Y, RHSBits, Depth, Q);
1741  // If i8 V is a power of two or zero:
1742  // ZeroBits: 1 1 1 0 1 1 1 1
1743  // ~ZeroBits: 0 0 0 1 0 0 0 0
1744  if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
1745  // If OrZero isn't set, we cannot give back a zero result.
1746  // Make sure either the LHS or RHS has a bit set.
1747  if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
1748  return true;
1749  }
1750  }
1751 
1752  // An exact divide or right shift can only shift off zero bits, so the result
1753  // is a power of two only if the first operand is a power of two and not
1754  // copying a sign bit (sdiv int_min, 2).
1755  if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1756  match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1757  return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1758  Depth, Q);
1759  }
1760 
1761  return false;
1762 }
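// Illustrative IR patterns this function accepts (not part of the original
// source):
//
//   %p = shl i32 1, %x               ; 1 << X is a power of two
//   %n = sub i32 0, %x
//   %q = and i32 %x, %n              ; X & -X, a power of two or zero (OrZero)
//   %r = select i1 %c, i32 8, i32 64 ; both arms are powers of two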
1763 
1764 /// Test whether a GEP's result is known to be non-null.
1765 ///
1766 /// Uses properties inherent in a GEP to try to determine whether it is known
1767 /// to be non-null.
1768 ///
1769 /// Currently this routine does not support vector GEPs.
1770 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1771  const Query &Q) {
1772  const Function *F = nullptr;
1773  if (const Instruction *I = dyn_cast<Instruction>(GEP))
1774  F = I->getFunction();
1775 
1776  if (!GEP->isInBounds() ||
1777  NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
1778  return false;
1779 
1780  // FIXME: Support vector-GEPs.
1781  assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1782 
1783  // If the base pointer is non-null, we cannot walk to a null address with an
1784  // inbounds GEP in address space zero.
1785  if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1786  return true;
1787 
1788  // Walk the GEP operands and see if any operand introduces a non-zero offset.
1789  // If so, then the GEP cannot produce a null pointer, as doing so would
1790  // inherently violate the inbounds contract within address space zero.
1791  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1792  GTI != GTE; ++GTI) {
1793  // Struct types are easy -- they must always be indexed by a constant.
1794  if (StructType *STy = GTI.getStructTypeOrNull()) {
1795  ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1796  unsigned ElementIdx = OpC->getZExtValue();
1797  const StructLayout *SL = Q.DL.getStructLayout(STy);
1798  uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1799  if (ElementOffset > 0)
1800  return true;
1801  continue;
1802  }
1803 
1804  // If we have a zero-sized type, the index doesn't matter. Keep looping.
1805  if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1806  continue;
1807 
1808  // Fast path the constant operand case both for efficiency and so we don't
1809  // increment Depth when just zipping down an all-constant GEP.
1810  if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1811  if (!OpC->isZero())
1812  return true;
1813  continue;
1814  }
1815 
1816  // We post-increment Depth here because while isKnownNonZero increments it
1817  // as well, when we pop back up that increment won't persist. We don't want
1818  // to recurse 10k times just because we have 10k GEP operands. We don't
1819  // bail completely out because we want to handle constant GEPs regardless
1820  // of depth.
1821  if (Depth++ >= MaxDepth)
1822  continue;
1823 
1824  if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1825  return true;
1826  }
1827 
1828  return false;
1829 }
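// Example (illustrative): an inbounds GEP with a non-zero struct field offset
// cannot produce null in address space zero, even if the base is unproven:
//
//   %pair = type { i32, i32 }
//   %f1 = getelementptr inbounds %pair, %pair* %p, i64 0, i32 1
//
// Field 1 sits at offset 4 in the struct layout, so %f1 is known non-null.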
1830 
1831 static bool isKnownNonNullFromDominatingCondition(const Value *V,
1832  const Instruction *CtxI,
1833  const DominatorTree *DT) {
1834  assert(V->getType()->isPointerTy() && "V must be pointer type");
1835  assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
1836 
1837  if (!CtxI || !DT)
1838  return false;
1839 
1840  unsigned NumUsesExplored = 0;
1841  for (auto *U : V->users()) {
1842  // Avoid massive lists
1843  if (NumUsesExplored >= DomConditionsMaxUses)
1844  break;
1845  NumUsesExplored++;
1846 
1847  // If the value is used as an argument to a call or invoke, then argument
1848  // attributes may provide an answer about null-ness.
1849  if (auto CS = ImmutableCallSite(U))
1850  if (auto *CalledFunc = CS.getCalledFunction())
1851  for (const Argument &Arg : CalledFunc->args())
1852  if (CS.getArgOperand(Arg.getArgNo()) == V &&
1853  Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
1854  return true;
1855 
1856  // Consider only compare instructions uniquely controlling a branch
1857  CmpInst::Predicate Pred;
1858  if (!match(const_cast<User *>(U),
1859  m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
1860  (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
1861  continue;
1862 
1863  for (auto *CmpU : U->users()) {
1864  if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
1865  assert(BI->isConditional() && "uses a comparison!");
1866 
1867  BasicBlock *NonNullSuccessor =
1868  BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
1869  BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
1870  if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
1871  return true;
1872  } else if (Pred == ICmpInst::ICMP_NE &&
1873  match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
1874  DT->dominates(cast<Instruction>(CmpU), CtxI)) {
1875  return true;
1876  }
1877  }
1878  }
1879 
1880  return false;
1881 }
1882 
1883 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1884 /// ensure that the value it's attached to is never equal to Value? 'RangeType'
1885 /// is the type of the value described by the range.
1886 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1887  const unsigned NumRanges = Ranges->getNumOperands() / 2;
1888  assert(NumRanges >= 1);
1889  for (unsigned i = 0; i < NumRanges; ++i) {
1890  ConstantInt *Lower =
1891  mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1892  ConstantInt *Upper =
1893  mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1894  ConstantRange Range(Lower->getValue(), Upper->getValue());
1895  if (Range.contains(Value))
1896  return false;
1897  }
1898  return true;
1899 }
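// Example (illustrative): range metadata that excludes zero, as consumed by
// isKnownNonZero below. The wrapped range [1, 0) covers every i8 value except
// zero:
//
//   %v = load i8, i8* %p, !range !0
//   !0 = !{i8 1, i8 0}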
1900 
1901 /// Return true if the given value is known to be non-zero when defined. For
1902 /// vectors, return true if every element is known to be non-zero when
1903 /// defined. For pointers, if the context instruction and dominator tree are
1904 /// specified, perform context-sensitive analysis and return true if the
1905 /// pointer couldn't possibly be null at the specified instruction.
1906 /// Supports values with integer or pointer type and vectors of integers.
1907 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1908  if (auto *C = dyn_cast<Constant>(V)) {
1909  if (C->isNullValue())
1910  return false;
1911  if (isa<ConstantInt>(C))
1912  // Must be non-zero due to null test above.
1913  return true;
1914 
1915  // For constant vectors, check that all elements are undefined or known
1916  // non-zero to determine that the whole vector is known non-zero.
1917  if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
1918  for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
1919  Constant *Elt = C->getAggregateElement(i);
1920  if (!Elt || Elt->isNullValue())
1921  return false;
1922  if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
1923  return false;
1924  }
1925  return true;
1926  }
1927 
1928  // A global variable in address space 0 is non-null unless it is extern weak
1929  // or an absolute symbol reference. Other address spaces may have null as a
1930  // valid address for a global, so we can't assume anything.
1931  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
1932  if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
1933  GV->getType()->getAddressSpace() == 0)
1934  return true;
1935  } else
1936  return false;
1937  }
1938 
1939  if (auto *I = dyn_cast<Instruction>(V)) {
1940  if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1941  // If the possible ranges don't contain zero, then the value is
1942  // definitely non-zero.
1943  if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
1944  const APInt ZeroValue(Ty->getBitWidth(), 0);
1945  if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1946  return true;
1947  }
1948  }
1949  }
1950 
1951  // Some of the tests below are recursive, so bail out if we hit the limit.
1952  if (Depth++ >= MaxDepth)
1953  return false;
1954 
1955  // Check for pointer simplifications.
1956  if (V->getType()->isPointerTy()) {
1957  // Alloca never returns null, malloc might.
1958  if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
1959  return true;
1960 
1961  // A byval, inalloca, or nonnull argument is never null.
1962  if (const Argument *A = dyn_cast<Argument>(V))
1963  if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
1964  return true;
1965 
1966  // A Load tagged with nonnull metadata is never null.
1967  if (const LoadInst *LI = dyn_cast<LoadInst>(V))
1968  if (LI->getMetadata(LLVMContext::MD_nonnull))
1969  return true;
1970 
1971  if (auto CS = ImmutableCallSite(V)) {
1972  if (CS.isReturnNonNull())
1973  return true;
1974  if (const auto *RP = getArgumentAliasingToReturnedPointer(CS))
1975  return isKnownNonZero(RP, Depth, Q);
1976  }
1977  }
1978 
1979 
1980  // Check for recursive pointer simplifications.
1981  if (V->getType()->isPointerTy()) {
1982  if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
1983  return true;
1984 
1985  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
1986  if (isGEPKnownNonNull(GEP, Depth, Q))
1987  return true;
1988  }
1989 
1990  unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
1991 
1992  // X | Y != 0 if X != 0 or Y != 0.
1993  Value *X = nullptr, *Y = nullptr;
1994  if (match(V, m_Or(m_Value(X), m_Value(Y))))
1995  return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
1996 
1997  // ext X != 0 if X != 0.
1998  if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1999  return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2000 
2001  // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
2002  // if the lowest bit is shifted off the end.
2003  if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2004  // shl nuw can't remove any non-zero bits.
2005  const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2006  if (BO->hasNoUnsignedWrap())
2007  return isKnownNonZero(X, Depth, Q);
2008 
2009  KnownBits Known(BitWidth);
2010  computeKnownBits(X, Known, Depth, Q);
2011  if (Known.One[0])
2012  return true;
2013  }
2014  // shr X, Y != 0 if X is negative. Note that the value of the shift is not
2015  // defined if the sign bit is shifted off the end.
2016  else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2017  // shr exact can only shift out zero bits.
2018  const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2019  if (BO->isExact())
2020  return isKnownNonZero(X, Depth, Q);
2021 
2022  KnownBits Known = computeKnownBits(X, Depth, Q);
2023  if (Known.isNegative())
2024  return true;
2025 
2026  // If the shifter operand is a constant, and all of the bits shifted
2027  // out are known to be zero, and X is known non-zero then at least one
2028  // non-zero bit must remain.
2029  if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2030  auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2031  // Is there a known one in the portion not shifted out?
2032  if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2033  return true;
2034  // Are all the bits to be shifted out known zero?
2035  if (Known.countMinTrailingZeros() >= ShiftVal)
2036  return isKnownNonZero(X, Depth, Q);
2037  }
2038  }
2039  // div exact can only produce a zero if the dividend is zero.
2040  else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2041  return isKnownNonZero(X, Depth, Q);
2042  }
2043  // X + Y.
2044  else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2045  KnownBits XKnown = computeKnownBits(X, Depth, Q);
2046  KnownBits YKnown = computeKnownBits(Y, Depth, Q);
2047 
2048  // If X and Y are both non-negative (as signed values) then their sum is not
2049  // zero unless both X and Y are zero.
2050  if (XKnown.isNonNegative() && YKnown.isNonNegative())
2051  if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
2052  return true;
2053 
2054  // If X and Y are both negative (as signed values) then their sum is not
2055  // zero unless both X and Y equal INT_MIN.
2056  if (XKnown.isNegative() && YKnown.isNegative()) {
2057  APInt Mask = APInt::getSignedMaxValue(BitWidth);
2058  // The sign bit of X is set. If some other bit is set then X is not equal
2059  // to INT_MIN.
2060  if (XKnown.One.intersects(Mask))
2061  return true;
2062  // The sign bit of Y is set. If some other bit is set then Y is not equal
2063  // to INT_MIN.
2064  if (YKnown.One.intersects(Mask))
2065  return true;
2066  }
2067 
2068  // The sum of a non-negative number and a power of two is not zero.
2069  if (XKnown.isNonNegative() &&
2070  isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2071  return true;
2072  if (YKnown.isNonNegative() &&
2073  isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2074  return true;
2075  }
2076  // X * Y.
2077  else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2078  const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2079  // If X and Y are non-zero then so is X * Y as long as the multiplication
2080  // does not overflow.
2081  if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
2082  isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
2083  return true;
2084  }
2085  // (C ? X : Y) != 0 if X != 0 and Y != 0.
2086  else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2087  if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
2088  isKnownNonZero(SI->getFalseValue(), Depth, Q))
2089  return true;
2090  }
2091  // PHI
2092  else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2093  // Try to detect a recurrence that monotonically increases from a
2094  // starting value, as these are common as induction variables.
2095  if (PN->getNumIncomingValues() == 2) {
2096  Value *Start = PN->getIncomingValue(0);
2097  Value *Induction = PN->getIncomingValue(1);
2098  if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2099  std::swap(Start, Induction);
2100  if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2101  if (!C->isZero() && !C->isNegative()) {
2102  ConstantInt *X;
2103  if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2104  match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2105  !X->isNegative())
2106  return true;
2107  }
2108  }
2109  }
2110  // Check if all incoming values are non-zero constants.
2111  bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
2112  return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
2113  });
2114  if (AllNonZeroConstants)
2115  return true;
2116  }
2117 
2118  KnownBits Known(BitWidth);
2119  computeKnownBits(V, Known, Depth, Q);
2120  return Known.One != 0;
2121 }
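// A few illustrative cases handled above (not part of the original source):
//
//   %a = or i64 %x, 1           ; non-zero: the constant operand is non-zero
//   %s = shl nuw i64 %x, %n     ; non-zero iff %x is, since nuw loses no bits
//   %d = udiv exact i64 %x, %y  ; non-zero iff %x is (exact division)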
2122 
2123 /// Return true if V2 == V1 + X, where X is known non-zero.
2124 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2125  const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2126  if (!BO || BO->getOpcode() != Instruction::Add)
2127  return false;
2128  Value *Op = nullptr;
2129  if (V2 == BO->getOperand(0))
2130  Op = BO->getOperand(1);
2131  else if (V2 == BO->getOperand(1))
2132  Op = BO->getOperand(0);
2133  else
2134  return false;
2135  return isKnownNonZero(Op, 0, Q);
2136 }
2137 
2138 /// Return true if it is known that V1 != V2.
2139 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2140  if (V1 == V2)
2141  return false;
2142  if (V1->getType() != V2->getType())
2143  // We can't look through casts yet.
2144  return false;
2145  if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2146  return true;
2147 
2148  if (V1->getType()->isIntOrIntVectorTy()) {
2149  // Are any known bits in V1 contradictory to known bits in V2? If V1
2150  // has a known zero where V2 has a known one, they must not be equal.
2151  KnownBits Known1 = computeKnownBits(V1, 0, Q);
2152  KnownBits Known2 = computeKnownBits(V2, 0, Q);
2153 
2154  if (Known1.Zero.intersects(Known2.One) ||
2155  Known2.Zero.intersects(Known1.One))
2156  return true;
2157  }
2158  return false;
2159 }
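// Example (illustrative): isAddOfNonZero proves %y != %x below, because the
// added constant is known non-zero:
//
//   %y = add i32 %x, 7
//
// Contradictory known bits also work: for %a = or i32 %p, 1 and
// %b = shl i32 %q, 1, bit 0 is known one in %a but known zero in %b.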
2160 
2161 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
2162 /// simplify operations downstream. Mask is known to be zero for bits that V
2163 /// cannot have.
2164 ///
2165 /// This function is defined on values with integer type, values with pointer
2166 /// type, and vectors of integers. In the case
2167 /// where V is a vector, the mask, known zero, and known one values are the
2168 /// same width as the vector element, and the bit is set only if it is true
2169 /// for all of the elements in the vector.
2170 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2171  const Query &Q) {
2172  KnownBits Known(Mask.getBitWidth());
2173  computeKnownBits(V, Known, Depth, Q);
2174  return Mask.isSubsetOf(Known.Zero);
2175 }
2176 
2177 /// For vector constants, loop over the elements and find the constant with the
2178 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2179 /// or if any element was not analyzed; otherwise, return the count for the
2180 /// element with the minimum number of sign bits.
2181 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2182  unsigned TyBits) {
2183  const auto *CV = dyn_cast<Constant>(V);
2184  if (!CV || !CV->getType()->isVectorTy())
2185  return 0;
2186 
2187  unsigned MinSignBits = TyBits;
2188  unsigned NumElts = CV->getType()->getVectorNumElements();
2189  for (unsigned i = 0; i != NumElts; ++i) {
2190  // If we find a non-ConstantInt, bail out.
2191  auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2192  if (!Elt)
2193  return 0;
2194 
2195  MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2196  }
2197 
2198  return MinSignBits;
2199 }
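// Worked example (illustrative): for <2 x i8> <i8 -1, i8 3>, element -1 has 8
// sign bits and element 3 (0b00000011) has 6, so this returns 6, the minimum
// across the elements.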
2200 
2201 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2202  const Query &Q);
2203 
2204 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2205  const Query &Q) {
2206  unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2207  assert(Result > 0 && "At least one sign bit needs to be present!");
2208  return Result;
2209 }
2210 
2211 /// Return the number of times the sign bit of the register is replicated into
2212 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2213 /// (itself), but other cases can give us information. For example, immediately
2214 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2215 /// other, so we return 3. For vectors, return the number of sign bits for the
2216 /// vector element with the minimum number of known sign bits.
2217 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2218  const Query &Q) {
2219  assert(Depth <= MaxDepth && "Limit Search Depth");
2220 
2221  // We return the minimum number of sign bits that are guaranteed to be present
2222  // in V, so for undef we have to conservatively return 1. We don't have the
2223  // same behavior for poison though -- that's a FIXME today.
2224 
2225  Type *ScalarTy = V->getType()->getScalarType();
2226  unsigned TyBits = ScalarTy->isPointerTy() ?
2227  Q.DL.getIndexTypeSizeInBits(ScalarTy) :
2228  Q.DL.getTypeSizeInBits(ScalarTy);
2229 
2230  unsigned Tmp, Tmp2;
2231  unsigned FirstAnswer = 1;
2232 
2233  // Note that ConstantInt is handled by the general computeKnownBits case
2234  // below.
2235 
2236  if (Depth == MaxDepth)
2237  return 1; // Limit search depth.
2238 
2239  const Operator *U = dyn_cast<Operator>(V);
2240  switch (Operator::getOpcode(V)) {
2241  default: break;
2242  case Instruction::SExt:
2243  Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2244  return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2245 
2246  case Instruction::SDiv: {
2247  const APInt *Denominator;
2248  // sdiv X, C -> adds log(C) sign bits.
2249  if (match(U->getOperand(1), m_APInt(Denominator))) {
2250 
2251  // Ignore non-positive denominator.
2252  if (!Denominator->isStrictlyPositive())
2253  break;
2254 
2255  // Calculate the incoming numerator bits.
2256  unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2257 
2258  // Add floor(log(C)) bits to the numerator bits.
2259  return std::min(TyBits, NumBits + Denominator->logBase2());
2260  }
2261  break;
2262  }
2263 
2264  case Instruction::SRem: {
2265  const APInt *Denominator;
2266  // srem X, C -> we know that the result is within [-C+1,C) when C is a
2267  // positive constant. This lets us put a lower bound on the number of sign
2268  // bits.
2269  if (match(U->getOperand(1), m_APInt(Denominator))) {
2270 
2271  // Ignore non-positive denominator.
2272  if (!Denominator->isStrictlyPositive())
2273  break;
2274 
2275  // Calculate the incoming numerator bits. SRem by a positive constant
2276  // can't lower the number of sign bits.
2277  unsigned NumrBits =
2278  ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2279 
2280  // Calculate the leading sign bit constraints by examining the
2281  // denominator. Given that the denominator is positive, there are two
2282  // cases:
2283  //
2284  // 1. the numerator is positive. The result range is [0,C) and [0,C) u<
2285  // (1 << ceilLogBase2(C)).
2286  //
2287  // 2. the numerator is negative. Then the result range is (-C,0] and
2288  // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2289  //
2290  // Thus a lower bound on the number of sign bits is `TyBits -
2291  // ceilLogBase2(C)`.
2292 
2293  unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2294  return std::max(NumrBits, ResBits);
2295  }
2296  break;
2297  }
2298 
2299  case Instruction::AShr: {
2300  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2301  // ashr X, C -> adds C sign bits. Vectors too.
2302  const APInt *ShAmt;
2303  if (match(U->getOperand(1), m_APInt(ShAmt))) {
2304  if (ShAmt->uge(TyBits))
2305  break; // Bad shift.
2306  unsigned ShAmtLimited = ShAmt->getZExtValue();
2307  Tmp += ShAmtLimited;
2308  if (Tmp > TyBits) Tmp = TyBits;
2309  }
2310  return Tmp;
2311  }
2312  case Instruction::Shl: {
2313  const APInt *ShAmt;
2314  if (match(U->getOperand(1), m_APInt(ShAmt))) {
2315  // shl destroys sign bits.
2316  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2317  if (ShAmt->uge(TyBits) || // Bad shift.
2318  ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2319  Tmp2 = ShAmt->getZExtValue();
2320  return Tmp - Tmp2;
2321  }
2322  break;
2323  }
2324  case Instruction::And:
2325  case Instruction::Or:
2326  case Instruction::Xor: // NOT is handled here.
2327  // Logical binary ops preserve the number of sign bits at the worst.
2328  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2329  if (Tmp != 1) {
2330  Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2331  FirstAnswer = std::min(Tmp, Tmp2);
2332  // We computed what we know about the sign bits as our first
2333  // answer. Now proceed to the generic code that uses
2334  // computeKnownBits, and pick whichever answer is better.
2335  }
2336  break;
2337 
2338  case Instruction::Select:
2339  Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2340  if (Tmp == 1) return 1; // Early out.
2341  Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2342  return std::min(Tmp, Tmp2);
2343 
2344  case Instruction::Add:
2345  // Add can have at most one carry bit. Thus we know that the output
2346  // is, at worst, one more bit than the inputs.
2347  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2348  if (Tmp == 1) return 1; // Early out.
2349 
2350  // Special case decrementing a value (ADD X, -1):
2351  if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2352  if (CRHS->isAllOnesValue()) {
2353  KnownBits Known(TyBits);
2354  computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2355 
2356  // If the input is known to be 0 or 1, the output is 0/-1, which is all
2357  // sign bits set.
2358  if ((Known.Zero | 1).isAllOnesValue())
2359  return TyBits;
2360 
2361  // If we are subtracting one from a positive number, there is no carry
2362  // out of the result.
2363  if (Known.isNonNegative())
2364  return Tmp;
2365  }
2366 
2367  Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2368  if (Tmp2 == 1) return 1;
2369  return std::min(Tmp, Tmp2)-1;
2370 
2371  case Instruction::Sub:
2372  Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2373  if (Tmp2 == 1) return 1;
2374 
2375  // Handle NEG.
2376  if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2377  if (CLHS->isNullValue()) {
2378  KnownBits Known(TyBits);
2379  computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2380  // If the input is known to be 0 or 1, the output is 0/-1, which is all
2381  // sign bits set.
2382  if ((Known.Zero | 1).isAllOnesValue())
2383  return TyBits;
2384 
2385  // If the input is known to be positive (the sign bit is known clear),
2386  // the output of the NEG has the same number of sign bits as the input.
2387  if (Known.isNonNegative())
2388  return Tmp2;
2389 
2390  // Otherwise, we treat this like a SUB.
2391  }
2392 
2393  // Sub can have at most one carry bit. Thus we know that the output
2394  // is, at worst, one more bit than the inputs.
2395  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2396  if (Tmp == 1) return 1; // Early out.
2397  return std::min(Tmp, Tmp2)-1;
2398 
2399  case Instruction::Mul: {
2400  // The output of the Mul can be at most twice the valid bits in the inputs.
2401  unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2402  if (SignBitsOp0 == 1) return 1; // Early out.
2403  unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2404  if (SignBitsOp1 == 1) return 1;
2405  unsigned OutValidBits =
2406  (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2407  return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2408  }
2409 
2410  case Instruction::PHI: {
2411  const PHINode *PN = cast<PHINode>(U);
2412  unsigned NumIncomingValues = PN->getNumIncomingValues();
2413  // Don't analyze large in-degree PHIs.
2414  if (NumIncomingValues > 4) break;
2415  // Unreachable blocks may have zero-operand PHI nodes.
2416  if (NumIncomingValues == 0) break;
2417 
2418  // Take the minimum of all incoming values. This can't infinitely loop
2419  // because of our depth threshold.
2420  Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2421  for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2422  if (Tmp == 1) return Tmp;
2423  Tmp = std::min(
2424  Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2425  }
2426  return Tmp;
2427  }
2428 
2429  case Instruction::Trunc:
2430  // FIXME: it's tricky to do anything useful for this, but it is an important
2431  // case for targets like X86.
2432  break;
2433 
2434  case Instruction::ExtractElement:
2435  // Look through extract element. At the moment we keep this simple and skip
2436  // tracking the specific element. But at least we might find information
2437  // valid for all elements of the vector (for example if the vector is sign
2438  // extended, shifted, etc).
2439  return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2440  }
2441 
2442  // Finally, if we can prove that the top bits of the result are 0's or 1's,
2443  // use this information.
2444 
2445  // If we can examine all elements of a vector constant successfully, we're
2446  // done (we can't do any better than that). If not, keep trying.
2447  if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2448  return VecSignBits;
2449 
2450  KnownBits Known(TyBits);
2451  computeKnownBits(V, Known, Depth, Q);
2452 
2453  // If we know that the sign bit is either zero or one, determine the number of
2454  // identical bits in the top of the input value.
2455  return std::max(FirstAnswer, Known.countMinSignBits());
2456 }
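// Worked example (illustrative), matching the doc comment above:
//
//   %a = ashr i32 %x, 2
//
// %x guarantees at least 1 sign bit and the shift adds 2 more, so
// ComputeNumSignBits(%a) >= 3: the top three bits of %a are identical.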
2457 
2458 /// This function computes the integer multiple of Base that equals V.
2459 /// If successful, it returns true and returns the multiple in
2460 /// Multiple. If unsuccessful, it returns false. It looks
2461 /// through SExt instructions only if LookThroughSExt is true.
2462 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2463  bool LookThroughSExt, unsigned Depth) {
2464  const unsigned MaxDepth = 6;
2465 
2466  assert(V && "No Value?");
2467  assert(Depth <= MaxDepth && "Limit Search Depth");
2468  assert(V->getType()->isIntegerTy() && "Not integer type!");
2469 
2470  Type *T = V->getType();
2471 
2472  ConstantInt *CI = dyn_cast<ConstantInt>(V);
2473 
2474  if (Base == 0)
2475  return false;
2476 
2477  if (Base == 1) {
2478  Multiple = V;
2479  return true;
2480  }
2481 
2482  ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2483  Constant *BaseVal = ConstantInt::get(T, Base);
2484  if (CO && CO == BaseVal) {
2485  // Multiple is 1.
2486  Multiple = ConstantInt::get(T, 1);
2487  return true;
2488  }
2489 
2490  if (CI && CI->getZExtValue() % Base == 0) {
2491  Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2492  return true;
2493  }
2494 
2495  if (Depth == MaxDepth) return false; // Limit search depth.
2496 
2497  Operator *I = dyn_cast<Operator>(V);
2498  if (!I) return false;
2499 
2500  switch (I->getOpcode()) {
2501  default: break;
2502  case Instruction::SExt:
2503  if (!LookThroughSExt) return false;
2504  // otherwise fall through to ZExt
2505  LLVM_FALLTHROUGH;
2506  case Instruction::ZExt:
2507  return ComputeMultiple(I->getOperand(0), Base, Multiple,
2508  LookThroughSExt, Depth+1);
2509  case Instruction::Shl:
2510  case Instruction::Mul: {
2511  Value *Op0 = I->getOperand(0);
2512  Value *Op1 = I->getOperand(1);
2513 
2514  if (I->getOpcode() == Instruction::Shl) {
2515  ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2516  if (!Op1CI) return false;
2517  // Turn Op0 << Op1 into Op0 * 2^Op1
2518  APInt Op1Int = Op1CI->getValue();
2519  uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2520  APInt API(Op1Int.getBitWidth(), 0);
2521  API.setBit(BitToSet);
2522  Op1 = ConstantInt::get(V->getContext(), API);
2523  }
2524 
2525  Value *Mul0 = nullptr;
2526  if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2527  if (Constant *Op1C = dyn_cast<Constant>(Op1))
2528  if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2529  if (Op1C->getType()->getPrimitiveSizeInBits() <
2530  MulC->getType()->getPrimitiveSizeInBits())
2531  Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2532  if (Op1C->getType()->getPrimitiveSizeInBits() >
2533  MulC->getType()->getPrimitiveSizeInBits())
2534  MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2535 
2536  // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2537  Multiple = ConstantExpr::getMul(MulC, Op1C);
2538  return true;
2539  }
2540 
2541  if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2542  if (Mul0CI->getValue() == 1) {
2543  // V == Base * Op1, so return Op1
2544  Multiple = Op1;
2545  return true;
2546  }
2547  }
2548 
2549  Value *Mul1 = nullptr;
2550  if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2551  if (Constant *Op0C = dyn_cast<Constant>(Op0))
2552  if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2553  if (Op0C->getType()->getPrimitiveSizeInBits() <
2554  MulC->getType()->getPrimitiveSizeInBits())
2555  Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2556  if (Op0C->getType()->getPrimitiveSizeInBits() >
2557  MulC->getType()->getPrimitiveSizeInBits())
2558  MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2559 
2560  // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2561  Multiple = ConstantExpr::getMul(MulC, Op0C);
2562  return true;
2563  }
2564 
2565  if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2566  if (Mul1CI->getValue() == 1) {
2567  // V == Base * Op0, so return Op0
2568  Multiple = Op0;
2569  return true;
2570  }
2571  }
2572  }
2573  }
2574 
2575  // We could not determine if V is a multiple of Base.
2576  return false;
2577 }
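// Minimal usage sketch (illustrative; V is a placeholder Value*). For
// V = shl i32 %x, 2, the shift is rewritten as a multiply by 4, so the call
// below succeeds with Multiple == %x:
//
//   Value *Multiple = nullptr;
//   if (ComputeMultiple(V, /*Base=*/4, Multiple, /*LookThroughSExt=*/false))
//     ...  // V == 4 * Multiple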
2578 
2579 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2580  const TargetLibraryInfo *TLI) {
2581  const Function *F = ICS.getCalledFunction();
2582  if (!F)
2583  return Intrinsic::not_intrinsic;
2584 
2585  if (F->isIntrinsic())
2586  return F->getIntrinsicID();
2587 
2588  if (!TLI)
2589  return Intrinsic::not_intrinsic;
2590 
2591  LibFunc Func;
2592  // We're going to make assumptions about the semantics of the functions, so
2593  // check that the target knows the function is available in this environment
2594  // and that it does not have local linkage.
2595  if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2596  return Intrinsic::not_intrinsic;
2597 
2598  if (!ICS.onlyReadsMemory())
2599  return Intrinsic::not_intrinsic;
2600 
2601  // Otherwise check if we have a call to a function that can be turned into a
2602  // vector intrinsic.
2603  switch (Func) {
2604  default:
2605  break;
2606  case LibFunc_sin:
2607  case LibFunc_sinf:
2608  case LibFunc_sinl:
2609  return Intrinsic::sin;
2610  case LibFunc_cos:
2611  case LibFunc_cosf:
2612  case LibFunc_cosl:
2613  return Intrinsic::cos;
2614  case LibFunc_exp:
2615  case LibFunc_expf:
2616  case LibFunc_expl:
2617  return Intrinsic::exp;
2618  case LibFunc_exp2:
2619  case LibFunc_exp2f:
2620  case LibFunc_exp2l:
2621  return Intrinsic::exp2;
2622  case LibFunc_log:
2623  case LibFunc_logf:
2624  case LibFunc_logl:
2625  return Intrinsic::log;
2626  case LibFunc_log10:
2627  case LibFunc_log10f:
2628  case LibFunc_log10l:
2629  return Intrinsic::log10;
2630  case LibFunc_log2:
2631  case LibFunc_log2f:
2632  case LibFunc_log2l:
2633  return Intrinsic::log2;
2634  case LibFunc_fabs:
2635  case LibFunc_fabsf:
2636  case LibFunc_fabsl:
2637  return Intrinsic::fabs;
2638  case LibFunc_fmin:
2639  case LibFunc_fminf:
2640  case LibFunc_fminl:
2641  return Intrinsic::minnum;
2642  case LibFunc_fmax:
2643  case LibFunc_fmaxf:
2644  case LibFunc_fmaxl:
2645  return Intrinsic::maxnum;
2646  case LibFunc_copysign:
2647  case LibFunc_copysignf:
2648  case LibFunc_copysignl:
2649  return Intrinsic::copysign;
2650  case LibFunc_floor:
2651  case LibFunc_floorf:
2652  case LibFunc_floorl:
2653  return Intrinsic::floor;
2654  case LibFunc_ceil:
2655  case LibFunc_ceilf:
2656  case LibFunc_ceill:
2657  return Intrinsic::ceil;
2658  case LibFunc_trunc:
2659  case LibFunc_truncf:
2660  case LibFunc_truncl:
2661  return Intrinsic::trunc;
2662  case LibFunc_rint:
2663  case LibFunc_rintf:
2664  case LibFunc_rintl:
2665  return Intrinsic::rint;
2666  case LibFunc_nearbyint:
2667  case LibFunc_nearbyintf:
2668  case LibFunc_nearbyintl:
2669  return Intrinsic::nearbyint;
2670  case LibFunc_round:
2671  case LibFunc_roundf:
2672  case LibFunc_roundl:
2673  return Intrinsic::round;
2674  case LibFunc_pow:
2675  case LibFunc_powf:
2676  case LibFunc_powl:
2677  return Intrinsic::pow;
2678  case LibFunc_sqrt:
2679  case LibFunc_sqrtf:
2680  case LibFunc_sqrtl:
2681  return Intrinsic::sqrt;
2682  }
2683 
2684  return Intrinsic::not_intrinsic;
2685 }
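// Example (illustrative; CI and TLI are placeholders): a call to sinf that
// only reads memory, in an environment where TargetLibraryInfo recognizes the
// C math library, maps onto the corresponding vectorizable intrinsic:
//
//   Intrinsic::ID IID = getIntrinsicForCallSite(ImmutableCallSite(CI), TLI);
//   if (IID == Intrinsic::sin)
//     ...  // treat the call like @llvm.sin.f32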
2686 
2687 /// Return true if we can prove that the specified FP value is never equal to
2688 /// -0.0.
2689 ///
2690 /// NOTE: this function will need to be revisited when we support non-default
2691 /// rounding modes!
2693  unsigned Depth) {
2694  if (auto *CFP = dyn_cast<ConstantFP>(V))
2695  return !CFP->getValueAPF().isNegZero();
2696 
2697  // Limit search depth.
2698  if (Depth == MaxDepth)
2699  return false;
2700 
2701  auto *Op = dyn_cast<Operator>(V);
2702  if (!Op)
2703  return false;
2704 
2705  // Check if the nsz fast-math flag is set.
2706  if (auto *FPO = dyn_cast<FPMathOperator>(Op))
2707  if (FPO->hasNoSignedZeros())
2708  return true;
2709 
2710  // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
2711  if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
2712  return true;
2713 
2714  // sitofp and uitofp turn into +0.0 for zero.
2715  if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
2716  return true;
2717 
2718  if (auto *Call = dyn_cast<CallInst>(Op)) {
2719  Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI);
2720  switch (IID) {
2721  default:
2722  break;
2723  // sqrt(-0.0) = -0.0, no other negative results are possible.
2724  case Intrinsic::sqrt:
2725  return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
2726  // fabs(x) != -0.0
2727  case Intrinsic::fabs:
2728  return true;
2729  }
2730  }
2731 
2732  return false;
2733 }
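// Illustrative cases accepted above (not part of the original source):
//
//   %a = fadd float %x, 0.0      ; a zero result here is always +0.0
//   %b = fadd nsz float %x, %y   ; nsz: the sign of a zero result is ignorable
//   %c = uitofp i32 %n to float  ; integer zero converts to +0.0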
2734 
2735 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
2736 /// standard ordered compare. E.g., treat -0.0 olt 0.0 as true because of the
2737 /// sign bit, even though the two values compare equal.
2738 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
2739  const TargetLibraryInfo *TLI,
2740  bool SignBitOnly,
2741  unsigned Depth) {
2742  // TODO: This function does not do the right thing when SignBitOnly is true
2743  // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
2744  // which flips the sign bits of NaNs. See
2745  // https://llvm.org/bugs/show_bug.cgi?id=31702.
2746 
2747  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2748  return !CFP->getValueAPF().isNegative() ||
2749  (!SignBitOnly && CFP->getValueAPF().isZero());
2750  }
2751 
2752  // Handle vector of constants.
2753  if (auto *CV = dyn_cast<Constant>(V)) {
2754  if (CV->getType()->isVectorTy()) {
2755  unsigned NumElts = CV->getType()->getVectorNumElements();
2756  for (unsigned i = 0; i != NumElts; ++i) {
2757  auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
2758  if (!CFP)
2759  return false;
2760  if (CFP->getValueAPF().isNegative() &&
2761  (SignBitOnly || !CFP->getValueAPF().isZero()))
2762  return false;
2763  }
2764 
2765  // All non-negative ConstantFPs.
2766  return true;
2767  }
2768  }
2769 
2770  if (Depth == MaxDepth)
2771  return false; // Limit search depth.
2772 
2773  const Operator *I = dyn_cast<Operator>(V);
2774  if (!I)
2775  return false;
2776 
2777  switch (I->getOpcode()) {
2778  default:
2779  break;
2780  // Unsigned integers are always nonnegative.
2781  case Instruction::UIToFP:
2782  return true;
2783  case Instruction::FMul:
2784  // x*x is always non-negative or a NaN.
2785  if (I->getOperand(0) == I->getOperand(1) &&
2786  (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
2787  return true;
2788 
2789  LLVM_FALLTHROUGH;
2790  case Instruction::FAdd:
2791  case Instruction::FDiv:
2792  case Instruction::FRem:
2793  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2794  Depth + 1) &&
2795  cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2796  Depth + 1);
2797  case Instruction::Select:
2798  return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2799  Depth + 1) &&
2800  cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2801  Depth + 1);
2802  case Instruction::FPExt:
2803  case Instruction::FPTrunc:
2804  // Widening/narrowing never change sign.
2805  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2806  Depth + 1);
2807  case Instruction::ExtractElement:
2808  // Look through extract element. At the moment we keep this simple and skip
2809  // tracking the specific element. But at least we might find information
2810  // valid for all elements of the vector.
2811  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2812  Depth + 1);
2813  case Instruction::Call:
2814  const auto *CI = cast<CallInst>(I);
2815  Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2816  switch (IID) {
2817  default:
2818  break;
2819  case Intrinsic::maxnum:
2820  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2821  Depth + 1) ||
2822  cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2823  Depth + 1);
2824  case Intrinsic::minnum:
2825  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2826  Depth + 1) &&
2827  cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2828  Depth + 1);
2829  case Intrinsic::exp:
2830  case Intrinsic::exp2:
2831  case Intrinsic::fabs:
2832  return true;
2833 
2834  case Intrinsic::sqrt:
2835  // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
2836  if (!SignBitOnly)
2837  return true;
2838  return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
2839  CannotBeNegativeZero(CI->getOperand(0), TLI));
2840 
2841  case Intrinsic::powi:
2842  if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
2843  // powi(x,n) is non-negative if n is even.
2844  if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
2845  return true;
2846  }
2847  // TODO: This is not correct. Given that exp is an integer, here are the
2848  // ways that pow can return a negative value:
2849  //
2850  // pow(x, exp) --> negative if exp is odd and x is negative.
2851  // pow(-0, exp) --> -inf if exp is negative odd.
2852  // pow(-0, exp) --> -0 if exp is positive odd.
2853  // pow(-inf, exp) --> -0 if exp is negative odd.
2854  // pow(-inf, exp) --> -inf if exp is positive odd.
2855  //
2856  // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
2857  // but we must return false if x == -0. Unfortunately we do not currently
2858  // have a way of expressing this constraint. See details in
2859  // https://llvm.org/bugs/show_bug.cgi?id=31702.
2860  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2861  Depth + 1);
2862 
2863  case Intrinsic::fma:
2864  case Intrinsic::fmuladd:
2865  // x*x+y is non-negative if y is non-negative.
2866  return I->getOperand(0) == I->getOperand(1) &&
2867  (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
2868  cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2869  Depth + 1);
2870  }
2871  break;
2872  }
2873  return false;
2874 }
2875 
2877  const TargetLibraryInfo *TLI) {
2878  return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
2879 }
2880 
2882  return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
2883 }
2884 
2886  assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
2887 
2888  // If we're told that NaNs won't happen, assume they won't.
2889  if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
2890  if (FPMathOp->hasNoNaNs())
2891  return true;
2892 
2893  // TODO: Handle instructions and potentially recurse like other 'isKnown'
2894  // functions. For example, the result of sitofp is never NaN.
2895 
2896  // Handle scalar constants.
2897  if (auto *CFP = dyn_cast<ConstantFP>(V))
2898  return !CFP->isNaN();
2899 
2900  // Bail out for constant expressions, but try to handle vector constants.
2901  if (!V->getType()->isVectorTy() || !isa<Constant>(V))
2902  return false;
2903 
2904  // For vectors, verify that each element is not NaN.
2905  unsigned NumElts = V->getType()->getVectorNumElements();
2906  for (unsigned i = 0; i != NumElts; ++i) {
2907  Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
2908  if (!Elt)
2909  return false;
2910  if (isa<UndefValue>(Elt))
2911  continue;
2912  auto *CElt = dyn_cast<ConstantFP>(Elt);
2913  if (!CElt || CElt->isNaN())
2914  return false;
2915  }
2916  // All elements were confirmed not-NaN or undefined.
2917  return true;
2918 }
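// Illustrative inputs this accepts (not part of the original source):
//
//   %a = fmul nnan float %x, %y           ; nnan fast-math flag set
//   <2 x float> <float 1.0, float undef>  ; constant vector, no NaN elements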
2919 
2920 /// If the specified value can be set by repeating the same byte in memory,
2921 /// return the i8 value that it is represented with. This is
2922 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2923 /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
2924 /// byte store (e.g. i16 0x1234), return null.
2926  // All byte-wide stores are splatable, even of arbitrary variables.
2927  if (V->getType()->isIntegerTy(8)) return V;
2928 
2929  // Handle 'null' ConstantArrayZero etc.
2930  if (Constant *C = dyn_cast<Constant>(V))
2931  if (C->isNullValue())
2932  return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2933 
2934  // Constant float and double values can be handled as integer values if the
2935  // corresponding integer value is "byteable". An important case is 0.0.
2936  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2937  if (CFP->getType()->isFloatTy())
2938  return isBytewiseValue(ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext())));
2939  if (CFP->getType()->isDoubleTy())
2940  return isBytewiseValue(ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext())));
2941  // Don't handle long double formats, which have strange constraints.
2942  }
2943 
2944  // We can handle constant integers that are multiple of 8 bits.
2945  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2946  if (CI->getBitWidth() % 8 == 0) {
2947  assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2948 
2949  if (!CI->getValue().isSplat(8))
2950  return nullptr;
2951  return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
2952  }
2953  }
2954 
2955  // A ConstantDataArray/Vector is splatable if all its members are equal and
2956  // also splatable.
2957  if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2958  Value *Elt = CA->getElementAsConstant(0);
2959  Value *Val = isBytewiseValue(Elt);
2960  if (!Val)
2961  return nullptr;
2962 
2963  for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2964  if (CA->getElementAsConstant(I) != Elt)
2965  return nullptr;
2966 
2967  return Val;
2968  }
2969 
2970  // Conceptually, we could handle things like:
2971  // %a = zext i8 %X to i16
2972  // %b = shl i16 %a, 8
2973  // %c = or i16 %a, %b
2974  // but until there is an example that actually needs this, it doesn't seem
2975  // worth worrying about.
2976  return nullptr;
2977 }
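// Worked examples (illustrative) of what isBytewiseValue returns:
//
//   i8 42      -> i8 42      (byte-wide values are trivially splatable)
//   i32 -1     -> i8 -1      (0xFFFFFFFF is a splat of 0xFF)
//   i16 0xF0F0 -> i8 0xF0
//   double 0.0 -> i8 0       (bitcast to i64 0, a splat of the zero byte)
//   i16 0x1234 -> nullptr    (bytes differ, not splatable)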
2978 
2979 // This is the recursive version of BuildSubAggregate. It takes a few different
2980 // arguments. Idxs is the index within the nested struct From that we are
2981 // looking at now (which is of type IndexedType). IdxSkip is the number of
2982 // indices from Idxs that should be left out when inserting into the resulting
2983 // struct. To is the result struct built so far, new insertvalue instructions
2984 // build on that.
2985 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2986  SmallVectorImpl<unsigned> &Idxs,
2987  unsigned IdxSkip,
2988  Instruction *InsertBefore) {
2989  StructType *STy = dyn_cast<StructType>(IndexedType);
2990  if (STy) {
2991  // Save the original To argument so we can modify it
2992  Value *OrigTo = To;
2993  // General case, the type indexed by Idxs is a struct
2994  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2995  // Process each struct element recursively
2996  Idxs.push_back(i);
2997  Value *PrevTo = To;
2998  To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2999  InsertBefore);
3000  Idxs.pop_back();
3001  if (!To) {
3002  // Couldn't find any inserted value for this index? Cleanup
3003  while (PrevTo != OrigTo) {
3004  InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3005  PrevTo = Del->getAggregateOperand();
3006  Del->eraseFromParent();
3007  }
3008  // Stop processing elements
3009  break;
3010  }
3011  }
3012  // If we successfully found a value for each of our subaggregates
3013  if (To)
3014  return To;
3015  }
3016  // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3017  // the struct's elements had a value that was inserted directly. In the latter
3018  // case, perhaps we can't determine each of the subelements individually, but
3019  // we might be able to find the complete struct somewhere.
3020 
3021  // Find the value that is at that particular spot
3022  Value *V = FindInsertedValue(From, Idxs);
3023 
3024  if (!V)
3025  return nullptr;
3026 
3027  // Insert the value in the new (sub) aggregate
3028  return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3029  "tmp", InsertBefore);
3030 }
3031 
3032 // This helper takes a nested struct and extracts a part of it (which is again a
3033 // struct) into a new value. For example, given the struct:
3034 // { a, { b, { c, d }, e } }
3035 // and the indices "1, 1" this returns
3036 // { c, d }.
3037 //
3038 // It does this by inserting an insertvalue for each element in the resulting
3039 // struct, as opposed to just inserting a single struct. This will only work if
3040 // each of the elements of the substruct are known (ie, inserted into From by an
3041 // insertvalue instruction somewhere).
3042 //
3043 // All inserted insertvalue instructions are inserted before InsertBefore
3044 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3045  Instruction *InsertBefore) {
3046  assert(InsertBefore && "Must have someplace to insert!");
3047  Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3048  idx_range);
3049  Value *To = UndefValue::get(IndexedType);
3050  SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3051  unsigned IdxSkip = Idxs.size();
3052 
3053  return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3054 }
3055 
3056 /// Given an aggregate and a sequence of indices, see if the scalar value
3057 /// indexed is already around as a register, for example if it was inserted
3058 /// directly into the aggregate.
3059 ///
3060 /// If InsertBefore is not null, this function will duplicate (modified)
3061 /// insertvalues when a part of a nested struct is extracted.
3062 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3063  Instruction *InsertBefore) {
3064  // Nothing to index? Just return V then (this is useful at the end of our
3065  // recursion).
3066  if (idx_range.empty())
3067  return V;
3068  // We have indices, so V should have an indexable type.
3069  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3070  "Not looking at a struct or array?");
3071  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3072  "Invalid indices for type?");
3073 
3074  if (Constant *C = dyn_cast<Constant>(V)) {
3075  C = C->getAggregateElement(idx_range[0]);
3076  if (!C) return nullptr;
3077  return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3078  }
3079 
3080  if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3081  // Loop the indices for the insertvalue instruction in parallel with the
3082  // requested indices
3083  const unsigned *req_idx = idx_range.begin();
3084  for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3085  i != e; ++i, ++req_idx) {
3086  if (req_idx == idx_range.end()) {
3087  // We can't handle this without inserting insertvalues
3088  if (!InsertBefore)
3089  return nullptr;
3090 
3091  // The requested index identifies a part of a nested aggregate. Handle
3092  // this specially. For example,
3093  // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3094  // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3095  // %C = extractvalue {i32, { i32, i32 } } %B, 1
3096  // This can be changed into
3097  // %A = insertvalue {i32, i32 } undef, i32 10, 0
3098  // %C = insertvalue {i32, i32 } %A, i32 11, 1
3099  // which allows the unused 0,0 element from the nested struct to be
3100  // removed.
3101  return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3102  InsertBefore);
3103  }
3104 
3105  // This insertvalue inserts something other than what we are looking for.
3106  // See if the (aggregate) value inserted into has the value we are
3107  // looking for, then.
3108  if (*req_idx != *i)
3109  return FindInsertedValue(I->getAggregateOperand(), idx_range,
3110  InsertBefore);
3111  }
3112  // If we end up here, the indices of the insertvalue match with those
3113  // requested (though possibly only partially). Now we recursively look at
3114  // the inserted value, passing any remaining indices.
3115  return FindInsertedValue(I->getInsertedValueOperand(),
3116  makeArrayRef(req_idx, idx_range.end()),
3117  InsertBefore);
3118  }
3119 
3120  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3121  // If we're extracting a value from an aggregate that was extracted from
3122  // something else, we can extract from that something else directly instead.
3123  // However, we will need to chain I's indices with the requested indices.
3124 
3125  // Calculate the number of indices required
3126  unsigned size = I->getNumIndices() + idx_range.size();
3127  // Allocate some space to put the new indices in
3128  SmallVector<unsigned, 5> Idxs;
3129  Idxs.reserve(size);
3130  // Add indices from the extract value instruction
3131  Idxs.append(I->idx_begin(), I->idx_end());
3132 
3133  // Add requested indices
3134  Idxs.append(idx_range.begin(), idx_range.end());
3135 
3136  assert(Idxs.size() == size
3137  && "Number of indices added not correct?");
3138 
3139  return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3140  }
3141  // Otherwise, we don't know (such as, extracting from a function return value
3142  // or load instruction)
3143  return nullptr;
3144 }
3145 
3146 /// Analyze the specified pointer to see if it can be expressed as a base
3147 /// pointer plus a constant offset. Return the base and offset to the caller.
3148 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
3149  const DataLayout &DL) {
3150  unsigned BitWidth = DL.getIndexTypeSizeInBits(Ptr->getType());
3151  APInt ByteOffset(BitWidth, 0);
3152 
3153  // We walk up the defs but use a visited set to handle unreachable code. In
3154  // that case, we stop after accumulating the cycle once (not that it
3155  // matters).
3156  SmallPtrSet<Value *, 16> Visited;
3157  while (Visited.insert(Ptr).second) {
3158  if (Ptr->getType()->isVectorTy())
3159  break;
3160 
3161  if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
3162  // If one of the values we have visited is an addrspacecast, then
3163  // the pointer type of this GEP may be different from the type
3164  // of the Ptr parameter which was passed to this function. This
3165  // means when we construct GEPOffset, we need to use the size
3166  // of GEP's pointer type rather than the size of the original
3167  // pointer type.
3168  APInt GEPOffset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
3169  if (!GEP->accumulateConstantOffset(DL, GEPOffset))
3170  break;
3171 
3172  ByteOffset += GEPOffset.getSExtValue();
3173 
3174  Ptr = GEP->getPointerOperand();
3175  } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
3176  Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
3177  Ptr = cast<Operator>(Ptr)->getOperand(0);
3178  } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
3179  if (GA->isInterposable())
3180  break;
3181  Ptr = GA->getAliasee();
3182  } else {
3183  break;
3184  }
3185  }
3186  Offset = ByteOffset.getSExtValue();
3187  return Ptr;
3188 }
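// An illustrative example (added; assumes a byte-addressed layout with
// 4-byte i32): given
//   %p = getelementptr inbounds { i32, i32 }, { i32, i32 }* %base, i64 0, i32 1
// this returns %base and sets Offset to 4.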
3189 
3190 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3191  unsigned CharSize) {
3192  // Make sure the GEP has exactly three arguments.
3193  if (GEP->getNumOperands() != 3)
3194  return false;
3195 
3196  // Make sure the index-ee is a pointer to an array of \p CharSize integers.
3198  ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3199  if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3200  return false;
3201 
3202  // Check to make sure that the first operand of the GEP is an integer and
3203  // has value 0 so that we are sure we're indexing into the initializer.
3204  const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3205  if (!FirstIdx || !FirstIdx->isZero())
3206  return false;
3207 
3208  return true;
3209 }
3210 
3211 bool llvm::getConstantDataArrayInfo(const Value *V,
3212  ConstantDataArraySlice &Slice,
3213  unsigned ElementSize, uint64_t Offset) {
3214  assert(V);
3215 
3216  // Look through bitcast instructions and geps.
3217  V = V->stripPointerCasts();
3218 
3219  // If the value is a GEP instruction or constant expression, treat it as an
3220  // offset.
3221  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3222  // The GEP operator should be based on a pointer to string constant, and is
3223  // indexing into the string constant.
3224  if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3225  return false;
3226 
3227  // If the second index isn't a ConstantInt, then this is a variable index
3228  // into the array. If this occurs, we can't say anything meaningful about
3229  // the string.
3230  uint64_t StartIdx = 0;
3231  if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3232  StartIdx = CI->getZExtValue();
3233  else
3234  return false;
3235  return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3236  StartIdx + Offset);
3237  }
3238 
3239  // The GEP instruction, constant or instruction, must reference a global
3240  // variable that is a constant and is initialized. The referenced constant
3241  // initializer is the array that we'll use for optimization.
3242  const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3243  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3244  return false;
3245 
3246  const ConstantDataArray *Array;
3247  ArrayType *ArrayTy;
3248  if (GV->getInitializer()->isNullValue()) {
3249  Type *GVTy = GV->getValueType();
3250  if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3251  // A zeroinitializer for the array; there is no ConstantDataArray.
3252  Array = nullptr;
3253  } else {
3254  const DataLayout &DL = GV->getParent()->getDataLayout();
3255  uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3256  uint64_t Length = SizeInBytes / (ElementSize / 8);
3257  if (Length <= Offset)
3258  return false;
3259 
3260  Slice.Array = nullptr;
3261  Slice.Offset = 0;
3262  Slice.Length = Length - Offset;
3263  return true;
3264  }
3265  } else {
3266  // This must be a ConstantDataArray.
3267  Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3268  if (!Array)
3269  return false;
3270  ArrayTy = Array->getType();
3271  }
3272  if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3273  return false;
3274 
3275  uint64_t NumElts = ArrayTy->getArrayNumElements();
3276  if (Offset > NumElts)
3277  return false;
3278 
3279  Slice.Array = Array;
3280  Slice.Offset = Offset;
3281  Slice.Length = NumElts - Offset;
3282  return true;
3283 }
3284 
3285 /// This function computes the length of a null-terminated C string pointed to
3286 /// by V. If successful, it returns true and returns the string in Str.
3287 /// If unsuccessful, it returns false.
3288 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3289  uint64_t Offset, bool TrimAtNul) {
3290  ConstantDataArraySlice Slice;
3291  if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3292  return false;
3293 
3294  if (Slice.Array == nullptr) {
3295  if (TrimAtNul) {
3296  Str = StringRef();
3297  return true;
3298  }
3299  if (Slice.Length == 1) {
3300  Str = StringRef("", 1);
3301  return true;
3302  }
3303  // We cannot instantiate a StringRef as we do not have an appropriate string
3304  // of 0s at hand.
3305  return false;
3306  }
3307 
3308  // Start out with the entire array in the StringRef.
3309  Str = Slice.Array->getAsString();
3310  // Skip over 'offset' bytes.
3311  Str = Str.substr(Slice.Offset);
3312 
3313  if (TrimAtNul) {
3314  // Trim off the \0 and anything after it. If the array is not nul
3315  // terminated, we just return the whole end of string. The client may know
3316  // some other way that the string is length-bound.
3317  Str = Str.substr(0, Str.find('\0'));
3318  }
3319  return true;
3320 }
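// An illustrative query (added example): for
//   @.str = private constant [6 x i8] c"hello\00"
// a call with Offset == 0 and TrimAtNul == true sets Str to "hello" and
// returns true; with TrimAtNul == false, Str keeps the trailing '\0' byte.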
3321 
3322 // These next two are very similar to the above, but also look through PHI
3323 // nodes.
3324 // TODO: See if we can integrate these two together.
3325 
3326 /// If we can compute the length of the string pointed to by
3327 /// the specified pointer, return 'len+1'. If we can't, return 0.
3328 static uint64_t GetStringLengthH(const Value *V,
3329  SmallPtrSetImpl<const PHINode*> &PHIs,
3330  unsigned CharSize) {
3331  // Look through noop bitcast instructions.
3332  V = V->stripPointerCasts();
3333 
3334  // If this is a PHI node, there are two cases: either we have already seen it
3335  // or we haven't.
3336  if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3337  if (!PHIs.insert(PN).second)
3338  return ~0ULL; // already in the set.
3339 
3340  // If it was new, see if all the input strings are the same length.
3341  uint64_t LenSoFar = ~0ULL;
3342  for (Value *IncValue : PN->incoming_values()) {
3343  uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3344  if (Len == 0) return 0; // Unknown length -> unknown.
3345 
3346  if (Len == ~0ULL) continue;
3347 
3348  if (Len != LenSoFar && LenSoFar != ~0ULL)
3349  return 0; // Disagree -> unknown.
3350  LenSoFar = Len;
3351  }
3352 
3353  // Success, all agree.
3354  return LenSoFar;
3355  }
3356 
3357  // strlen(select(c,x,y)) is known only when strlen(x) and strlen(y) agree.
3358  if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3359  uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3360  if (Len1 == 0) return 0;
3361  uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3362  if (Len2 == 0) return 0;
3363  if (Len1 == ~0ULL) return Len2;
3364  if (Len2 == ~0ULL) return Len1;
3365  if (Len1 != Len2) return 0;
3366  return Len1;
3367  }
3368 
3369  // Otherwise, see if we can read the string.
3370  ConstantDataArraySlice Slice;
3371  if (!getConstantDataArrayInfo(V, Slice, CharSize))
3372  return 0;
3373 
3374  if (Slice.Array == nullptr)
3375  return 1;
3376 
3377  // Search for nul characters
3378  unsigned NullIndex = 0;
3379  for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3380  if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3381  break;
3382  }
3383 
3384  return NullIndex + 1;
3385 }
3386 
3387 /// If we can compute the length of the string pointed to by
3388 /// the specified pointer, return 'len+1'. If we can't, return 0.
3389 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3390  if (!V->getType()->isPointerTy())
3391  return 0;
3392 
3393  SmallPtrSet<const PHINode*, 32> PHIs;
3394  uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3395  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
3396  // an empty string as a length.
3397  return Len == ~0ULL ? 1 : Len;
3398 }
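// An illustrative result (added example): for a pointer into
//   @.str = private constant [6 x i8] c"hello\00"
// this returns 6 (strlen("hello") + 1); for a pointer with unknown contents
// it returns 0.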
3399 
3400 const Value *llvm::getArgumentAliasingToReturnedPointer(ImmutableCallSite CS) {
3401  assert(CS &&
3402  "getArgumentAliasingToReturnedPointer only works on nonnull CallSite");
3403  if (const Value *RV = CS.getReturnedArgOperand())
3404  return RV;
3405  // This can be used only as an aliasing property.
3406  if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(CS))
3407  return CS.getArgOperand(0);
3408  return nullptr;
3409 }
3410 
3411 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
3412  ImmutableCallSite CS) {
3413  return CS.getIntrinsicID() == Intrinsic::launder_invariant_group ||
3414  CS.getIntrinsicID() == Intrinsic::strip_invariant_group;
3415 }
3416 
3417 /// \p PN defines a loop-variant pointer to an object. Check if the
3418 /// previous iteration of the loop was referring to the same object as \p PN.
3419 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3420  const LoopInfo *LI) {
3421  // Find the loop-defined value.
3422  Loop *L = LI->getLoopFor(PN->getParent());
3423  if (PN->getNumIncomingValues() != 2)
3424  return true;
3425 
3426  // Find the value from previous iteration.
3427  auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3428  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3429  PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3430  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3431  return true;
3432 
3433  // If a new pointer is loaded in the loop, the pointer references a different
3434  // object in every iteration. E.g.:
3435  // for (i)
3436  // int *p = a[i];
3437  // ...
3438  if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3439  if (!L->isLoopInvariant(Load->getPointerOperand()))
3440  return false;
3441  return true;
3442 }
3443 
3444 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3445  unsigned MaxLookup) {
3446  if (!V->getType()->isPointerTy())
3447  return V;
3448  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3449  if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3450  V = GEP->getPointerOperand();
3451  } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3452  Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3453  V = cast<Operator>(V)->getOperand(0);
3454  } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3455  if (GA->isInterposable())
3456  return V;
3457  V = GA->getAliasee();
3458  } else if (isa<AllocaInst>(V)) {
3459  // An alloca can't be further simplified.
3460  return V;
3461  } else {
3462  if (auto CS = CallSite(V)) {
3463  // CaptureTracking can know about special capturing properties of some
3464  // intrinsics like launder.invariant.group, that can't be expressed with
3465  // the attributes, but have properties like returning aliasing pointer.
3466  // Because some analyses may assume that a nocapture pointer is not
3467  // returned from some special intrinsic (because the function would have
3468  // to be marked with the returned attribute), it is crucial to use this
3469  // function, which stays in sync with CaptureTracking. Not using it may
3470  // cause weird miscompilations where two aliasing pointers are assumed
3471  // not to alias.
3472  if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
3473  V = RP;
3474  continue;
3475  }
3476  }
3477 
3478  // See if InstructionSimplify knows any relevant tricks.
3479  if (Instruction *I = dyn_cast<Instruction>(V))
3480  // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3481  if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3482  V = Simplified;
3483  continue;
3484  }
3485 
3486  return V;
3487  }
3488  assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3489  }
3490  return V;
3491 }
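// An illustrative walk (added example): given
//   %a = alloca [16 x i8]
//   %p = getelementptr [16 x i8], [16 x i8]* %a, i64 0, i64 8
//   %q = bitcast i8* %p to i32*
// GetUnderlyingObject(%q, DL) strips the bitcast and the GEP and returns %a.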
3492 
3493 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3494  const DataLayout &DL, LoopInfo *LI,
3495  unsigned MaxLookup) {
3496  SmallPtrSet<Value *, 4> Visited;
3497  SmallVector<Value *, 4> Worklist;
3498  Worklist.push_back(V);
3499  do {
3500  Value *P = Worklist.pop_back_val();
3501  P = GetUnderlyingObject(P, DL, MaxLookup);
3502 
3503  if (!Visited.insert(P).second)
3504  continue;
3505 
3506  if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3507  Worklist.push_back(SI->getTrueValue());
3508  Worklist.push_back(SI->getFalseValue());
3509  continue;
3510  }
3511 
3512  if (PHINode *PN = dyn_cast<PHINode>(P)) {
3513  // If this PHI changes the underlying object in every iteration of the
3514  // loop, don't look through it. Consider:
3515  // int **A;
3516  // for (i) {
3517  // Prev = Curr; // Prev = PHI (Prev_0, Curr)
3518  // Curr = A[i];
3519  // *Prev, *Curr;
3520  //
3521  // Prev is tracking Curr one iteration behind so they refer to different
3522  // underlying objects.
3523  if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3524  isSameUnderlyingObjectInLoop(PN, LI))
3525  for (Value *IncValue : PN->incoming_values())
3526  Worklist.push_back(IncValue);
3527  continue;
3528  }
3529 
3530  Objects.push_back(P);
3531  } while (!Worklist.empty());
3532 }
3533 
3534 /// This is the function that does the work of looking through basic
3535 /// ptrtoint+arithmetic+inttoptr sequences.
3536 static const Value *getUnderlyingObjectFromInt(const Value *V) {
3537  do {
3538  if (const Operator *U = dyn_cast<Operator>(V)) {
3539  // If we find a ptrtoint, we can transfer control back to the
3540  // regular getUnderlyingObjectFromInt.
3541  if (U->getOpcode() == Instruction::PtrToInt)
3542  return U->getOperand(0);
3543  // If we find an add of a constant, a multiplied value, or a phi, it's
3544  // likely that the other operand will lead us to the base
3545  // object. We don't have to worry about the case where the
3546  // object address is somehow being computed by the multiply,
3547  // because our callers only care when the result is an
3548  // identifiable object.
3549  if (U->getOpcode() != Instruction::Add ||
3550  (!isa<ConstantInt>(U->getOperand(1)) &&
3551  Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3552  !isa<PHINode>(U->getOperand(1))))
3553  return V;
3554  V = U->getOperand(0);
3555  } else {
3556  return V;
3557  }
3558  assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3559  } while (true);
3560 }
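// An illustrative walk (added example): given
//   %i = ptrtoint i8* %obj to i64
//   %j = add i64 %i, 16
// this follows %j through the add back to the ptrtoint and returns %obj.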
3561 
3562 /// This is a wrapper around GetUnderlyingObjects and adds support for basic
3563 /// ptrtoint+arithmetic+inttoptr sequences.
3564 /// It returns false if an unidentified object is found by GetUnderlyingObjects.
3565 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3566  SmallVectorImpl<Value *> &Objects,
3567  const DataLayout &DL) {
3568  SmallPtrSet<const Value *, 4> Visited;
3569  SmallVector<const Value *, 4> Working(1, V);
3570  do {
3571  V = Working.pop_back_val();
3572 
3573  SmallVector<Value *, 4> Objs;
3574  GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);
3575 
3576  for (Value *V : Objs) {
3577  if (!Visited.insert(V).second)
3578  continue;
3579  if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3580  const Value *O =
3581  getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3582  if (O->getType()->isPointerTy()) {
3583  Working.push_back(O);
3584  continue;
3585  }
3586  }
3587  // If GetUnderlyingObjects fails to find an identifiable object,
3588  // getUnderlyingObjectsForCodeGen also fails for safety.
3589  if (!isIdentifiedObject(V)) {
3590  Objects.clear();
3591  return false;
3592  }
3593  Objects.push_back(const_cast<Value *>(V));
3594  }
3595  } while (!Working.empty());
3596  return true;
3597 }
3598 
3599 /// Return true if the only users of this pointer are lifetime markers.
3600 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3601  for (const User *U : V->users()) {
3602  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3603  if (!II) return false;
3604 
3605  if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
3606  II->getIntrinsicID() != Intrinsic::lifetime_end)
3607  return false;
3608  }
3609  return true;
3610 }
3611 
3612 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3613  const Instruction *CtxI,
3614  const DominatorTree *DT) {
3615  const Operator *Inst = dyn_cast<Operator>(V);
3616  if (!Inst)
3617  return false;
3618 
3619  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3620  if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3621  if (C->canTrap())
3622  return false;
3623 
3624  switch (Inst->getOpcode()) {
3625  default:
3626  return true;
3627  case Instruction::UDiv:
3628  case Instruction::URem: {
3629  // x / y is undefined if y == 0.
3630  const APInt *V;
3631  if (match(Inst->getOperand(1), m_APInt(V)))
3632  return *V != 0;
3633  return false;
3634  }
3635  case Instruction::SDiv:
3636  case Instruction::SRem: {
3637  // x / y is undefined if y == 0 or x == INT_MIN and y == -1
3638  const APInt *Numerator, *Denominator;
3639  if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3640  return false;
3641  // We cannot hoist this division if the denominator is 0.
3642  if (*Denominator == 0)
3643  return false;
3644  // It's safe to hoist if the denominator is not 0 or -1.
3645  if (*Denominator != -1)
3646  return true;
3647  // At this point we know that the denominator is -1. It is safe to hoist as
3648  // long we know that the numerator is not INT_MIN.
3649  if (match(Inst->getOperand(0), m_APInt(Numerator)))
3650  return !Numerator->isMinSignedValue();
3651  // The numerator *might* be MinSignedValue.
3652  return false;
3653  }
3654  case Instruction::Load: {
3655  const LoadInst *LI = cast<LoadInst>(Inst);
3656  if (!LI->isUnordered() ||
3657  // Speculative load may create a race that did not exist in the source.
3658  LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3659  // Speculative load may load data from dirty regions.
3660  LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3661  LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3662  return false;
3663  const DataLayout &DL = LI->getModule()->getDataLayout();
3664  return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3665  LI->getAlignment(), DL, CtxI, DT);
3666  }
3667  case Instruction::Call: {
3668  auto *CI = cast<const CallInst>(Inst);
3669  const Function *Callee = CI->getCalledFunction();
3670 
3671  // The called function could have undefined behavior or side-effects, even
3672  // if marked readnone nounwind.
3673  return Callee && Callee->isSpeculatable();
3674  }
3675  case Instruction::VAArg:
3676  case Instruction::Alloca:
3677  case Instruction::Invoke:
3678  case Instruction::PHI:
3679  case Instruction::Store:
3680  case Instruction::Ret:
3681  case Instruction::Br:
3682  case Instruction::IndirectBr:
3683  case Instruction::Switch:
3684  case Instruction::Unreachable:
3685  case Instruction::Fence:
3686  case Instruction::AtomicRMW:
3687  case Instruction::AtomicCmpXchg:
3688  case Instruction::LandingPad:
3689  case Instruction::Resume:
3690  case Instruction::CatchSwitch:
3691  case Instruction::CatchPad:
3692  case Instruction::CatchRet:
3693  case Instruction::CleanupPad:
3694  case Instruction::CleanupRet:
3695  return false; // Misc instructions which have effects
3696  }
3697 }
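// Some illustrative cases (added examples; they follow from the switch above):
//   %d = udiv i32 %x, 7   ; speculatable, the divisor is a nonzero constant
//   %e = udiv i32 %x, %y  ; not speculatable, %y may be zero
//   %f = sdiv i32 %x, -1  ; not speculatable, %x may be INT_MIN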
3698 
3699 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3700  return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3701 }
3702 
3703 OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
3704  const Value *RHS,
3705  const DataLayout &DL,
3706  AssumptionCache *AC,
3707  const Instruction *CxtI,
3708  const DominatorTree *DT) {
3709  // Multiplying n * m significant bits yields a result of n + m significant
3710  // bits. If the total number of significant bits does not exceed the
3711  // result bit width (minus 1), there is no overflow.
3712  // This means if we have enough leading zero bits in the operands
3713  // we can guarantee that the result does not overflow.
3714  // Ref: "Hacker's Delight" by Henry Warren
3715  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3716  KnownBits LHSKnown(BitWidth);
3717  KnownBits RHSKnown(BitWidth);
3718  computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3719  computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3720  // Note that underestimating the number of zero bits gives a more
3721  // conservative answer.
3722  unsigned ZeroBits = LHSKnown.countMinLeadingZeros() +
3723  RHSKnown.countMinLeadingZeros();
3724  // First handle the easy case: if we have enough zero bits there's
3725  // definitely no overflow.
3726  if (ZeroBits >= BitWidth)
3727  return OverflowResult::NeverOverflows;
3728 
3729  // Get the largest possible values for each operand.
3730  APInt LHSMax = ~LHSKnown.Zero;
3731  APInt RHSMax = ~RHSKnown.Zero;
3732 
3733  // We know the multiply operation doesn't overflow if the maximum values for
3734  // each operand will not overflow after we multiply them together.
3735  bool MaxOverflow;
3736  (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
3737  if (!MaxOverflow)
3738  return OverflowResult::NeverOverflows;
3739 
3740  // We know it always overflows if multiplying the smallest possible values for
3741  // the operands also results in overflow.
3742  bool MinOverflow;
3743  (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow);
3744  if (MinOverflow)
3745  return OverflowResult::AlwaysOverflows;
3746 
3747  return OverflowResult::MayOverflow;
3748 }
3749 
3750 OverflowResult llvm::computeOverflowForSignedMul(const Value *LHS,
3751  const Value *RHS,
3752  const DataLayout &DL,
3753  AssumptionCache *AC,
3754  const Instruction *CxtI,
3755  const DominatorTree *DT) {
3756  // Multiplying n * m significant bits yields a result of n + m significant
3757  // bits. If the total number of significant bits does not exceed the
3758  // result bit width (minus 1), there is no overflow.
3759  // This means if we have enough leading sign bits in the operands
3760  // we can guarantee that the result does not overflow.
3761  // Ref: "Hacker's Delight" by Henry Warren
3762  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3763 
3764  // Note that underestimating the number of sign bits gives a more
3765  // conservative answer.
3766  unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
3767  ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
3768 
3769  // First handle the easy case: if we have enough sign bits there's
3770  // definitely no overflow.
3771  if (SignBits > BitWidth + 1)
3772  return OverflowResult::NeverOverflows;
3773 
3774  // There are two ambiguous cases where there can be no overflow:
3775  // SignBits == BitWidth + 1 and
3776  // SignBits == BitWidth
3777  // The second case is difficult to check, therefore we only handle the
3778  // first case.
3779  if (SignBits == BitWidth + 1) {
3780  // It overflows only when both arguments are negative and the true
3781  // product is exactly the minimum negative number.
3782  // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
3783  // For simplicity we just check if at least one side is not negative.
3784  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3785  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3786  if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
3787  return OverflowResult::NeverOverflows;
3788  }
3789  return OverflowResult::MayOverflow;
3790 }
3791 
3792 OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
3793  const Value *RHS,
3794  const DataLayout &DL,
3795  AssumptionCache *AC,
3796  const Instruction *CxtI,
3797  const DominatorTree *DT) {
3798  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3799  if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
3800  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3801 
3802  if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
3803  // The sign bit is set in both cases: this MUST overflow.
3805  return OverflowResult::AlwaysOverflows;
3806  }
3807 
3808  if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) {
3809  // The sign bit is clear in both cases: this CANNOT overflow.
3811  return OverflowResult::NeverOverflows;
3812  }
3813  }
3814 
3815  return OverflowResult::MayOverflow;
3816 }
3817 
3818 /// Return true if we can prove that adding the two values of the
3819 /// knownbits will not overflow.
3820 /// Otherwise return false.
3821 static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
3822  const KnownBits &RHSKnown) {
3823  // Addition of two 2's complement numbers having opposite signs will never
3824  // overflow.
3825  if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
3826  (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
3827  return true;
3828 
3829  // If either of the values is known to be non-negative, adding them can only
3830  // overflow if the second is also non-negative, so we can assume that.
3831  // Two non-negative numbers will only overflow if there is a carry to the
3832  // sign bit, so we can check if even when the values are as big as possible
3833  // there is no overflow to the sign bit.
3834  if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
3835  APInt MaxLHS = ~LHSKnown.Zero;
3836  MaxLHS.clearSignBit();
3837  APInt MaxRHS = ~RHSKnown.Zero;
3838  MaxRHS.clearSignBit();
3839  APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
3840  return Result.isSignBitClear();
3841  }
3842 
3843  // If either of the values is known to be negative, adding them can only
3844  // overflow if the second is also negative, so we can assume that.
3845  // Two negative numbers will only overflow if there is no carry to the sign
3846  // bit, so we can check if even when the values are as small as possible
3847  // there is overflow to the sign bit.
3848  if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
3849  APInt MinLHS = LHSKnown.One;
3850  MinLHS.clearSignBit();
3851  APInt MinRHS = RHSKnown.One;
3852  MinRHS.clearSignBit();
3853  APInt Result = std::move(MinLHS) + std::move(MinRHS);
3854  return Result.isSignBitSet();
3855  }
3856 
3857  // If we reached here it means that we know nothing about the sign bits.
3858  // In this case we can't know if there will be an overflow, since by
3859  // changing the sign bits any two values can be made to overflow.
3860  return false;
3861 }
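// A worked example (added for illustration): with i8 operands that are both
// known non-negative and have Zero = 0xc0 (so each value is at most 0x3f),
// the largest possible sum is 0x3f + 0x3f = 0x7e; its sign bit is clear, so
// the addition cannot overflow.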
3862 
3863 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
3864  const Value *RHS,
3865  const AddOperator *Add,
3866  const DataLayout &DL,
3867  AssumptionCache *AC,
3868  const Instruction *CxtI,
3869  const DominatorTree *DT) {
3870  if (Add && Add->hasNoSignedWrap()) {
3871  return OverflowResult::NeverOverflows;
3872  }
3873 
3874  // If LHS and RHS each have at least two sign bits, the addition will look
3875  // like
3876  //
3877  // XX..... +
3878  // YY.....
3879  //
3880  // If the carry into the most significant position is 0, X and Y can't both
3881  // be 1 and therefore the carry out of the addition is also 0.
3882  //
3883  // If the carry into the most significant position is 1, X and Y can't both
3884  // be 0 and therefore the carry out of the addition is also 1.
3885  //
3886  // Since the carry into the most significant position is always equal to
3887  // the carry out of the addition, there is no signed overflow.
3888  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
3889  ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
3890  return OverflowResult::NeverOverflows;
3891 
3892  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3893  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3894 
3895  if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
3896  return OverflowResult::NeverOverflows;
3897 
3898  // The remaining code needs Add to be available; return early if it is not.
3899  if (!Add)
3900  return OverflowResult::MayOverflow;
3901 
3902  // If the sign of Add is the same as at least one of the operands, this add
3903  // CANNOT overflow. This is particularly useful when the sum is
3904  // @llvm.assume'ed non-negative rather than proved so from analyzing its
3905  // operands.
3906  bool LHSOrRHSKnownNonNegative =
3907  (LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
3908  bool LHSOrRHSKnownNegative =
3909  (LHSKnown.isNegative() || RHSKnown.isNegative());
3910  if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3911  KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT);
3912  if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
3913  (AddKnown.isNegative() && LHSOrRHSKnownNegative)) {
3914  return OverflowResult::NeverOverflows;
3915  }
3916  }
3917 
3918  return OverflowResult::MayOverflow;
3919 }
3920 
3921 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
3922  const Value *RHS,
3923  const DataLayout &DL,
3924  AssumptionCache *AC,
3925  const Instruction *CxtI,
3926  const DominatorTree *DT) {
3927  // If the LHS is negative and the RHS is non-negative, no unsigned wrap.
3928  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3929  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3930  if (LHSKnown.isNegative() && RHSKnown.isNonNegative())
3931  return OverflowResult::NeverOverflows;
3932 
3933  return OverflowResult::MayOverflow;
3934 }
3935 
3936 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
3937  const Value *RHS,
3938  const DataLayout &DL,
3939  AssumptionCache *AC,
3940  const Instruction *CxtI,
3941  const DominatorTree *DT) {
3942  // If LHS and RHS each have at least two sign bits, the subtraction
3943  // cannot overflow.
3944  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
3945  ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
3946  return OverflowResult::NeverOverflows;
3947 
3948  KnownBits LHSKnown = computeKnownBits(LHS, DL, 0, AC, CxtI, DT);
3949 
3950  KnownBits RHSKnown = computeKnownBits(RHS, DL, 0, AC, CxtI, DT);
3951 
3952  // Subtraction of two 2's complement numbers having identical signs will
3953  // never overflow.
3954  if ((LHSKnown.isNegative() && RHSKnown.isNegative()) ||
3955  (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()))
3956  return OverflowResult::NeverOverflows;
3957 
3958  // TODO: implement logic similar to checkRippleForAdd
3959  return OverflowResult::MayOverflow;
3960 }
3961 
3962 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
3963  const DominatorTree &DT) {
3964 #ifndef NDEBUG
3965  auto IID = II->getIntrinsicID();
3966  assert((IID == Intrinsic::sadd_with_overflow ||
3967  IID == Intrinsic::uadd_with_overflow ||
3968  IID == Intrinsic::ssub_with_overflow ||
3969  IID == Intrinsic::usub_with_overflow ||
3970  IID == Intrinsic::smul_with_overflow ||
3971  IID == Intrinsic::umul_with_overflow) &&
3972  "Not an overflow intrinsic!");
3973 #endif
3974 
3975  SmallVector<const BranchInst *, 2> GuardingBranches;
3976  SmallVector<const ExtractValueInst *, 2> Results;
3977 
3978  for (const User *U : II->users()) {
3979  if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
3980  assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
3981 
3982  if (EVI->getIndices()[0] == 0)
3983  Results.push_back(EVI);
3984  else {
3985  assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
3986 
3987  for (const auto *U : EVI->users())
3988  if (const auto *B = dyn_cast<BranchInst>(U)) {
3989  assert(B->isConditional() && "How else is it using an i1?");
3990  GuardingBranches.push_back(B);
3991  }
3992  }
3993  } else {
3994  // We are using the aggregate directly in a way we don't want to analyze
3995  // here (storing it to a global, say).
3996  return false;
3997  }
3998  }
3999 
4000  auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4001  BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4002  if (!NoWrapEdge.isSingleEdge())
4003  return false;
4004 
4005  // Check if all users of the add are provably no-wrap.
4006  for (const auto *Result : Results) {
4007  // If the extractvalue itself is not executed on overflow, then we don't
4008  // need to check each use separately, since domination is transitive.
4009  if (DT.dominates(NoWrapEdge, Result->getParent()))
4010  continue;
4011 
4012  for (auto &RU : Result->uses())
4013  if (!DT.dominates(NoWrapEdge, RU))
4014  return false;
4015  }
4016 
4017  return true;
4018  };
4019 
4020  return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4021 }
4022 
4023 
4024 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
4025  const DataLayout &DL,
4026  AssumptionCache *AC,
4027  const Instruction *CxtI,
4028  const DominatorTree *DT) {
4029  return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
4030  Add, DL, AC, CxtI, DT);
4031 }
4032 
4033 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
4034  const Value *RHS,
4035  const DataLayout &DL,
4036  AssumptionCache *AC,
4037  const Instruction *CxtI,
4038  const DominatorTree *DT) {
4039  return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
4040 }
4041 
4042 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
4043  // A memory operation returns normally if it isn't volatile. A volatile
4044  // operation is allowed to trap.
4045  //
4046  // An atomic operation isn't guaranteed to return in a reasonable amount of
4047  // time because it's possible for another thread to interfere with it for an
4048  // arbitrary length of time, but programs aren't allowed to rely on that.
4049  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
4050  return !LI->isVolatile();
4051  if (const StoreInst *SI = dyn_cast<StoreInst>(I))
4052  return !SI->isVolatile();
4053  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
4054  return !CXI->isVolatile();
4055  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
4056  return !RMWI->isVolatile();
4057  if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
4058  return !MII->isVolatile();
4059 
4060  // If there is no successor, then execution can't transfer to it.
4061  if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
4062  return !CRI->unwindsToCaller();
4063  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
4064  return !CatchSwitch->unwindsToCaller();
4065  if (isa<ResumeInst>(I))
4066  return false;
4067  if (isa<ReturnInst>(I))
4068  return false;
4069  if (isa<UnreachableInst>(I))
4070  return false;
4071 
4072  // Calls can throw, or contain an infinite loop, or kill the process.
4073  if (auto CS = ImmutableCallSite(I)) {
4074  // Call sites that throw have implicit non-local control flow.
4075  if (!CS.doesNotThrow())
4076  return false;
4077 
4078  // Non-throwing call sites can loop infinitely, call exit/pthread_exit
4079  // etc. and thus not return. However, LLVM already assumes that
4080  //
4081  // - Thread exiting actions are modeled as writes to memory invisible to
4082  // the program.
4083  //
4084  // - Loops that don't have side effects (side effects are volatile/atomic
4085  // stores and IO) always terminate (see http://llvm.org/PR965).
4086  // Furthermore IO itself is also modeled as writes to memory invisible to
4087  // the program.
4088  //
4089  // We rely on those assumptions here, and use the memory effects of the call
4090  // target as a proxy for checking that it always returns.
4091 
4092  // FIXME: This isn't aggressive enough; a call which only writes to a global
4093  // is guaranteed to return.
4094  return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
4095  match(I, m_Intrinsic<Intrinsic::assume>()) ||
4096  match(I, m_Intrinsic<Intrinsic::sideeffect>());
4097  }
4098 
4099  // Other instructions return normally.
4100  return true;
4101 }
4102 
4103 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
4104  // TODO: This is slightly conservative for invoke instructions since exiting
4105  // via an exception *is* normal control flow for them.
4106  for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
4107  if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
4108  return false;
4109  return true;
4110 }
4111 
4112 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
4113  const Loop *L) {
4114  // The loop header is guaranteed to be executed for every iteration.
4115  //
4116  // FIXME: Relax this constraint to cover all basic blocks that are
4117  // guaranteed to be executed at every iteration.
4118  if (I->getParent() != L->getHeader()) return false;
4119 
4120  for (const Instruction &LI : *L->getHeader()) {
4121  if (&LI == I) return true;
4122  if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
4123  }
4124  llvm_unreachable("Instruction not contained in its own parent basic block.");
4125 }
4126 
4127 bool llvm::propagatesFullPoison(const Instruction *I) {
4128  switch (I->getOpcode()) {
4129  case Instruction::Add:
4130  case Instruction::Sub:
4131  case Instruction::Xor:
4132  case Instruction::Trunc:
4133  case Instruction::BitCast:
4134  case Instruction::AddrSpaceCast:
4135  case Instruction::Mul:
4136  case Instruction::Shl:
4137  case Instruction::GetElementPtr:
4138  // These operations all propagate poison unconditionally. Note that poison
4139  // is not any particular value, so xor or subtraction of poison with
4140  // itself still yields poison, not zero.
4141  return true;
4142 
4143  case Instruction::AShr:
4144  case Instruction::SExt:
4145  // For these operations, one bit of the input is replicated across
4146  // multiple output bits. A replicated poison bit is still poison.
4147  return true;
4148 
4149  case Instruction::ICmp:
4150  // Comparing poison with any value yields poison. This is why, for
4151  // instance, x s< (x +nsw 1) can be folded to true.
4152  return true;
4153 
4154  default:
4155  return false;
4156  }
4157 }
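// Illustrative cases (added examples; they follow from the switch above):
// if %p is full poison, then
//   %q = add i32 %p, 1                ; %q is poison (add propagates it)
//   %r = select i1 %c, i32 %p, i32 0  ; select is not listed above, so this
//                                     ; function returns false for %r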
4158 
4159 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
4160  switch (I->getOpcode()) {
4161  case Instruction::Store:
4162  return cast<StoreInst>(I)->getPointerOperand();
4163 
4164  case Instruction::Load:
4165  return cast<LoadInst>(I)->getPointerOperand();
4166 
4167  case Instruction::AtomicCmpXchg:
4168  return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
4169 
4170  case Instruction::AtomicRMW:
4171  return cast<AtomicRMWInst>(I)->getPointerOperand();
4172 
4173  case Instruction::UDiv:
4174  case Instruction::SDiv:
4175  case Instruction::URem:
4176  case Instruction::SRem:
4177  return I->getOperand(1);
4178 
4179  default:
4180  return nullptr;
4181  }
4182 }
4183 
4184 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
4185  // We currently only look for uses of poison values within the same basic
4186  // block, as that makes it easier to guarantee that the uses will be
4187  // executed given that PoisonI is executed.
4188  //
4189  // FIXME: Expand this to consider uses beyond the same basic block. To do
4190  // this, look out for the distinction between post-dominance and strong
4191  // post-dominance.
4192  const BasicBlock *BB = PoisonI->getParent();
4193 
4194  // Set of instructions that we have proved will yield poison if PoisonI
4195  // does.
4196  SmallSet<const Value *, 16> YieldsPoison;
4197  SmallSet<const BasicBlock *, 4> Visited;
4198  YieldsPoison.insert(PoisonI);
4199  Visited.insert(PoisonI->getParent());
4200 
4201  BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
4202 
4203  unsigned Iter = 0;
4204  while (Iter++ < MaxDepth) {
4205  for (auto &I : make_range(Begin, End)) {
4206  if (&I != PoisonI) {
4207  const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
4208  if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
4209  return true;
4210  if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4211  return false;
4212  }
4213 
4214  // Mark poison that propagates from I through uses of I.
4215  if (YieldsPoison.count(&I)) {
4216  for (const User *User : I.users()) {
4217  const Instruction *UserI = cast<Instruction>(User);
4218  if (propagatesFullPoison(UserI))
4219  YieldsPoison.insert(User);
4220  }
4221  }
4222  }
4223 
4224  if (auto *NextBB = BB->getSingleSuccessor()) {
4225  if (Visited.insert(NextBB).second) {
4226  BB = NextBB;
4227  Begin = BB->getFirstNonPHI()->getIterator();
4228  End = BB->end();
4229  continue;
4230  }
4231  }
4232 
4233  break;
4234  }
4235  return false;
4236 }
4237 
4238 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4239  if (FMF.noNaNs())
4240  return true;
4241 
4242  if (auto *C = dyn_cast<ConstantFP>(V))
4243  return !C->isNaN();
4244  return false;
4245 }
4246 
4247 static bool isKnownNonZero(const Value *V) {
4248  if (auto *C = dyn_cast<ConstantFP>(V))
4249  return !C->isZero();
4250  return false;
4251 }
4252 
4253 /// Match clamp pattern for float types, ignoring NaNs and signed zeros.
4254 /// Given a non-min/max outer cmp/select from the clamp pattern, this
4255 /// function recognizes if it can be substituted by a "canonical" min/max
4256 /// pattern.
4257 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4258  Value *CmpLHS, Value *CmpRHS,
4259  Value *TrueVal, Value *FalseVal,
4260  Value *&LHS, Value *&RHS) {
4261  // Try to match
4262  // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4263  // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4264  // and return description of the outer Max/Min.
4265 
4266  // First, check if select has inverse order:
4267  if (CmpRHS == FalseVal) {
4268  std::swap(TrueVal, FalseVal);
4269  Pred = CmpInst::getInversePredicate(Pred);
4270  }
4271 
4272  // Assume success now. If there's no match, callers should not use these anyway.
4273  LHS = TrueVal;
4274  RHS = FalseVal;
4275 
4276  const APFloat *FC1;
4277  if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4278  return {SPF_UNKNOWN, SPNB_NA, false};
4279 
4280  const APFloat *FC2;
4281  switch (Pred) {
4282  case CmpInst::FCMP_OLT:
4283  case CmpInst::FCMP_OLE:
4284  case CmpInst::FCMP_ULT:
4285  case CmpInst::FCMP_ULE:
4286  if (match(FalseVal,
4287  m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4288  m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4289  FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4290  return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4291  break;
4292  case CmpInst::FCMP_OGT:
4293  case CmpInst::FCMP_OGE:
4294  case CmpInst::FCMP_UGT:
4295  case CmpInst::FCMP_UGE:
4296  if (match(FalseVal,
4297  m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4298  m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4299  FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4300  return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4301  break;
4302  default:
4303  break;
4304  }
4305 
4306  return {SPF_UNKNOWN, SPNB_NA, false};
4307 }
4308 
4309 /// Recognize variations of:
4310 /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
4311 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
4312  Value *CmpLHS, Value *CmpRHS,
4313  Value *TrueVal, Value *FalseVal) {
4314  // Swap the select operands and predicate to match the patterns below.
4315  if (CmpRHS != TrueVal) {
4316  Pred = ICmpInst::getSwappedPredicate(Pred);
4317  std::swap(TrueVal, FalseVal);
4318  }
4319  const APInt *C1;
4320  if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
4321  const APInt *C2;
4322  // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
4323  if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4324  C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
4325  return {SPF_SMAX, SPNB_NA, false};
4326 
4327  // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
4328  if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4329  C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
4330  return {SPF_SMIN, SPNB_NA, false};
4331 
4332  // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
4333  if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4334  C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
4335  return {SPF_UMAX, SPNB_NA, false};
4336 
4337  // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
4338  if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4339  C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
4340  return {SPF_UMIN, SPNB_NA, false};
4341  }
4342  return {SPF_UNKNOWN, SPNB_NA, false};
4343 }
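// A concrete clamp for illustration (added example, matching the first
// pattern above with C1 = 10, C2 = 100):
//   %min = select (icmp slt i32 %x, 100), i32 %x, i32 100  ; SMIN(%x, 100)
//   %cmp = icmp slt i32 %x, 10
//   %r   = select i1 %cmp, i32 10, i32 %min
// matchClamp returns SPF_SMAX for %r, i.e. SMAX(SMIN(%x, 100), 10), which
// clamps %x to the range [10, 100].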
4344 
4345 /// Recognize variations of:
4346 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
4347 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
4348  Value *CmpLHS, Value *CmpRHS,
4349  Value *TVal, Value *FVal,
4350  unsigned Depth) {
4351  // TODO: Allow FP min/max with nnan/nsz.
4352  assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
4353 
4354  Value *A, *B;
4355  SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
4356  if (!SelectPatternResult::isMinOrMax(L.Flavor))
4357  return {SPF_UNKNOWN, SPNB_NA, false};
4358 
4359  Value *C, *D;
4360  SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
4361  if (L.Flavor != R.Flavor)
4362  return {SPF_UNKNOWN, SPNB_NA, false};
4363 
4364  // We have something like: x Pred y ? min(a, b) : min(c, d).
4365  // Try to match the compare to the min/max operations of the select operands.
4366  // First, make sure we have the right compare predicate.
4367  switch (L.Flavor) {
4368  case SPF_SMIN:
4369  if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
4370  Pred = ICmpInst::getSwappedPredicate(Pred);
4371  std::swap(CmpLHS, CmpRHS);
4372  }
4373  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
4374  break;
4375  return {SPF_UNKNOWN, SPNB_NA, false};
4376  case SPF_SMAX:
4377  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
4378  Pred = ICmpInst::getSwappedPredicate(Pred);
4379  std::swap(CmpLHS, CmpRHS);
4380  }
4381  if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
4382  break;
4383  return {SPF_UNKNOWN, SPNB_NA, false};
4384  case SPF_UMIN:
4385  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
4386  Pred = ICmpInst::getSwappedPredicate(Pred);
4387  std::swap(CmpLHS, CmpRHS);
4388  }
4389  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
4390  break;
4391  return {SPF_UNKNOWN, SPNB_NA, false};
4392  case SPF_UMAX:
4393  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
4394  Pred = ICmpInst::getSwappedPredicate(Pred);
4395  std::swap(CmpLHS, CmpRHS);
4396  }
4397  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
4398  break;
4399  return {SPF_UNKNOWN, SPNB_NA, false};
4400  default:
4401  return {SPF_UNKNOWN, SPNB_NA, false};
4402  }
4403 
4404  // If there is a common operand in the already matched min/max and the other
4405  // min/max operands match the compare operands (either directly or inverted),
4406  // then this is min/max of the same flavor.
4407 
4408  // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4409  // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4410  if (D == B) {
4411  if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4412  match(A, m_Not(m_Specific(CmpRHS)))))
4413  return {L.Flavor, SPNB_NA, false};
4414  }
4415  // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4416  // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4417  if (C == B) {
4418  if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4419  match(A, m_Not(m_Specific(CmpRHS)))))
4420  return {L.Flavor, SPNB_NA, false};
4421  }
4422  // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4423  // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4424  if (D == A) {
4425  if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4426  match(B, m_Not(m_Specific(CmpRHS)))))
4427  return {L.Flavor, SPNB_NA, false};
4428  }
4429  // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4430  // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4431  if (C == A) {
4432  if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4433  match(B, m_Not(m_Specific(CmpRHS)))))
4434  return {L.Flavor, SPNB_NA, false};
4435  }
4436 
4437  return {SPF_UNKNOWN, SPNB_NA, false};
4438 }
4439 
4440 /// Match non-obvious integer minimum and maximum sequences.
4441 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
4442  Value *CmpLHS, Value *CmpRHS,
4443  Value *TrueVal, Value *FalseVal,
4444  Value *&LHS, Value *&RHS,
4445  unsigned Depth) {
4446  // Assume success. If there's no match, callers should not use these anyway.
4447  LHS = TrueVal;
4448  RHS = FalseVal;
4449 
4450  SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
4451  if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4452  return SPR;
4453 
4454  SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
4455  if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4456  return SPR;
4457 
4458  if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
4459  return {SPF_UNKNOWN, SPNB_NA, false};
4460 
4461  // Z = X -nsw Y
4462  // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
4463  // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
4464  if (match(TrueVal, m_Zero()) &&
4465  match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4466  return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4467 
4468  // Z = X -nsw Y
4469  // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
4470  // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
4471  if (match(FalseVal, m_Zero()) &&
4472  match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4473  return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4474 
4475  const APInt *C1;
4476  if (!match(CmpRHS, m_APInt(C1)))
4477  return {SPF_UNKNOWN, SPNB_NA, false};
4478 
4479  // An unsigned min/max can be written with a signed compare.
4480  const APInt *C2;
4481  if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
4482  (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
4483  // Is the sign bit set?
4484  // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
4485  // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
4486  if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
4487  C2->isMaxSignedValue())
4488  return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4489 
4490  // Is the sign bit clear?
4491  // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
4492  // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
4493  if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
4494  C2->isMinSignedValue())
4495  return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4496  }
4497 
4498  // Look through 'not' ops to find disguised signed min/max.
4499  // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
4500  // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
4501  if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
4502  match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
4503  return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4504 
4505  // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
4506  // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
4507  if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
4508  match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
4509  return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4510 
4511  return {SPF_UNKNOWN, SPNB_NA, false};
4512 }
4513 
4514 bool llvm::isKnownNegation(const Value *X, const Value *Y) {
4515  assert(X && Y && "Invalid operand");
4516 
4517  // X = sub (0, Y)
4518  if (match(X, m_Neg(m_Specific(Y))))
4519  return true;
4520 
4521  // Y = sub (0, X)
4522  if (match(Y, m_Neg(m_Specific(X))))
4523  return true;
4524 
4525  // X = sub (A, B), Y = sub (B, A)
4526  Value *A, *B;
4527  return match(X, m_Sub(m_Value(A), m_Value(B))) &&
4528  match(Y, m_Sub(m_Specific(B), m_Specific(A)));
4529 }
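// Illustrative cases (added examples; they follow from the matchers above):
//   %x = sub i32 0, %y    ; isKnownNegation(%x, %y) == true
//   %a = sub i32 %p, %q
//   %b = sub i32 %q, %p   ; isKnownNegation(%a, %b) == true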
4530 
4531 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
4532  FastMathFlags FMF,
4533  Value *CmpLHS, Value *CmpRHS,
4534  Value *TrueVal, Value *FalseVal,
4535  Value *&LHS, Value *&RHS,
4536  unsigned Depth) {
4537  LHS = CmpLHS;
4538  RHS = CmpRHS;
4539 
4540  // Signed zero may return inconsistent results between implementations.
4541  // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
4542  // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
4543  // Therefore, we behave conservatively and only proceed if at least one of the
4544  // operands is known to not be zero or if we don't care about signed zero.
4545  switch (Pred) {
4546  default: break;
4547  // FIXME: Include OGT/OLT/UGT/ULT.
4548  case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
4549  case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
4550  if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4551  !isKnownNonZero(CmpRHS))
4552  return {SPF_UNKNOWN, SPNB_NA, false};
4553  }
4554 
4555  SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
4556  bool Ordered = false;
4557 
4558  // When given one NaN and one non-NaN input:
4559  // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
4560  // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
4561  // ordered comparison fails), which could be NaN or non-NaN.
4562  // So here we determine exactly what NaN behavior is required/accepted.
4563  if (CmpInst::isFPPredicate(Pred)) {
4564  bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
4565  bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
4566 
4567  if (LHSSafe && RHSSafe) {
4568  // Both operands are known non-NaN.
4569  NaNBehavior = SPNB_RETURNS_ANY;
4570  } else if (CmpInst::isOrdered(Pred)) {
4571  // An ordered comparison will return false when given a NaN, so it
4572  // returns the RHS.
4573  Ordered = true;
4574  if (LHSSafe)
4575  // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
4576  NaNBehavior = SPNB_RETURNS_NAN;
4577  else if (RHSSafe)
4578  NaNBehavior = SPNB_RETURNS_OTHER;
4579  else
4580  // Completely unsafe.
4581  return {SPF_UNKNOWN, SPNB_NA, false};
4582  } else {
4583  Ordered = false;
4584  // An unordered comparison will return true when given a NaN, so it
4585  // returns the LHS.
4586  if (LHSSafe)
4587  // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
4588  NaNBehavior = SPNB_RETURNS_OTHER;
4589  else if (RHSSafe)
4590  NaNBehavior = SPNB_RETURNS_NAN;
4591  else
4592  // Completely unsafe.
4593  return {SPF_UNKNOWN, SPNB_NA, false};
4594  }
4595  }
4596 
4597  if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
4598  std::swap(CmpLHS, CmpRHS);
4599  Pred = CmpInst::getSwappedPredicate(Pred);
4600  if (NaNBehavior == SPNB_RETURNS_NAN)
4601  NaNBehavior = SPNB_RETURNS_OTHER;
4602  else if (NaNBehavior == SPNB_RETURNS_OTHER)
4603  NaNBehavior = SPNB_RETURNS_NAN;
4604  Ordered = !Ordered;
4605  }
4606 
4607  // ([if]cmp X, Y) ? X : Y
4608  if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
4609  switch (Pred) {
4610  default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
4611  case ICmpInst::ICMP_UGT:
4612  case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
4613  case ICmpInst::ICMP_SGT:
4614  case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
4615  case ICmpInst::ICMP_ULT:
4616  case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
4617  case ICmpInst::ICMP_SLT:
4618  case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
4619  case FCmpInst::FCMP_UGT:
4620  case FCmpInst::FCMP_UGE:
4621  case FCmpInst::FCMP_OGT:
4622  case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
4623  case FCmpInst::FCMP_ULT:
4624  case FCmpInst::FCMP_ULE:
4625  case FCmpInst::FCMP_OLT:
4626  case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
4627  }
4628  }
4629 
4630  if (isKnownNegation(TrueVal, FalseVal)) {
4631  // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
4632  // match against either LHS or sext(LHS).
4633  auto MaybeSExtCmpLHS =
4634  m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
4635  auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
4636  auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
4637  if (match(TrueVal, MaybeSExtCmpLHS)) {
4638  // Set the return values. If the compare uses the negated value (-X >s 0),
4639  // swap the return values because the negated value is always 'RHS'.
4640  LHS = TrueVal;
4641  RHS = FalseVal;
4642  if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
4643  std::swap(LHS, RHS);
4644 
4645  // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
4646  // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
4647  if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
4648  return {SPF_ABS, SPNB_NA, false};
4649 
4650  // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
4651  // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
4652  if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
4653  return {SPF_NABS, SPNB_NA, false};
4654  }
4655  else if (match(FalseVal, MaybeSExtCmpLHS)) {
4656  // Set the return values. If the compare uses the negated value (-X >s 0),
4657  // swap the return values because the negated value is always 'RHS'.
4658  LHS = FalseVal;
4659  RHS = TrueVal;
4660  if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
4661  std::swap(LHS, RHS);
4662 
4663  // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
4664  // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
4665  if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
4666  return {SPF_NABS, SPNB_NA, false};
4667 
4668  // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
4669  // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
4670  if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
4671  return {SPF_ABS, SPNB_NA, false};
4672  }
4673  }
4674 
4675  if (CmpInst::isIntPredicate(Pred))
4676  return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
4677 
4678  // According to IEEE 754-2008 (5.3.1), minNum(0.0, -0.0) and similar
4679  // may return either -0.0 or 0.0, so an fcmp/select pair has stricter
4680  // semantics than minNum. Be conservative in such cases.
4681  if (NaNBehavior != SPNB_RETURNS_ANY ||
4682  (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4683  !isKnownNonZero(CmpRHS)))
4684  return {SPF_UNKNOWN, SPNB_NA, false};
4685 
4686  return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4687 }
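To make the NaN classification above concrete, here is a small standalone C++ illustration (not part of this file) of why the ordered compare/select idiom returns its RHS on a NaN LHS, while fmin prefers the non-NaN operand:

    #include <cmath>

    // The C99 idiom analyzed above: an ordered '<' is false on NaN input,
    // so the select falls through to the false arm (the RHS).
    double selectMin(double A, double B) { return A < B ? A : B; }

    // selectMin(NAN, 1.0) == 1.0   (NaN on the LHS: RHS is returned)
    // selectMin(1.0, NAN) -> NaN   (NaN on the RHS: RHS is returned)
    // std::fmin(1.0, NAN) == 1.0   (fmin always prefers the non-NaN input)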
4688 
4689 /// Helps to match a select pattern in case of a type mismatch.
4690 ///
4691 /// The function handles the case when the types of the true and false values
4692 /// of a select instruction differ from the types of the cmp instruction
4693 /// operands because of a cast instruction. It checks whether it is legal to
4694 /// move the cast operation after the "select". If so, it returns the new
4695 /// second value of the "select" (under the assumption that the cast is moved):
4696 /// 1. As the operand of the cast instruction when both values of the "select"
4697 /// are the same cast instruction.
4698 /// 2. As a restored constant (by applying the reverse cast operation) when the
4699 /// first value of the "select" is a cast operation and the second value is a
4700 /// constant.
4701 /// NOTE: We return only the new second value because the first value can be
4702 /// accessed as the operand of the cast instruction.
4703 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
4704  Instruction::CastOps *CastOp) {
4705  auto *Cast1 = dyn_cast<CastInst>(V1);
4706  if (!Cast1)
4707  return nullptr;
4708 
4709  *CastOp = Cast1->getOpcode();
4710  Type *SrcTy = Cast1->getSrcTy();
4711  if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
4712  // If V1 and V2 are both the same cast from the same type, look through V1.
4713  if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
4714  return Cast2->getOperand(0);
4715  return nullptr;
4716  }
4717 
4718  auto *C = dyn_cast<Constant>(V2);
4719  if (!C)
4720  return nullptr;
4721 
4722  Constant *CastedTo = nullptr;
4723  switch (*CastOp) {
4724  case Instruction::ZExt:
4725  if (CmpI->isUnsigned())
4726  CastedTo = ConstantExpr::getTrunc(C, SrcTy);
4727  break;
4728  case Instruction::SExt:
4729  if (CmpI->isSigned())
4730  CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
4731  break;
4732  case Instruction::Trunc:
4733  Constant *CmpConst;
4734  if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
4735  CmpConst->getType() == SrcTy) {
4736  // Here we have the following case:
4737  //
4738  // %cond = cmp iN %x, CmpConst
4739  // %tr = trunc iN %x to iK
4740  // %narrowsel = select i1 %cond, iK %tr, iK C
4741  //
4742  // We can always move trunc after select operation:
4743  //
4744  // %cond = cmp iN %x, CmpConst
4745  // %widesel = select i1 %cond, iN %x, iN CmpConst
4746  // %tr = trunc iN %widesel to iK
4747  //
4748  // Note that C could be extended in any way because we don't care about
4749  // the upper bits after truncation. It can't be an abs pattern, because
4750  // that would look like:
4751  //
4752  // select i1 %cond, x, -x.
4753  //
4754  // So only a min/max pattern can be matched. Such a match requires the
4755  // widened C == CmpConst, so we set the widened C = CmpConst; the
4756  // condition trunc(CmpConst) == C is checked below.
4757  CastedTo = CmpConst;
4758  } else {
4759  CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
4760  }
4761  break;
4762  case Instruction::FPTrunc:
4763  CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
4764  break;
4765  case Instruction::FPExt:
4766  CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
4767  break;
4768  case Instruction::FPToUI:
4769  CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
4770  break;
4771  case Instruction::FPToSI:
4772  CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
4773  break;
4774  case Instruction::UIToFP:
4775  CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
4776  break;
4777  case Instruction::SIToFP:
4778  CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
4779  break;
4780  default:
4781  break;
4782  }
4783 
4784  if (!CastedTo)
4785  return nullptr;
4786 
4787  // Make sure the cast doesn't lose any information.
4788  Constant *CastedBack =
4789  ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
4790  if (CastedBack != C)
4791  return nullptr;
4792 
4793  return CastedTo;
4794 }
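The cast-back comparison at the end is a round-trip check. The same guard can be sketched at the APInt level for the ZExt case (illustrative only, not part of this file):

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    // True iff truncating V to NarrowWidth and zero-extending back loses
    // nothing -- the property the ZExt case verifies via ConstantExpr.
    bool zextTruncRoundTrips(const APInt &V, unsigned NarrowWidth) {
      return V.trunc(NarrowWidth).zext(V.getBitWidth()) == V;
    }

    // zextTruncRoundTrips(APInt(32, 200), 8) -> true  (200 fits in i8)
    // zextTruncRoundTrips(APInt(32, 300), 8) -> false (300 truncates to 44)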
4795 
4796 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
4797  Instruction::CastOps *CastOp,
4798  unsigned Depth) {
4799  if (Depth >= MaxDepth)
4800  return {SPF_UNKNOWN, SPNB_NA, false};
4801 
4802  SelectInst *SI = dyn_cast<SelectInst>(V);
4803  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
4804 
4805  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
4806  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
4807 
4808  CmpInst::Predicate Pred = CmpI->getPredicate();
4809  Value *CmpLHS = CmpI->getOperand(0);
4810  Value *CmpRHS = CmpI->getOperand(1);
4811  Value *TrueVal = SI->getTrueValue();
4812  Value *FalseVal = SI->getFalseValue();
4813  FastMathFlags FMF;
4814  if (isa<FPMathOperator>(CmpI))
4815  FMF = CmpI->getFastMathFlags();
4816 
4817  // Bail out early.
4818  if (CmpI->isEquality())
4819  return {SPF_UNKNOWN, SPNB_NA, false};
4820 
4821  // Deal with type mismatches.
4822  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
4823  if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
4824  // If this is a potential fmin/fmax with a cast to integer, then ignore
4825  // -0.0 because there is no corresponding integer value.
4826  if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
4827  FMF.setNoSignedZeros();
4828  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4829  cast<CastInst>(TrueVal)->getOperand(0), C,
4830  LHS, RHS, Depth);
4831  }
4832  if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
4833  // If this is a potential fmin/fmax with a cast to integer, then ignore
4834  // -0.0 because there is no corresponding integer value.
4835  if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
4836  FMF.setNoSignedZeros();
4837  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4838  C, cast<CastInst>(FalseVal)->getOperand(0),
4839  LHS, RHS, Depth);
4840  }
4841  }
4842  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
4843  LHS, RHS, Depth);
4844 }
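A caller's-eye sketch of the public entry point (SI is a hypothetical SelectInst *; not part of this file):

    llvm::Value *LHS, *RHS;
    llvm::SelectPatternResult SPR = llvm::matchSelectPattern(SI, LHS, RHS);
    if (SPR.Flavor == llvm::SPF_SMAX) {
      // SI computes smax(LHS, RHS); a client could canonicalize it or
      // feed LHS/RHS into further range reasoning.
    }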
4845 
4846 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
4847  if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
4848  if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
4849  if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
4850  if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
4851  if (SPF == SPF_FMINNUM)
4852  return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
4853  if (SPF == SPF_FMAXNUM)
4854  return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
4855  llvm_unreachable("unhandled!");
4856 }
4857 
4858 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
4859  if (SPF == SPF_SMIN) return SPF_SMAX;
4860  if (SPF == SPF_UMIN) return SPF_UMAX;
4861  if (SPF == SPF_SMAX) return SPF_SMIN;
4862  if (SPF == SPF_UMAX) return SPF_UMIN;
4863  llvm_unreachable("unhandled!");
4864 }
4865 
4866 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
4867  return getMinMaxPred(getInverseMinMaxFlavor(SPF));
4868 }
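For example, the helpers compose as expected; assert-style spot checks, assuming the declarations from ValueTracking.h (a sketch, not part of this file):

    #include <cassert>

    // The inverse of an SMIN select is SMAX, compared with ICMP_SGT.
    assert(llvm::getInverseMinMaxFlavor(llvm::SPF_SMIN) == llvm::SPF_SMAX);
    assert(llvm::getMinMaxPred(llvm::SPF_SMAX, /*Ordered=*/false) ==
           llvm::ICmpInst::ICMP_SGT);
    assert(llvm::getInverseMinMaxPred(llvm::SPF_SMIN) ==
           llvm::ICmpInst::ICMP_SGT);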
4869 
4870 /// Return true if "icmp Pred LHS RHS" is always true.
4871 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
4872  const Value *RHS, const DataLayout &DL,
4873  unsigned Depth) {
4874  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
4875  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
4876  return true;
4877 
4878  switch (Pred) {
4879  default:
4880  return false;
4881 
4882  case CmpInst::ICMP_SLE: {
4883  const APInt *C;
4884 
4885  // LHS s<= LHS +_{nsw} C if C >= 0
4886  if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
4887  return !C->isNegative();
4888  return false;
4889  }
4890 
4891  case CmpInst::ICMP_ULE: {
4892  const APInt *C;
4893 
4894  // LHS u<= LHS +_{nuw} C for any C
4895  if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
4896  return true;
4897 
4898  // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
4899  auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
4900  const Value *&X,
4901  const APInt *&CA, const APInt *&CB) {
4902  if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
4903  match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
4904  return true;
4905 
4906  // If X & C == 0 then (X | C) == X +_{nuw} C
4907  if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
4908  match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
4909  KnownBits Known(CA->getBitWidth());
4910  computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
4911  /*CxtI*/ nullptr, /*DT*/ nullptr);
4912  if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
4913  return true;
4914  }
4915 
4916  return false;
4917  };
4918 
4919  const Value *X;
4920  const APInt *CLHS, *CRHS;
4921  if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
4922  return CLHS->ule(*CRHS);
4923 
4924  return false;
4925  }
4926  }
4927 }
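The "(X | C) behaves as X +_{nuw} C when X & C == 0" step used in the ICMP_ULE case can be spot-checked in isolation (a sketch, not part of this file):

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    // When X and C share no set bits, OR is addition with no carries,
    // hence no unsigned wrap -- the fact the ICMP_ULE case exploits.
    bool orActsAsNuwAdd(const APInt &X, const APInt &C) {
      if (!(X & C).isNullValue())
        return false;                       // precondition X & C == 0 not met
      bool Overflow;
      APInt Sum = X.uadd_ov(C, Overflow);
      return !Overflow && (X | C) == Sum;   // holds whenever we get here
    }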
4928 
4929 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
4930 /// ALHS ARHS" is true. Otherwise, return None.
4931 static Optional<bool>
4932 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
4933  const Value *ARHS, const Value *BLHS, const Value *BRHS,
4934  const DataLayout &DL, unsigned Depth) {
4935  switch (Pred) {
4936  default:
4937  return None;
4938 
4939  case CmpInst::ICMP_SLT:
4940  case CmpInst::ICMP_SLE:
4941  if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
4942  isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
4943  return true;
4944  return None;
4945 
4946  case CmpInst::ICMP_ULT:
4947  case CmpInst::ICMP_ULE:
4948  if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
4949  isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
4950  return true;
4951  return None;
4952  }
4953 }
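A worked instance of the SLT/SLE rule: "x s< y" implies "x s< (y +nsw 1)", since isTruePredicate proves x s<= x trivially and y s<= y + 1 from the non-negative nsw increment. A concrete spot check (a sketch, not part of this file):

    #include "llvm/ADT/APInt.h"
    #include <cassert>
    using llvm::APInt;

    void impliedSltSpotCheck() {
      APInt X(32, -4, /*isSigned=*/true), Y(32, 7);
      // If x s< y then certainly x s< y + 1 (no nsw overflow for y = 7).
      assert(X.slt(Y) && X.slt(Y + 1));
    }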
4954 
4955 /// Return true if the operands of the two compares match. IsSwappedOps is true
4956 /// when the operands match, but are swapped.
4957 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
4958  const Value *BLHS, const Value *BRHS,
4959  bool &IsSwappedOps) {
4960 
4961  bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
4962  IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
4963  return IsMatchingOps || IsSwappedOps;
4964 }
4965 
4966 /// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
4967 /// true. Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
4968 /// BRHS" is false. Otherwise, return None if we can't infer anything.
4969 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
4970  const Value *ALHS,
4971  const Value *ARHS,
4972  CmpInst::Predicate BPred,
4973  const Value *BLHS,
4974  const Value *BRHS,
4975  bool IsSwappedOps) {
4976  // Canonicalize the operands so they're matching.
4977  if (IsSwappedOps) {
4978  std::swap(BLHS, BRHS);
4979  BPred = ICmpInst::getSwappedPredicate(BPred);
4980  }
4981  if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
4982  return true;
4983  if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
4984  return false;
4985 
4986  return None;
4987 }
4988 
4989 /// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
4990 /// true. Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
4991 /// C2" is false. Otherwise, return None if we can't infer anything.
4992 static Optional<bool>
4993 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
4994  const ConstantInt *C1,
4995  CmpInst::Predicate BPred,
4996  const Value *BLHS, const ConstantInt *C2) {
4997  assert(ALHS == BLHS && "LHS operands must match.");
4998  ConstantRange DomCR =
4999  ConstantRange::makeExactICmpRegion(APred, C1->getValue());
5000  ConstantRange CR =
5001  ConstantRange::makeExactICmpRegion(BPred, C2->getValue());
5002  ConstantRange Intersection = DomCR.intersectWith(CR);
5003  ConstantRange Difference = DomCR.difference(CR);
5004  if (Intersection.isEmptySet())
5005  return false;
5006  if (Difference.isEmptySet())
5007  return true;
5008  return None;
5009 }
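For example, with an assumed 32-bit width, "x u< 5" implies "x u< 10": the dominating region [0, 5) lies entirely inside [0, 10), so the difference is empty (a sketch, not part of this file):

    #include "llvm/ADT/APInt.h"
    #include "llvm/IR/ConstantRange.h"
    #include "llvm/IR/InstrTypes.h"
    using namespace llvm;

    bool ultFiveImpliesUltTen() {
      ConstantRange Dom =
          ConstantRange::makeExactICmpRegion(CmpInst::ICMP_ULT, APInt(32, 5));
      ConstantRange CR =
          ConstantRange::makeExactICmpRegion(CmpInst::ICMP_ULT, APInt(32, 10));
      return Dom.difference(CR).isEmptySet() &&    // always-true case fires
             !Dom.intersectWith(CR).isEmptySet();  // not the always-false case
    }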
5010 
5011 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
5012 /// false. Otherwise, return None if we can't infer anything.
5013 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
5014  const ICmpInst *RHS,
5015  const DataLayout &DL, bool LHSIsTrue,
5016  unsigned Depth) {
5017  Value *ALHS = LHS->getOperand(0);
5018  Value *ARHS = LHS->getOperand(1);
5019  // The rest of the logic assumes the LHS condition is true. If that's not the
5020  // case, invert the predicate to make it so.
5021  ICmpInst::Predicate APred =
5022  LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
5023 
5024  Value *BLHS = RHS->getOperand(0);
5025  Value *BRHS = RHS->getOperand(1);
5026  ICmpInst::Predicate BPred = RHS->getPredicate();
5027 
5028  // Can we infer anything when the two compares have matching operands?
5029  bool IsSwappedOps;
5030  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
5031  if (Optional<bool> Implication = isImpliedCondMatchingOperands(
5032  APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
5033  return Implication;
5034  // No amount of additional analysis will infer the second condition, so
5035  // early exit.
5036  return None;
5037  }
5038 
5039  // Can we infer anything when the LHS operands match and the RHS operands are
5040  // constants (not necessarily matching)?
5041  if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
5042  if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
5043  APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
5044  cast<ConstantInt>(BRHS)))
5045  return Implication;
5046  // No amount of additional analysis will infer the second condition, so
5047  // early exit.
5048  return None;
5049  }
5050 
5051  if (APred == BPred)
5052  return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
5053  return None;
5054 }
5055 
5056 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
5057 /// false. Otherwise, return None if we can't infer anything. We expect the
5058 /// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
5059 static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
5060  const ICmpInst *RHS,
5061  const DataLayout &DL, bool LHSIsTrue,
5062  unsigned Depth) {
5063  // The LHS must be an 'or' or an 'and' instruction.
5064  assert((LHS->getOpcode() == Instruction::And ||
5065  LHS->getOpcode() == Instruction::Or) &&
5066  "Expected LHS to be 'and' or 'or'.");
5067 
5068  assert(Depth <= MaxDepth && "Hit recursion limit");
5069 
5070  // If the result of an 'or' is false, then we know both legs of the 'or' are
5071  // false. Similarly, if the result of an 'and' is true, then we know both
5072  // legs of the 'and' are true.
5073  Value *ALHS, *ARHS;
5074  if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
5075  (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
5076  // FIXME: Make this non-recursive.
5077  if (Optional<bool> Implication =
5078  isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
5079  return Implication;
5080  if (Optional<bool> Implication =
5081  isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
5082  return Implication;
5083  return None;
5084  }
5085  return None;
5086 }
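Clients reach these helpers through the public entry point that follows; a minimal usage sketch (Cond1, Cond2, and DL are hypothetical, assumed-in-scope values; not part of this file):

    if (llvm::Optional<bool> Imp =
            llvm::isImpliedCondition(Cond1, Cond2, DL, /*LHSIsTrue=*/true)) {
      // *Imp is Cond2's value on every path where Cond1 is known true,
      // so Cond2 can be folded to a constant there.
    }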
5087 
5088 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
5089  const DataLayout &DL, bool LHSIsTrue,
5090  unsigned Depth) {
5091  // Bail out when we hit the limit.
5092  if (Depth == MaxDepth)
5093  return None;
5094 
5095  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
5096  // example.
5097  if (LHS->getType() != RHS->getType())
5098  return None;
5099 
5100  Type *OpTy = LHS->getType();
5101  assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
5102 
5103  // LHS ==> RHS by definition
5104  if (LHS == RHS)
5105  return LHSIsTrue;
5106 
5107  // FIXME: Extend the code below to handle vectors.
5108  if (OpTy->isVectorTy())
5109  return None;
5110 
5111  assert(OpTy->isIntegerTy(1) && "implied by above");
5112 
5113  // Both LHS and RHS are icmps.
5114  const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
5115  const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
5116  if (LHSCmp && RHSCmp)
5117  return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);
5118 
5119  // The LHS should be an 'or' or an 'and' instruction.