LLVM 7.0.0svn
ValueTracking.cpp
//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
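/// For example, this returns 8 for i8, 32 for <4 x i32>, and the DataLayout
/// pointer width in bits for i8*.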
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
  /// (all of which can call computeKnownBits), and so on.
  std::array<const Value *, MaxDepth> Excluded;

  unsigned NumExcluded = 0;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static void computeKnownBits(const Value *V, KnownBits &Known,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE) {
  return ::computeKnownBits(V, Depth,
                            Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL,
                               AssumptionCache *AC, const Instruction *CxtI,
                               const DominatorTree *DT) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
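  // For example, (shl i32 %x, 4) and (and i32 %y, 15) have no common bits
  // set: the first is known zero in its low four bits and the second in all
  // higher bits, so the two known-zero masks together cover every bit.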
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT);
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
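  // Illustrative pattern: this returns true when every user of CxtI is an
  // equality compare against zero, e.g. "%c = icmp eq i32 %v, 0".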
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
                                  Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT) {
  return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth,
                              AssumptionCache *AC, const Instruction *CxtI,
                              const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT) {
  return ::isKnownNonEqual(V1, V2, Query(DL, AC,
                                         safeCxtI(V1, safeCxtI(V2, CxtI)),
                                         DT));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL,
                             unsigned Depth, AssumptionCache *AC,
                             const Instruction *CxtI, const DominatorTree *DT) {
  return ::MaskedValueIsZero(V, Mask, Depth,
                             Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT) {
  return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownOut.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  KnownBits LHSKnown(BitWidth);
  computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
  computeKnownBits(Op1, Known2, Depth + 1, Q);

  KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                KnownBits &Known, KnownBits &Known2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(Op1, Known, Depth + 1, Q);
  computeKnownBits(Op0, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  assert(!Known.hasConflict() && !Known2.hasConflict());
  // Compute a conservative estimate for high known-0 bits.
  unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                            Known2.countMinLeadingZeros(),
                            BitWidth) - BitWidth;
  LeadZ = std::min(LeadZ, BitWidth);

  // The result of the bottom bits of an integer multiply can be
  // inferred by looking at the bottom bits of both operands and
  // multiplying them together.
  // We can infer at least the minimum number of known trailing bits
  // of both operands. Depending on number of trailing zeros, we can
  // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
  // a and b are divisible by m and n respectively.
  // We then calculate how many of those bits are inferrable and set
  // the output. For example, the i8 mul:
  //  a = XXXX1100 (12)
  //  b = XXXX1110 (14)
  // We know the bottom 3 bits are zero since the first can be divided by
  // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
  // Applying the multiplication to the trimmed arguments gets:
  //    XX11 (3)
  //    X111 (7)
  // -------
  //    XX11
  //   XX11
  //  XX11
  // XX11
  // -------
  // XXXXX01
  // Which allows us to infer the 2 LSBs. Since we're multiplying the result
  // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
  // The proof for this can be described as:
  // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
  //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
  //                    umin(countTrailingZeros(C2), C6) +
  //                    umin(C5 - umin(countTrailingZeros(C1), C5),
  //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
  // %aa = shl i8 %a, C5
  // %bb = shl i8 %b, C6
  // %aaa = or i8 %aa, C1
  // %bbb = or i8 %bb, C2
  // %mul = mul i8 %aaa, %bbb
  // %mask = and i8 %mul, C7
  //   =>
  // %mask = i8 ((C1*C2)&C7)
  // Where C5, C6 describe the known bits of %a, %b
  // C1, C2 describe the known bottom bits of %a, %b.
  // C7 describes the mask of the known bits of the result.
  APInt Bottom0 = Known.One;
  APInt Bottom1 = Known2.One;

  // How many times we'd be able to divide each argument by 2 (shr by 1).
  // This gives us the number of trailing zeros on the multiplication result.
  unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
  unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
  unsigned TrailZero0 = Known.countMinTrailingZeros();
  unsigned TrailZero1 = Known2.countMinTrailingZeros();
  unsigned TrailZ = TrailZero0 + TrailZero1;

  // Figure out the fewest known-bits operand.
  unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
                                      TrailBitsKnown1 - TrailZero1);
  unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);

  APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
                      Bottom1.getLoBits(TrailBitsKnown1);

  Known.resetAll();
  Known.Zero.setHighBits(LeadZ);
  Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
  Known.One |= BottomKnown.getLoBits(ResultBitsKnown);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
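    // For example, for an i8 range [96, 112) the minimum is 0b01100000 and
    // the maximum is 0b01101111, so their xor is 0b00001111 and the top four
    // bits form a fully-known common prefix (0110).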
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    Known.One &= Range.getUnsignedMax() & Mask;
    Known.Zero &= ~Range.getUnsignedMax() & Mask;
  }
}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
                       return EphValues.count(U);
                     })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
               J != JE; ++J)
            WorkSet.push_back(*J);
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::sideeffect:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).
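  //
  // Illustrative IR where both restrictions hold:
  //   %cmp = icmp ne i32* %p, null
  //   call void @llvm.assume(i1 %cmp)
  //   %v = load i32, i32* %p      ; the assume is a valid context here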

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
              IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
         std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V),
                           m_CombineOr(m_PtrToInt(m_Specific(V)),
                                       m_BitCast(m_Specific(V))));
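
    // Worked example for the ICMP_EQ cases below (illustrative): given
    //   %cmp = icmp eq i32 %v, 7
    //   call void @llvm.assume(i1 %cmp)
    // the assume(v = a) pattern matches with A = 7, and every bit of the
    // constant becomes a known bit of %v at this context.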

    CmpInst::Predicate Pred;
    uint64_t C;
    // assume(v = a)
    if (match(Arg, m_c_ICmp(Pred, m_V, m_Value(A))) &&
        Pred == ICmpInst::ICMP_EQ && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      Known.Zero |= RHSKnown.Zero;
      Known.One |= RHSKnown.One;
    // assume(v & b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & MaskKnown.One;
      Known.One |= RHSKnown.One & MaskKnown.One;
    // assume(~(v & b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits MaskKnown(BitWidth);
      computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

      // For those bits in the mask that are known to be one, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & MaskKnown.One;
      Known.One |= RHSKnown.Zero & MaskKnown.One;
    // assume(v | b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
    // assume(~(v | b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
    // assume(v ^ b = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate known
      // bits from the RHS to V. For those bits in B that are known to be one,
      // we can propagate inverted known bits from the RHS to V.
      Known.Zero |= RHSKnown.Zero & BKnown.Zero;
      Known.One |= RHSKnown.One & BKnown.Zero;
      Known.Zero |= RHSKnown.One & BKnown.One;
      Known.One |= RHSKnown.Zero & BKnown.One;
    // assume(~(v ^ b) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      KnownBits BKnown(BitWidth);
      computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

      // For those bits in B that are known to be zero, we can propagate
      // inverted known bits from the RHS to V. For those bits in B that are
      // known to be one, we can propagate known bits from the RHS to V.
      Known.Zero |= RHSKnown.One & BKnown.Zero;
      Known.One |= RHSKnown.Zero & BKnown.Zero;
      Known.Zero |= RHSKnown.Zero & BKnown.One;
      Known.One |= RHSKnown.One & BKnown.One;
    // assume(v << c = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      RHSKnown.Zero.lshrInPlace(C);
      Known.Zero |= RHSKnown.Zero;
      RHSKnown.One.lshrInPlace(C);
      Known.One |= RHSKnown.One;
    // assume(~(v << c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      RHSKnown.One.lshrInPlace(C);
      Known.Zero |= RHSKnown.One;
      RHSKnown.Zero.lshrInPlace(C);
      Known.One |= RHSKnown.Zero;
    // assume(v >> c = a)
    } else if (match(Arg,
                     m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                              m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them to known
      // bits in V shifted to the right by C.
      Known.Zero |= RHSKnown.Zero << C;
      Known.One |= RHSKnown.One << C;
    // assume(~(v >> c) = a)
    } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                   m_Value(A))) &&
               Pred == ICmpInst::ICMP_EQ &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT) &&
               C < BitWidth) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
      // For those bits in RHS that are known, we can propagate them inverted
      // to known bits in V shifted to the right by C.
      Known.Zero |= RHSKnown.One << C;
      Known.One |= RHSKnown.Zero << C;
    // assume(v >=_s c) where c is non-negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v >_s c) where c is at least -1.
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SGT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
        // We know that the sign bit is zero.
        Known.makeNonNegative();
      }
    // assume(v <=_s c) where c is negative
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <_s c) where c is non-positive
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_SLT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      if (RHSKnown.isZero() || RHSKnown.isNegative()) {
        // We know that the sign bit is one.
        Known.makeNegative();
      }
    // assume(v <=_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULE &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero.
      Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    // assume(v <_u c)
    } else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
               Pred == ICmpInst::ICMP_ULT &&
               isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      KnownBits RHSKnown(BitWidth);
      computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

      // Whatever high bits in c are zero are known to be zero (if c is a power
      // of 2, then one more).
      if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
      else
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
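  // For example, unreachable code containing both assume(%v == 0) and
  // assume(%v == 1) would produce conflicting known bits for %v here.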
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
/// operator-specific functors that, given the known-zero or known-one bits
/// respectively, and a shift amount, compute the implied known-zero or
/// known-one bits of the shift operator's result respectively for that shift
/// amount. The results from calling KZF and KOF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, KnownBits &Known, KnownBits &Known2,
    unsigned Depth, const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = Known.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known.Zero = KZF(Known.Zero, ShiftAmt);
    Known.One = KOF(Known.One, ShiftAmt);
    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive. TODO: Should we just carry on?
  if ((~Known.Zero).uge(BitWidth)) {
    Known.resetAll();
    return;
  }

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
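  // For example, if an i8 shift amount is known to be 0b00000?0? (only bits
  // 0 and 2 unknown), these masks restrict the loop below to combining
  // results for the shift amounts 0, 1, 4, and 5 only.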

  // It would be more-clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known.Zero &= KZF(Known2.Zero, ShiftAmt);
    Known.One &= KOF(Known2.One, ShiftAmt);
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}

static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
                                         unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(Known);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    Known.Zero |= Known2.Zero;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form add(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
                                       m_Value(Y))) ||
         match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
                                       m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = std::move(KnownZeroOut);
    break;
  }
  case Instruction::Mul: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
                        Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2.resetAll();
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);
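    // For example, in i32, a numerator with at least 16 leading zeros
    // divided by a denominator with at most 23 leading zeros (so at least
    // 2^8) gives a quotient below 2^16 / 2^8 = 2^8, i.e. at least
    // 16 + 32 - 23 - 1 = 24 leading zeros.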

    Known.Zero.setHighBits(LeadZ);
    break;
  }
  case Instruction::Select: {
    const Value *LHS, *RHS;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (Known.isNegative() && Known2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (Known.isNonNegative() || Known2.isNonNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (Known.isNonNegative() && Known2.isNonNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(Known.countMinLeadingZeros(),
                                Known2.countMinLeadingZeros());
      // If either side is negative, the result is negative.
      else if (Known.isNegative() || Known2.isNegative())
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    }

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    if (MaxHighOnes > 0)
      Known.One.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      Known.Zero.setHighBits(MaxHighZeros);
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.zextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    // Any top bits are known to be zero.
    if (BitWidth > SrcBitWidth)
      Known.Zero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0   iff   (X & C2 >>u C1) == 0
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isSignBitSet())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isSignBitSet())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::LShr: {
    // (lshr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero.lshr(ShiftAmt);
      // High bits known zero.
      KZResult.setHighBits(ShiftAmt);
      return KZResult;
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0   iff   (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
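        // For example, for srem i8 %x, 4 the low two bits of the result are
        // the low two bits of %x; the remaining bits are all zero if %x is
        // non-negative and all one if %x is negative with a nonzero low part.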
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;

        assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    // If it's known zero, our sign bit is also zero.
    if (Known2.isNonNegative())
      Known.makeNonNegative();

    break;
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        Known.Zero |= ~LowBits;
        Known.One &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    unsigned Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }

  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

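    // For example, an alloca with 16-byte alignment yields a pointer whose
    // low four bits are known to be zero.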
    if (Align > 0)
      Known.Zero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
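    // For example, indexing into an array of i32 from a 16-byte-aligned base
    // keeps at least two trailing zero bits, since every element offset is a
    // multiple of four.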
    KnownBits LocalKnown(BitWidth);
    computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
    unsigned TrailZ = LocalKnown.countMinTrailingZeros();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnown, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnown.countMinTrailingZeros()));
      }
    }

    Known.Zero.setLowBits(TrailZ);
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
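    // For example, for the loop counter
    //   %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
    //   %i.next = add nsw i32 %i, 4
    // both recurrence inputs have at least two trailing zeros, so %i is
    // known to have at least two trailing zeros as well.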
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            break;
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          computeKnownBits(R, Known2, Depth + 1, Q);

          // We need to take the minimum number of known bits
          KnownBits Known3(Known);
          computeKnownBits(L, Known3, Depth + 1, Q);

          Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                         Known3.countMinTrailingZeros()));

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
            // If initial value of recurrence is nonnegative, and we are adding
            // a nonnegative number with nsw, the result can only be nonnegative
            // or poison value regardless of the number of times we execute the
            // add in phi recurrence. If initial value is negative and we are
            // adding a negative number with nsw, the result can only be
            // negative or poison value. Similar arguments apply to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (Known2.isNonNegative() && Known3.isNonNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNegative())
                Known.makeNegative();
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (Known2.isNonNegative() && Known3.isNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNonNegative())
                Known.makeNegative();
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                     Known3.isNonNegative())
              Known.makeNonNegative();
          }

          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value refers to ourself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        Known2 = KnownBits(BitWidth);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
        Known.Zero &= Known2.Zero;
        Known.One &= Known2.One;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!Known.Zero && !Known.One)
          break;
      }
    }
    break;
  }
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
    if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known.Zero |= Known2.Zero;
      Known.One |= Known2.One;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.reverseBits();
        Known.One |= Known2.One.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.byteSwap();
        Known.One |= Known2.One.byteSwap();
        break;
      case Intrinsic::ctlz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
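        // For example, in i32, if bit 20 is known to be one there are at
        // most 11 leading zeros, so the result fits in the low four bits
        // and all higher bits are known zero.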
1445  unsigned PossibleLZ = Known2.One.countLeadingZeros();
1446  // If this call is undefined for 0, the result will be less than 2^n.
1447  if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1448  PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1449  unsigned LowBits = Log2_32(PossibleLZ)+1;
1450  Known.Zero.setBitsFrom(LowBits);
1451  break;
1452  }
1453  case Intrinsic::cttz: {
1454  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1455  // If we have a known 1, its position is our upper bound.
1456  unsigned PossibleTZ = Known2.One.countTrailingZeros();
1457  // If this call is undefined for 0, the result will be less than 2^n.
1458  if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1459  PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1460  unsigned LowBits = Log2_32(PossibleTZ)+1;
1461  Known.Zero.setBitsFrom(LowBits);
1462  break;
1463  }
1464  case Intrinsic::ctpop: {
1465  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1466  // We can bound the space the count needs. Also, bits known to be zero
1467  // can't contribute to the population.
1468  unsigned BitsPossiblySet = Known2.countMaxPopulation();
1469  unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1470  Known.Zero.setBitsFrom(LowBits);
1471  // TODO: we could bound KnownOne using the lower bound on the number
1472  // of bits which might be set provided by popcnt KnownOne2.
1473  break;
1474  }
1475  case Intrinsic::x86_sse42_crc32_64_64:
1476  Known.Zero.setBitsFrom(32);
1477  break;
1478  }
1479  }
1480  break;
1481  case Instruction::ExtractElement:
1482  // Look through extract element. At the moment we keep this simple and skip
1483  // tracking the specific element. But at least we might find information
1484 // valid for all elements of the vector (for example if the vector is sign
1485  // extended, shifted, etc).
1486  computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1487  break;
1488  case Instruction::ExtractValue:
1489  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1490  const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1491  if (EVI->getNumIndices() != 1) break;
1492  if (EVI->getIndices()[0] == 0) {
1493  switch (II->getIntrinsicID()) {
1494  default: break;
1495  case Intrinsic::uadd_with_overflow:
1496  case Intrinsic::sadd_with_overflow:
1497  computeKnownBitsAddSub(true, II->getArgOperand(0),
1498  II->getArgOperand(1), false, Known, Known2,
1499  Depth, Q);
1500  break;
1501  case Intrinsic::usub_with_overflow:
1502  case Intrinsic::ssub_with_overflow:
1503  computeKnownBitsAddSub(false, II->getArgOperand(0),
1504  II->getArgOperand(1), false, Known, Known2,
1505  Depth, Q);
1506  break;
1507  case Intrinsic::umul_with_overflow:
1508  case Intrinsic::smul_with_overflow:
1509  computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1510  Known, Known2, Depth, Q);
1511  break;
1512  }
1513  }
1514  }
1515  }
1516 }
1517 
1518 /// Determine which bits of V are known to be either zero or one and return
1519 /// them.
1520 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1521  KnownBits Known(getBitWidth(V->getType(), Q.DL));
1522  computeKnownBits(V, Known, Depth, Q);
1523  return Known;
1524 }
1525 
1526 /// Determine which bits of V are known to be either zero or one and return
1527 /// them in the Known bit set.
1528 ///
1529 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1530 /// we cannot optimize based on the assumption that it is zero without changing
1531 /// it to be an explicit zero. If we don't change it to zero, other code could
1532 /// be optimized based on the contradictory assumption that it is non-zero.
1533 /// Because instcombine aggressively folds operations with undef args anyway,
1534 /// this won't lose us code quality.
1535 ///
1536 /// This function is defined on values with integer type, values with pointer
1537 /// type, and vectors of integers. In the case
1538 /// where V is a vector, the known zero and known one values are the
1539 /// same width as the vector element, and the bit is set only if it is true
1540 /// for all of the elements in the vector.
1541 void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
1542  const Query &Q) {
1543  assert(V && "No Value?");
1544  assert(Depth <= MaxDepth && "Limit Search Depth");
1545  unsigned BitWidth = Known.getBitWidth();
1546 
1547  assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
1548  V->getType()->isPtrOrPtrVectorTy()) &&
1549  "Not integer or pointer type!");
1550  assert(Q.DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth &&
1551  "V and Known should have same BitWidth");
1552  (void)BitWidth;
1553 
1554  const APInt *C;
1555  if (match(V, m_APInt(C))) {
1556  // We know all of the bits for a scalar constant or a splat vector constant!
1557  Known.One = *C;
1558  Known.Zero = ~Known.One;
1559  return;
1560  }
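 // Worked example (illustrative): for the i8 constant 12 (0b00001100),
 // Known.One = 0b00001100 and Known.Zero = 0b11110011 -- every bit of a
 // scalar or splat constant is known exactly.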
1561  // Null and aggregate-zero are all-zeros.
1562  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1563  Known.setAllZero();
1564  return;
1565  }
1566  // Handle a constant vector by taking the intersection of the known bits of
1567  // each element.
1568  if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1569  // We know that CDS must be a vector of integers. Take the intersection of
1570  // each element.
1571  Known.Zero.setAllBits(); Known.One.setAllBits();
1572  for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1573  APInt Elt = CDS->getElementAsAPInt(i);
1574  Known.Zero &= ~Elt;
1575  Known.One &= Elt;
1576  }
1577  return;
1578  }
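 // Worked example (illustrative): for <2 x i8> <i8 5, i8 7>, the loop leaves
 // Known.One = 5 & 7 = 0b00000101 and Known.Zero = ~5 & ~7 = 0b11111000;
 // bit 1 differs between the elements, so it lands in neither set.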
1579 
1580  if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1581  // We know that CV must be a vector of integers. Take the intersection of
1582  // each element.
1583  Known.Zero.setAllBits(); Known.One.setAllBits();
1584  for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1585  Constant *Element = CV->getAggregateElement(i);
1586  auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1587  if (!ElementCI) {
1588  Known.resetAll();
1589  return;
1590  }
1591  const APInt &Elt = ElementCI->getValue();
1592  Known.Zero &= ~Elt;
1593  Known.One &= Elt;
1594  }
1595  return;
1596  }
1597 
1598  // Start out not knowing anything.
1599  Known.resetAll();
1600 
1601  // We can't imply anything about undefs.
1602  if (isa<UndefValue>(V))
1603  return;
1604 
1605  // There's no point in looking through other users of ConstantData for
1606  // assumptions. Confirm that we've handled them all.
1607  assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1608 
1609  // Limit search depth.
1610  // All recursive calls that increase depth must come after this.
1611  if (Depth == MaxDepth)
1612  return;
1613 
1614  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1615  // the bits of its aliasee.
1616  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1617  if (!GA->isInterposable())
1618  computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1619  return;
1620  }
1621 
1622  if (const Operator *I = dyn_cast<Operator>(V))
1623  computeKnownBitsFromOperator(I, Known, Depth, Q);
1624 
1625  // Aligned pointers have trailing zeros - refine Known.Zero set
1626  if (V->getType()->isPointerTy()) {
1627  unsigned Align = V->getPointerAlignment(Q.DL);
1628  if (Align)
1629  Known.Zero.setLowBits(countTrailingZeros(Align));
1630  }
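 // Worked example (illustrative): a pointer with a known 16-byte alignment
 // has countTrailingZeros(16) == 4, so its low 4 bits are added to Known.Zero.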
1631 
1632  // computeKnownBitsFromAssume strictly refines Known.
1633  // Therefore, we run them after computeKnownBitsFromOperator.
1634 
1635  // Check whether a nearby assume intrinsic can determine some known bits.
1636  computeKnownBitsFromAssume(V, Known, Depth, Q);
1637 
1638  assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1639 }
1640 
1641 /// Return true if the given value is known to have exactly one
1642 /// bit set when defined. For vectors return true if every element is known to
1643 /// be a power of two when defined. Supports values with integer or pointer
1644 /// types and vectors of integers.
1645 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1646  const Query &Q) {
1647  assert(Depth <= MaxDepth && "Limit Search Depth");
1648 
1649  if (const Constant *C = dyn_cast<Constant>(V)) {
1650  if (C->isNullValue())
1651  return OrZero;
1652 
1653  const APInt *ConstIntOrConstSplatInt;
1654  if (match(C, m_APInt(ConstIntOrConstSplatInt)))
1655  return ConstIntOrConstSplatInt->isPowerOf2();
1656  }
1657 
1658  // 1 << X is clearly a power of two if the one is not shifted off the end. If
1659  // it is shifted off the end then the result is undefined.
1660  if (match(V, m_Shl(m_One(), m_Value())))
1661  return true;
1662 
1663  // (signmask) >>l X is clearly a power of two if the one is not shifted off
1664  // the bottom. If it is shifted off the bottom then the result is undefined.
1665  if (match(V, m_LShr(m_SignMask(), m_Value())))
1666  return true;
1667 
1668  // The remaining tests are all recursive, so bail out if we hit the limit.
1669  if (Depth++ == MaxDepth)
1670  return false;
1671 
1672  Value *X = nullptr, *Y = nullptr;
1673  // A shift left or a logical shift right of a power of two is a power of two
1674  // or zero.
1675  if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1676  match(V, m_LShr(m_Value(X), m_Value()))))
1677  return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1678 
1679  if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1680  return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1681 
1682  if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1683  return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1684  isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1685 
1686  if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1687  // A power of two and'd with anything is a power of two or zero.
1688  if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1689  isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1690  return true;
1691  // X & (-X) is always a power of two or zero.
1692  if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1693  return true;
1694  return false;
1695  }
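 // Worked example (illustrative): for X = 12 (0b1100), -X is 0b...0100 in
 // two's complement, so X & -X == 4. The expression isolates the lowest set
 // bit and is therefore always a single bit or zero.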
1696 
1697  // Adding a power-of-two or zero to the same power-of-two or zero yields
1698  // either the original power-of-two, a larger power-of-two or zero.
1699  if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1700  const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1701  if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
1702  if (match(X, m_And(m_Specific(Y), m_Value())) ||
1703  match(X, m_And(m_Value(), m_Specific(Y))))
1704  if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1705  return true;
1706  if (match(Y, m_And(m_Specific(X), m_Value())) ||
1707  match(Y, m_And(m_Value(), m_Specific(X))))
1708  if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1709  return true;
1710 
1711  unsigned BitWidth = V->getType()->getScalarSizeInBits();
1712  KnownBits LHSBits(BitWidth);
1713  computeKnownBits(X, LHSBits, Depth, Q);
1714 
1715  KnownBits RHSBits(BitWidth);
1716  computeKnownBits(Y, RHSBits, Depth, Q);
1717  // If i8 V is a power of two or zero:
1718  // ZeroBits: 1 1 1 0 1 1 1 1
1719  // ~ZeroBits: 0 0 0 1 0 0 0 0
1720  if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
1721  // If OrZero isn't set, we cannot give back a zero result.
1722  // Make sure either the LHS or RHS has a bit set.
1723  if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
1724  return true;
1725  }
1726  }
1727 
1728  // An exact divide or right shift can only shift off zero bits, so the result
1729  // is a power of two only if the first operand is a power of two and not
1730  // copying a sign bit (sdiv int_min, 2).
1731  if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1732  match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1733  return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1734  Depth, Q);
1735  }
1736 
1737  return false;
1738 }
1739 
1740 /// \brief Test whether a GEP's result is known to be non-null.
1741 ///
1742 /// Uses properties inherent in a GEP to try to determine whether it is known
1743 /// to be non-null.
1744 ///
1745 /// Currently this routine does not support vector GEPs.
1746 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1747  const Query &Q) {
1748  if (!GEP->isInBounds() || GEP->getPointerAddressSpace() != 0)
1749  return false;
1750 
1751  // FIXME: Support vector-GEPs.
1752  assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1753 
1754  // If the base pointer is non-null, we cannot walk to a null address with an
1755  // inbounds GEP in address space zero.
1756  if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1757  return true;
1758 
1759  // Walk the GEP operands and see if any operand introduces a non-zero offset.
1760  // If so, then the GEP cannot produce a null pointer, as doing so would
1761  // inherently violate the inbounds contract within address space zero.
1762  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1763  GTI != GTE; ++GTI) {
1764  // Struct types are easy -- they must always be indexed by a constant.
1765  if (StructType *STy = GTI.getStructTypeOrNull()) {
1766  ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1767  unsigned ElementIdx = OpC->getZExtValue();
1768  const StructLayout *SL = Q.DL.getStructLayout(STy);
1769  uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1770  if (ElementOffset > 0)
1771  return true;
1772  continue;
1773  }
1774 
1775  // If we have a zero-sized type, the index doesn't matter. Keep looping.
1776  if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1777  continue;
1778 
1779  // Fast path the constant operand case both for efficiency and so we don't
1780  // increment Depth when just zipping down an all-constant GEP.
1781  if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1782  if (!OpC->isZero())
1783  return true;
1784  continue;
1785  }
1786 
1787  // We post-increment Depth here because while isKnownNonZero increments it
1788  // as well, when we pop back up that increment won't persist. We don't want
1789  // to recurse 10k times just because we have 10k GEP operands. We don't
1790  // bail completely out because we want to handle constant GEPs regardless
1791  // of depth.
1792  if (Depth++ >= MaxDepth)
1793  continue;
1794 
1795  if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1796  return true;
1797  }
1798 
1799  return false;
1800 }
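// Illustrative IR (hypothetical values %p and %f, not from the original
// source):
//   %f = getelementptr inbounds {i32, i32}, {i32, i32}* %p, i64 0, i32 1
// Field 1 sits at a non-zero struct offset, so an inbounds GEP like %f in
// address space zero can never evaluate to null.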
1801 
1802 static bool isKnownNonNullFromDominatingCondition(const Value *V,
1803  const Instruction *CtxI,
1804  const DominatorTree *DT) {
1805  assert(V->getType()->isPointerTy() && "V must be pointer type");
1806  assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
1807 
1808  if (!CtxI || !DT)
1809  return false;
1810 
1811  unsigned NumUsesExplored = 0;
1812  for (auto *U : V->users()) {
1813  // Avoid massive lists
1814  if (NumUsesExplored >= DomConditionsMaxUses)
1815  break;
1816  NumUsesExplored++;
1817 
1818  // If the value is used as an argument to a call or invoke, then argument
1819  // attributes may provide an answer about null-ness.
1820  if (auto CS = ImmutableCallSite(U))
1821  if (auto *CalledFunc = CS.getCalledFunction())
1822  for (const Argument &Arg : CalledFunc->args())
1823  if (CS.getArgOperand(Arg.getArgNo()) == V &&
1824  Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
1825  return true;
1826 
1827  // Consider only compare instructions uniquely controlling a branch
1828  CmpInst::Predicate Pred;
1829  if (!match(const_cast<User *>(U),
1830  m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
1831  (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
1832  continue;
1833 
1834  for (auto *CmpU : U->users()) {
1835  if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
1836  assert(BI->isConditional() && "uses a comparison!");
1837 
1838  BasicBlock *NonNullSuccessor =
1839  BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
1840  BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
1841  if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
1842  return true;
1843  } else if (Pred == ICmpInst::ICMP_NE &&
1844  match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
1845  DT->dominates(cast<Instruction>(CmpU), CtxI)) {
1846  return true;
1847  }
1848  }
1849  }
1850 
1851  return false;
1852 }
1853 
1854 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1855 /// ensure that the value it's attached to is never equal to 'Value'?
1856 /// The range entries describe values of the same type as 'Value'.
1857 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1858  const unsigned NumRanges = Ranges->getNumOperands() / 2;
1859  assert(NumRanges >= 1);
1860  for (unsigned i = 0; i < NumRanges; ++i) {
1861  ConstantInt *Lower =
1862  mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1863  ConstantInt *Upper =
1864  mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1865  ConstantRange Range(Lower->getValue(), Upper->getValue());
1866  if (Range.contains(Value))
1867  return false;
1868  }
1869  return true;
1870 }
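// Worked example (illustrative): !range metadata holding the operand pair
// (i32 1, i32 257) describes the half-open interval [1, 257), which does not
// contain 0, so a query with Value == 0 returns true.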
1871 
1872 /// Return true if the given value is known to be non-zero when defined. For
1873 /// vectors, return true if every element is known to be non-zero when
1874 /// defined. For pointers, if the context instruction and dominator tree are
1875 /// specified, perform context-sensitive analysis and return true if the
1876 /// pointer couldn't possibly be null at the specified instruction.
1877 /// Supports values with integer or pointer type and vectors of integers.
1878 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
1879  if (auto *C = dyn_cast<Constant>(V)) {
1880  if (C->isNullValue())
1881  return false;
1882  if (isa<ConstantInt>(C))
1883  // Must be non-zero due to null test above.
1884  return true;
1885 
1886  // For constant vectors, check that all elements are undefined or known
1887  // non-zero to determine that the whole vector is known non-zero.
1888  if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
1889  for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
1890  Constant *Elt = C->getAggregateElement(i);
1891  if (!Elt || Elt->isNullValue())
1892  return false;
1893  if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
1894  return false;
1895  }
1896  return true;
1897  }
1898 
1899  // A global variable in address space 0 is non-null unless it is extern weak
1900  // or an absolute symbol reference. Other address spaces may have null as a
1901  // valid address for a global, so we can't assume anything.
1902  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
1903  if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
1904  GV->getType()->getAddressSpace() == 0)
1905  return true;
1906  } else
1907  return false;
1908  }
1909 
1910  if (auto *I = dyn_cast<Instruction>(V)) {
1911  if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
1912  // If the possible ranges don't contain zero, then the value is
1913  // definitely non-zero.
1914  if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
1915  const APInt ZeroValue(Ty->getBitWidth(), 0);
1916  if (rangeMetadataExcludesValue(Ranges, ZeroValue))
1917  return true;
1918  }
1919  }
1920  }
1921 
1922  // Check for pointer simplifications.
1923  if (V->getType()->isPointerTy()) {
1924  // Alloca never returns null, malloc might.
1925  if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
1926  return true;
1927 
1928  // A byval, inalloca, or nonnull argument is never null.
1929  if (const Argument *A = dyn_cast<Argument>(V))
1930  if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
1931  return true;
1932 
1933  // A Load tagged with nonnull metadata is never null.
1934  if (const LoadInst *LI = dyn_cast<LoadInst>(V))
1935  if (LI->getMetadata(LLVMContext::MD_nonnull))
1936  return true;
1937 
1938  if (auto CS = ImmutableCallSite(V))
1939  if (CS.isReturnNonNull())
1940  return true;
1941  }
1942 
1943  // The remaining tests are all recursive, so bail out if we hit the limit.
1944  if (Depth++ >= MaxDepth)
1945  return false;
1946 
1947  // Check for recursive pointer simplifications.
1948  if (V->getType()->isPointerTy()) {
1949  if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
1950  return true;
1951 
1952  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
1953  if (isGEPKnownNonNull(GEP, Depth, Q))
1954  return true;
1955  }
1956 
1957  unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
1958 
1959  // X | Y != 0 if X != 0 or Y != 0.
1960  Value *X = nullptr, *Y = nullptr;
1961  if (match(V, m_Or(m_Value(X), m_Value(Y))))
1962  return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
1963 
1964  // ext X != 0 if X != 0.
1965  if (isa<SExtInst>(V) || isa<ZExtInst>(V))
1966  return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
1967 
1968  // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
1969  // if the lowest bit is shifted off the end.
1970  if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
1971  // shl nuw can't remove any non-zero bits.
1972  const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
1973  if (BO->hasNoUnsignedWrap())
1974  return isKnownNonZero(X, Depth, Q);
1975 
1976  KnownBits Known(BitWidth);
1977  computeKnownBits(X, Known, Depth, Q);
1978  if (Known.One[0])
1979  return true;
1980  }
1981  // shr X, Y != 0 if X is negative. Note that the value of the shift is not
1982  // defined if the sign bit is shifted off the end.
1983  else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
1984  // shr exact can only shift out zero bits.
1985  const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
1986  if (BO->isExact())
1987  return isKnownNonZero(X, Depth, Q);
1988 
1989  KnownBits Known = computeKnownBits(X, Depth, Q);
1990  if (Known.isNegative())
1991  return true;
1992 
1993  // If the shifter operand is a constant, and all of the bits shifted
1994  // out are known to be zero, and X is known non-zero then at least one
1995  // non-zero bit must remain.
1996  if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
1997  auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
1998  // Is there a known one in the portion not shifted out?
1999  if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2000  return true;
2001  // Are all the bits to be shifted out known zero?
2002  if (Known.countMinTrailingZeros() >= ShiftVal)
2003  return isKnownNonZero(X, Depth, Q);
2004  }
2005  }
2006  // div exact can only produce a zero if the dividend is zero.
2007  else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2008  return isKnownNonZero(X, Depth, Q);
2009  }
2010  // X + Y.
2011  else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2012  KnownBits XKnown = computeKnownBits(X, Depth, Q);
2013  KnownBits YKnown = computeKnownBits(Y, Depth, Q);
2014 
2015  // If X and Y are both non-negative (as signed values) then their sum is not
2016  // zero unless both X and Y are zero.
2017  if (XKnown.isNonNegative() && YKnown.isNonNegative())
2018  if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
2019  return true;
2020 
2021  // If X and Y are both negative (as signed values) then their sum is not
2022  // zero unless both X and Y equal INT_MIN.
2023  if (XKnown.isNegative() && YKnown.isNegative()) {
2024  APInt Mask = APInt::getSignedMaxValue(BitWidth);
2025  // The sign bit of X is set. If some other bit is set then X is not equal
2026  // to INT_MIN.
2027  if (XKnown.One.intersects(Mask))
2028  return true;
2029  // The sign bit of Y is set. If some other bit is set then Y is not equal
2030  // to INT_MIN.
2031  if (YKnown.One.intersects(Mask))
2032  return true;
2033  }
2034 
2035  // The sum of a non-negative number and a power of two is not zero.
2036  if (XKnown.isNonNegative() &&
2037  isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2038  return true;
2039  if (YKnown.isNonNegative() &&
2040  isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2041  return true;
2042  }
2043  // X * Y.
2044  else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2045  const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2046  // If X and Y are non-zero then so is X * Y as long as the multiplication
2047  // does not overflow.
2048  if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
2049  isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
2050  return true;
2051  }
2052  // (C ? X : Y) != 0 if X != 0 and Y != 0.
2053  else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2054  if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
2055  isKnownNonZero(SI->getFalseValue(), Depth, Q))
2056  return true;
2057  }
2058  // PHI
2059  else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2060  // Try and detect a recurrence that monotonically increases from a
2061  // starting value, as these are common as induction variables.
2062  if (PN->getNumIncomingValues() == 2) {
2063  Value *Start = PN->getIncomingValue(0);
2064  Value *Induction = PN->getIncomingValue(1);
2065  if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2066  std::swap(Start, Induction);
2067  if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2068  if (!C->isZero() && !C->isNegative()) {
2069  ConstantInt *X;
2070  if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2071  match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2072  !X->isNegative())
2073  return true;
2074  }
2075  }
2076  }
2077  // Check if all incoming values are non-zero constant.
2078  bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
2079  return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
2080  });
2081  if (AllNonZeroConstants)
2082  return true;
2083  }
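 // Illustrative IR (hypothetical loop, not from the original source):
 //   %i = phi i32 [ 1, %entry ], [ %i.next, %loop ]
 //   %i.next = add nuw i32 %i, 1
 // The start is a positive constant and every step adds a non-negative
 // constant without unsigned wrap, so the recurrence check above proves
 // %i is never zero.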
2084 
2085  KnownBits Known(BitWidth);
2086  computeKnownBits(V, Known, Depth, Q);
2087  return Known.One != 0;
2088 }
2089 
2090 /// Return true if V1 == V2 + X, where X is known non-zero.
2091 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2092  const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2093  if (!BO || BO->getOpcode() != Instruction::Add)
2094  return false;
2095  Value *Op = nullptr;
2096  if (V2 == BO->getOperand(0))
2097  Op = BO->getOperand(1);
2098  else if (V2 == BO->getOperand(1))
2099  Op = BO->getOperand(0);
2100  else
2101  return false;
2102  return isKnownNonZero(Op, 0, Q);
2103 }
2104 
2105 /// Return true if it is known that V1 != V2.
2106 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2107  if (V1 == V2)
2108  return false;
2109  if (V1->getType() != V2->getType())
2110  // We can't look through casts yet.
2111  return false;
2112  if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2113  return true;
2114 
2115  if (V1->getType()->isIntOrIntVectorTy()) {
2116  // Are any known bits in V1 contradictory to known bits in V2? If V1
2117  // has a known zero where V2 has a known one, they must not be equal.
2118  KnownBits Known1 = computeKnownBits(V1, 0, Q);
2119  KnownBits Known2 = computeKnownBits(V2, 0, Q);
2120 
2121  if (Known1.Zero.intersects(Known2.One) ||
2122  Known2.Zero.intersects(Known1.One))
2123  return true;
2124  }
2125  return false;
2126 }
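// Worked example (illustrative): if V1 = add i32 V2, %x and %x is known
// non-zero, isAddOfNonZero proves V1 != V2; likewise, a known zero bit in V1
// at a position where V2 has a known one bit rules out equality.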
2127 
2128 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
2129 /// simplify operations downstream. Mask is known to be zero for bits that V
2130 /// cannot have.
2131 ///
2132 /// This function is defined on values with integer type, values with pointer
2133 /// type, and vectors of integers. In the case
2134 /// where V is a vector, the mask, known zero, and known one values are the
2135 /// same width as the vector element, and the bit is set only if it is true
2136 /// for all of the elements in the vector.
2137 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2138  const Query &Q) {
2139  KnownBits Known(Mask.getBitWidth());
2140  computeKnownBits(V, Known, Depth, Q);
2141  return Mask.isSubsetOf(Known.Zero);
2142 }
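// Usage sketch (illustrative): if V is known to be a multiple of 4, its two
// low bits are in Known.Zero, so a query with Mask == 3 returns true and a
// downstream 'and V, 3' can be folded to zero.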
2143 
2144 /// For vector constants, loop over the elements and find the constant with the
2145 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2146 /// or if any element was not analyzed; otherwise, return the count for the
2147 /// element with the minimum number of sign bits.
2148 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2149  unsigned TyBits) {
2150  const auto *CV = dyn_cast<Constant>(V);
2151  if (!CV || !CV->getType()->isVectorTy())
2152  return 0;
2153 
2154  unsigned MinSignBits = TyBits;
2155  unsigned NumElts = CV->getType()->getVectorNumElements();
2156  for (unsigned i = 0; i != NumElts; ++i) {
2157  // If we find a non-ConstantInt, bail out.
2158  auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2159  if (!Elt)
2160  return 0;
2161 
2162  MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2163  }
2164 
2165  return MinSignBits;
2166 }
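// Worked example (illustrative): for the constant <2 x i8> <i8 -1, i8 3>,
// element -1 (0b11111111) has 8 sign bits and element 3 (0b00000011) has 6,
// so the minimum, 6, is returned.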
2167 
2168 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2169  const Query &Q);
2170 
2171 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2172  const Query &Q) {
2173  unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2174  assert(Result > 0 && "At least one sign bit needs to be present!");
2175  return Result;
2176 }
2177 
2178 /// Return the number of times the sign bit of the register is replicated into
2179 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2180 /// (itself), but other cases can give us information. For example, immediately
2181 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2182 /// other, so we return 3. For vectors, return the number of sign bits for the
2183 /// vector element with the minimum number of known sign bits.
2184 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2185  const Query &Q) {
2186  assert(Depth <= MaxDepth && "Limit Search Depth");
2187 
2188  // We return the minimum number of sign bits that are guaranteed to be present
2189  // in V, so for undef we have to conservatively return 1. We don't have the
2190  // same behavior for poison though -- that's a FIXME today.
2191 
2192  unsigned TyBits = Q.DL.getTypeSizeInBits(V->getType()->getScalarType());
2193  unsigned Tmp, Tmp2;
2194  unsigned FirstAnswer = 1;
2195 
2196  // Note that ConstantInt is handled by the general computeKnownBits case
2197  // below.
2198 
2199  if (Depth == MaxDepth)
2200  return 1; // Limit search depth.
2201 
2202  const Operator *U = dyn_cast<Operator>(V);
2203  switch (Operator::getOpcode(V)) {
2204  default: break;
2205  case Instruction::SExt:
2206  Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2207  return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2208 
2209  case Instruction::SDiv: {
2210  const APInt *Denominator;
2211  // sdiv X, C -> adds log(C) sign bits.
2212  if (match(U->getOperand(1), m_APInt(Denominator))) {
2213 
2214  // Ignore non-positive denominator.
2215  if (!Denominator->isStrictlyPositive())
2216  break;
2217 
2218  // Calculate the incoming numerator bits.
2219  unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2220 
2221  // Add floor(log(C)) bits to the numerator bits.
2222  return std::min(TyBits, NumBits + Denominator->logBase2());
2223  }
2224  break;
2225  }
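 // Worked example (illustrative): for sdiv i32 %x, 16 where %x has 4 known
 // sign bits, logBase2(16) == 4, so the result is known to have
 // min(32, 4 + 4) = 8 sign bits.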
2226 
2227  case Instruction::SRem: {
2228  const APInt *Denominator;
2229  // srem X, C -> we know that the result is within [-C+1,C) when C is a
2230  // positive constant. This lets us put a lower bound on the number of sign
2231  // bits.
2232  if (match(U->getOperand(1), m_APInt(Denominator))) {
2233 
2234  // Ignore non-positive denominator.
2235  if (!Denominator->isStrictlyPositive())
2236  break;
2237 
2238  // Calculate the incoming numerator bits. SRem by a positive constant
2239  // can't lower the number of sign bits.
2240  unsigned NumrBits =
2241  ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2242 
2243  // Calculate the leading sign bit constraints by examining the
2244  // denominator. Given that the denominator is positive, there are two
2245  // cases:
2246  //
2247  // 1. the numerator is positive. The result range is [0,C) and [0,C) u<
2248  // (1 << ceilLogBase2(C)).
2249  //
2250  // 2. the numerator is negative. Then the result range is (-C,0] and
2251  // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2252  //
2253  // Thus a lower bound on the number of sign bits is `TyBits -
2254  // ceilLogBase2(C)`.
2255 
2256  unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2257  return std::max(NumrBits, ResBits);
2258  }
2259  break;
2260  }
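 // Worked example (illustrative): for srem i32 %x, 8, ceilLogBase2(8) == 3,
 // so the result has at least 32 - 3 = 29 sign bits no matter how little is
 // known about the numerator.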
2261 
2262  case Instruction::AShr: {
2263  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2264  // ashr X, C -> adds C sign bits. Vectors too.
2265  const APInt *ShAmt;
2266  if (match(U->getOperand(1), m_APInt(ShAmt))) {
2267  if (ShAmt->uge(TyBits))
2268  break; // Bad shift.
2269  unsigned ShAmtLimited = ShAmt->getZExtValue();
2270  Tmp += ShAmtLimited;
2271  if (Tmp > TyBits) Tmp = TyBits;
2272  }
2273  return Tmp;
2274  }
2275  case Instruction::Shl: {
2276  const APInt *ShAmt;
2277  if (match(U->getOperand(1), m_APInt(ShAmt))) {
2278  // shl destroys sign bits.
2279  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2280  if (ShAmt->uge(TyBits) || // Bad shift.
2281  ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2282  Tmp2 = ShAmt->getZExtValue();
2283  return Tmp - Tmp2;
2284  }
2285  break;
2286  }
2287  case Instruction::And:
2288  case Instruction::Or:
2289  case Instruction::Xor: // NOT is handled here.
2290  // Logical binary ops preserve the number of sign bits at the worst.
2291  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2292  if (Tmp != 1) {
2293  Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2294  FirstAnswer = std::min(Tmp, Tmp2);
2295  // We computed what we know about the sign bits as our first
2296  // answer. Now proceed to the generic code that uses
2297  // computeKnownBits, and pick whichever answer is better.
2298  }
2299  break;
2300 
2301  case Instruction::Select:
2302  Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2303  if (Tmp == 1) return 1; // Early out.
2304  Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2305  return std::min(Tmp, Tmp2);
2306 
2307  case Instruction::Add:
2308  // Add can have at most one carry bit. Thus we know that the output
2309  // is, at worst, one more bit than the inputs.
2310  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2311  if (Tmp == 1) return 1; // Early out.
2312 
2313  // Special case decrementing a value (ADD X, -1):
2314  if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2315  if (CRHS->isAllOnesValue()) {
2316  KnownBits Known(TyBits);
2317  computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2318 
2319  // If the input is known to be 0 or 1, the output is 0/-1, which is all
2320  // sign bits set.
2321  if ((Known.Zero | 1).isAllOnesValue())
2322  return TyBits;
2323 
2324  // If we are subtracting one from a positive number, there is no carry
2325  // out of the result.
2326  if (Known.isNonNegative())
2327  return Tmp;
2328  }
2329 
2330  Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2331  if (Tmp2 == 1) return 1;
2332  return std::min(Tmp, Tmp2)-1;
2333 
2334  case Instruction::Sub:
2335  Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2336  if (Tmp2 == 1) return 1;
2337 
2338  // Handle NEG.
2339  if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2340  if (CLHS->isNullValue()) {
2341  KnownBits Known(TyBits);
2342  computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2343  // If the input is known to be 0 or 1, the output is 0/-1, which is all
2344  // sign bits set.
2345  if ((Known.Zero | 1).isAllOnesValue())
2346  return TyBits;
2347 
2348  // If the input is known to be positive (the sign bit is known clear),
2349  // the output of the NEG has the same number of sign bits as the input.
2350  if (Known.isNonNegative())
2351  return Tmp2;
2352 
2353  // Otherwise, we treat this like a SUB.
2354  }
2355 
2356  // Sub can have at most one carry bit. Thus we know that the output
2357  // is, at worst, one more bit than the inputs.
2358  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2359  if (Tmp == 1) return 1; // Early out.
2360  return std::min(Tmp, Tmp2)-1;
2361 
2362  case Instruction::Mul: {
2363  // The output of the Mul can be at most twice the valid bits in the inputs.
2364  unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2365  if (SignBitsOp0 == 1) return 1; // Early out.
2366  unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2367  if (SignBitsOp1 == 1) return 1;
2368  unsigned OutValidBits =
2369  (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2370  return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2371  }
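 // Worked example (illustrative): multiplying two i32 values that each have
 // 20 sign bits gives OutValidBits = (32 - 20 + 1) * 2 = 26, so the product
 // is known to have 32 - 26 + 1 = 7 sign bits.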
2372 
2373  case Instruction::PHI: {
2374  const PHINode *PN = cast<PHINode>(U);
2375  unsigned NumIncomingValues = PN->getNumIncomingValues();
2376  // Don't analyze large in-degree PHIs.
2377  if (NumIncomingValues > 4) break;
2378  // Unreachable blocks may have zero-operand PHI nodes.
2379  if (NumIncomingValues == 0) break;
2380 
2381  // Take the minimum of all incoming values. This can't infinitely loop
2382  // because of our depth threshold.
2383  Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2384  for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2385  if (Tmp == 1) return Tmp;
2386  Tmp = std::min(
2387  Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2388  }
2389  return Tmp;
2390  }
2391 
2392  case Instruction::Trunc:
2393  // FIXME: it's tricky to do anything useful for this, but it is an important
2394  // case for targets like X86.
2395  break;
2396 
2397  case Instruction::ExtractElement:
2398  // Look through extract element. At the moment we keep this simple and skip
2399  // tracking the specific element. But at least we might find information
2400 // valid for all elements of the vector (for example if the vector is sign
2401  // extended, shifted, etc).
2402  return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2403  }
2404 
2405  // Finally, if we can prove that the top bits of the result are 0's or 1's,
2406  // use this information.
2407 
2408  // If we can examine all elements of a vector constant successfully, we're
2409  // done (we can't do any better than that). If not, keep trying.
2410  if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2411  return VecSignBits;
2412 
2413  KnownBits Known(TyBits);
2414  computeKnownBits(V, Known, Depth, Q);
2415 
2416  // If we know that the sign bit is either zero or one, determine the number of
2417  // identical bits in the top of the input value.
2418  return std::max(FirstAnswer, Known.countMinSignBits());
2419 }
2420 
2421 /// This function computes the integer multiple of Base that equals V.
2422 /// If successful, it returns true and stores the multiple in
2423 /// Multiple. If unsuccessful, it returns false. It looks
2424 /// through SExt instructions only if LookThroughSExt is true.
2425 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2426  bool LookThroughSExt, unsigned Depth) {
2427  const unsigned MaxDepth = 6;
2428 
2429  assert(V && "No Value?");
2430  assert(Depth <= MaxDepth && "Limit Search Depth");
2431  assert(V->getType()->isIntegerTy() && "Not integer type!");
2432 
2433  Type *T = V->getType();
2434 
2435  ConstantInt *CI = dyn_cast<ConstantInt>(V);
2436 
2437  if (Base == 0)
2438  return false;
2439 
2440  if (Base == 1) {
2441  Multiple = V;
2442  return true;
2443  }
2444 
2445  ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2446  Constant *BaseVal = ConstantInt::get(T, Base);
2447  if (CO && CO == BaseVal) {
2448  // Multiple is 1.
2449  Multiple = ConstantInt::get(T, 1);
2450  return true;
2451  }
2452 
2453  if (CI && CI->getZExtValue() % Base == 0) {
2454  Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2455  return true;
2456  }
2457 
2458  if (Depth == MaxDepth) return false; // Limit search depth.
2459 
2460  Operator *I = dyn_cast<Operator>(V);
2461  if (!I) return false;
2462 
2463  switch (I->getOpcode()) {
2464  default: break;
2465  case Instruction::SExt:
2466  if (!LookThroughSExt) return false;
2467  // otherwise fall through to ZExt
2468  LLVM_FALLTHROUGH;
2469  case Instruction::ZExt:
2470  return ComputeMultiple(I->getOperand(0), Base, Multiple,
2471  LookThroughSExt, Depth+1);
2472  case Instruction::Shl:
2473  case Instruction::Mul: {
2474  Value *Op0 = I->getOperand(0);
2475  Value *Op1 = I->getOperand(1);
2476 
2477  if (I->getOpcode() == Instruction::Shl) {
2478  ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2479  if (!Op1CI) return false;
2480  // Turn Op0 << Op1 into Op0 * 2^Op1
2481  APInt Op1Int = Op1CI->getValue();
2482  uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2483  APInt API(Op1Int.getBitWidth(), 0);
2484  API.setBit(BitToSet);
2485  Op1 = ConstantInt::get(V->getContext(), API);
2486  }
2487 
2488  Value *Mul0 = nullptr;
2489  if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2490  if (Constant *Op1C = dyn_cast<Constant>(Op1))
2491  if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2492  if (Op1C->getType()->getPrimitiveSizeInBits() <
2493  MulC->getType()->getPrimitiveSizeInBits())
2494  Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2495  if (Op1C->getType()->getPrimitiveSizeInBits() >
2496  MulC->getType()->getPrimitiveSizeInBits())
2497  MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2498 
2499  // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2500  Multiple = ConstantExpr::getMul(MulC, Op1C);
2501  return true;
2502  }
2503 
2504  if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2505  if (Mul0CI->getValue() == 1) {
2506  // V == Base * Op1, so return Op1
2507  Multiple = Op1;
2508  return true;
2509  }
2510  }
2511 
2512  Value *Mul1 = nullptr;
2513  if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2514  if (Constant *Op0C = dyn_cast<Constant>(Op0))
2515  if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2516  if (Op0C->getType()->getPrimitiveSizeInBits() <
2517  MulC->getType()->getPrimitiveSizeInBits())
2518  Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2519  if (Op0C->getType()->getPrimitiveSizeInBits() >
2520  MulC->getType()->getPrimitiveSizeInBits())
2521  MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2522 
2523  // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2524  Multiple = ConstantExpr::getMul(MulC, Op0C);
2525  return true;
2526  }
2527 
2528  if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2529  if (Mul1CI->getValue() == 1) {
2530  // V == Base * Op0, so return Op0
2531  Multiple = Op0;
2532  return true;
2533  }
2534  }
2535  }
2536  }
2537 
2538  // We could not determine if V is a multiple of Base.
2539  return false;
2540 }
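// Usage sketch (illustrative, hypothetical %x): for V = shl i32 %x, 3 and
// Base == 8, the shift is rewritten as %x * 8; the constant operand is the
// multiple 1 of Base, so the routine returns true with Multiple set to %x
// itself.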
2541 
2542 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2543  const TargetLibraryInfo *TLI) {
2544  const Function *F = ICS.getCalledFunction();
2545  if (!F)
2546  return Intrinsic::not_intrinsic;
2547 
2548  if (F->isIntrinsic())
2549  return F->getIntrinsicID();
2550 
2551  if (!TLI)
2552  return Intrinsic::not_intrinsic;
2553 
2554  LibFunc Func;
2555  // We're going to make assumptions about the semantics of the functions, so
2556  // check that the target knows the function is available in this environment
2557  // and that it does not have local linkage.
2558  if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2559  return Intrinsic::not_intrinsic;
2560 
2561  if (!ICS.onlyReadsMemory())
2562  return Intrinsic::not_intrinsic;
2563 
2564  // Otherwise check if we have a call to a function that can be turned into a
2565  // vector intrinsic.
2566  switch (Func) {
2567  default:
2568  break;
2569  case LibFunc_sin:
2570  case LibFunc_sinf:
2571  case LibFunc_sinl:
2572  return Intrinsic::sin;
2573  case LibFunc_cos:
2574  case LibFunc_cosf:
2575  case LibFunc_cosl:
2576  return Intrinsic::cos;
2577  case LibFunc_exp:
2578  case LibFunc_expf:
2579  case LibFunc_expl:
2580  return Intrinsic::exp;
2581  case LibFunc_exp2:
2582  case LibFunc_exp2f:
2583  case LibFunc_exp2l:
2584  return Intrinsic::exp2;
2585  case LibFunc_log:
2586  case LibFunc_logf:
2587  case LibFunc_logl:
2588  return Intrinsic::log;
2589  case LibFunc_log10:
2590  case LibFunc_log10f:
2591  case LibFunc_log10l:
2592  return Intrinsic::log10;
2593  case LibFunc_log2:
2594  case LibFunc_log2f:
2595  case LibFunc_log2l:
2596  return Intrinsic::log2;
2597  case LibFunc_fabs:
2598  case LibFunc_fabsf:
2599  case LibFunc_fabsl:
2600  return Intrinsic::fabs;
2601  case LibFunc_fmin:
2602  case LibFunc_fminf:
2603  case LibFunc_fminl:
2604  return Intrinsic::minnum;
2605  case LibFunc_fmax:
2606  case LibFunc_fmaxf:
2607  case LibFunc_fmaxl:
2608  return Intrinsic::maxnum;
2609  case LibFunc_copysign:
2610  case LibFunc_copysignf:
2611  case LibFunc_copysignl:
2612  return Intrinsic::copysign;
2613  case LibFunc_floor:
2614  case LibFunc_floorf:
2615  case LibFunc_floorl:
2616  return Intrinsic::floor;
2617  case LibFunc_ceil:
2618  case LibFunc_ceilf:
2619  case LibFunc_ceill:
2620  return Intrinsic::ceil;
2621  case LibFunc_trunc:
2622  case LibFunc_truncf:
2623  case LibFunc_truncl:
2624  return Intrinsic::trunc;
2625  case LibFunc_rint:
2626  case LibFunc_rintf:
2627  case LibFunc_rintl:
2628  return Intrinsic::rint;
2629  case LibFunc_nearbyint:
2630  case LibFunc_nearbyintf:
2631  case LibFunc_nearbyintl:
2632  return Intrinsic::nearbyint;
2633  case LibFunc_round:
2634  case LibFunc_roundf:
2635  case LibFunc_roundl:
2636  return Intrinsic::round;
2637  case LibFunc_pow:
2638  case LibFunc_powf:
2639  case LibFunc_powl:
2640  return Intrinsic::pow;
2641  case LibFunc_sqrt:
2642  case LibFunc_sqrtf:
2643  case LibFunc_sqrtl:
2644  return Intrinsic::sqrt;
2645  }
2646 
2647  return Intrinsic::not_intrinsic;
2648 }
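// Example (illustrative): a readonly call to the C library function sinf,
// recognized by TargetLibraryInfo as LibFunc_sinf, maps to Intrinsic::sin,
// while a call to an unknown or memory-writing function yields
// Intrinsic::not_intrinsic.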
2649 
2650 /// Return true if we can prove that the specified FP value is never equal to
2651 /// -0.0.
2652 ///
2653 /// NOTE: this function will need to be revisited when we support non-default
2654 /// rounding modes!
2655 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2656  unsigned Depth) {
2657  if (auto *CFP = dyn_cast<ConstantFP>(V))
2658  return !CFP->getValueAPF().isNegZero();
2659 
2660  // Limit search depth.
2661  if (Depth == MaxDepth)
2662  return false;
2663 
2664  auto *Op = dyn_cast<Operator>(V);
2665  if (!Op)
2666  return false;
2667 
2668  // Check if the nsz fast-math flag is set.
2669  if (auto *FPO = dyn_cast<FPMathOperator>(Op))
2670  if (FPO->hasNoSignedZeros())
2671  return true;
2672 
2673  // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
2674  if (match(Op, m_FAdd(m_Value(), m_Zero())))
2675  return true;
2676 
2677  // sitofp and uitofp turn into +0.0 for zero.
2678  if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
2679  return true;
2680 
2681  if (auto *Call = dyn_cast<CallInst>(Op)) {
2682  Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI);
2683  switch (IID) {
2684  default:
2685  break;
2686  // sqrt(-0.0) = -0.0, no other negative results are possible.
2687  case Intrinsic::sqrt:
2688  return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
2689  // fabs(x) != -0.0
2690  case Intrinsic::fabs:
2691  return true;
2692  }
2693  }
2694 
2695  return false;
2696 }
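// Example (illustrative): (fadd %x, 0.0) can never produce -0.0 because
// -0.0 + 0.0 rounds to +0.0 under the default rounding mode, so the m_FAdd
// match above succeeds even without the nsz fast-math flag.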
2697 
2698 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
2699 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
2700 /// bit despite comparing equal.
2701 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
2702  const TargetLibraryInfo *TLI,
2703  bool SignBitOnly,
2704  unsigned Depth) {
2705  // TODO: This function does not do the right thing when SignBitOnly is true
2706  // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
2707  // which flips the sign bits of NaNs. See
2708  // https://llvm.org/bugs/show_bug.cgi?id=31702.
2709 
2710  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2711  return !CFP->getValueAPF().isNegative() ||
2712  (!SignBitOnly && CFP->getValueAPF().isZero());
2713  }
2714 
2715  if (Depth == MaxDepth)
2716  return false; // Limit search depth.
2717 
2718  const Operator *I = dyn_cast<Operator>(V);
2719  if (!I)
2720  return false;
2721 
2722  switch (I->getOpcode()) {
2723  default:
2724  break;
2725  // Unsigned integers are always nonnegative.
2726  case Instruction::UIToFP:
2727  return true;
2728  case Instruction::FMul:
2729  // x*x is always non-negative or a NaN.
2730  if (I->getOperand(0) == I->getOperand(1) &&
2731  (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
2732  return true;
2733 
2734  LLVM_FALLTHROUGH;
2735  case Instruction::FAdd:
2736  case Instruction::FDiv:
2737  case Instruction::FRem:
2738  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2739  Depth + 1) &&
2740  cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2741  Depth + 1);
2742  case Instruction::Select:
2743  return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2744  Depth + 1) &&
2745  cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2746  Depth + 1);
2747  case Instruction::FPExt:
2748  case Instruction::FPTrunc:
2749  // Widening/narrowing never change sign.
2750  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2751  Depth + 1);
2752  case Instruction::Call:
2753  const auto *CI = cast<CallInst>(I);
2754  Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
2755  switch (IID) {
2756  default:
2757  break;
2758  case Intrinsic::maxnum:
2759  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2760  Depth + 1) ||
2761  cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2762  Depth + 1);
2763  case Intrinsic::minnum:
2764  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2765  Depth + 1) &&
2766  cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
2767  Depth + 1);
2768  case Intrinsic::exp:
2769  case Intrinsic::exp2:
2770  case Intrinsic::fabs:
2771  return true;
2772 
2773  case Intrinsic::sqrt:
2774  // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
2775  if (!SignBitOnly)
2776  return true;
2777  return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
2778  CannotBeNegativeZero(CI->getOperand(0), TLI));
2779 
2780  case Intrinsic::powi:
2781  if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
2782  // powi(x,n) is non-negative if n is even.
2783  if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
2784  return true;
2785  }
2786  // TODO: This is not correct. Given that exp is an integer, here are the
2787  // ways that pow can return a negative value:
2788  //
2789  // pow(x, exp) --> negative if exp is odd and x is negative.
2790  // pow(-0, exp) --> -inf if exp is negative odd.
2791  // pow(-0, exp) --> -0 if exp is positive odd.
2792  // pow(-inf, exp) --> -0 if exp is negative odd.
2793  // pow(-inf, exp) --> -inf if exp is positive odd.
2794  //
2795  // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
2796  // but we must return false if x == -0. Unfortunately we do not currently
2797  // have a way of expressing this constraint. See details in
2798  // https://llvm.org/bugs/show_bug.cgi?id=31702.
2799  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2800  Depth + 1);
2801 
2802  case Intrinsic::fma:
2803  case Intrinsic::fmuladd:
2804  // x*x+y is non-negative if y is non-negative.
2805  return I->getOperand(0) == I->getOperand(1) &&
2806  (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
2807  cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
2808  Depth + 1);
2809  }
2810  break;
2811  }
2812  return false;
2813 }
2814 
2815 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
2816  const TargetLibraryInfo *TLI) {
2817  return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
2818 }
2819 
2820 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
2821  return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
2822 }
2823 
2824 bool llvm::isKnownNeverNaN(const Value *V) {
2825  assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
2826 
2827  // If we're told that NaNs won't happen, assume they won't.
2828  if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
2829  if (FPMathOp->hasNoNaNs())
2830  return true;
2831 
2832  // TODO: Handle instructions and potentially recurse like other 'isKnown'
2833  // functions. For example, the result of sitofp is never NaN.
2834 
2835  // Handle scalar constants.
2836  if (auto *CFP = dyn_cast<ConstantFP>(V))
2837  return !CFP->isNaN();
2838 
2839  // Bail out for constant expressions, but try to handle vector constants.
2840  if (!V->getType()->isVectorTy() || !isa<Constant>(V))
2841  return false;
2842 
2843  // For vectors, verify that each element is not NaN.
2844  unsigned NumElts = V->getType()->getVectorNumElements();
2845  for (unsigned i = 0; i != NumElts; ++i) {
2846  Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
2847  if (!Elt)
2848  return false;
2849  if (isa<UndefValue>(Elt))
2850  continue;
2851  auto *CElt = dyn_cast<ConstantFP>(Elt);
2852  if (!CElt || CElt->isNaN())
2853  return false;
2854  }
2855  // All elements were confirmed not-NaN or undefined.
2856  return true;
2857 }
2858 
2859 /// If the specified value can be set by repeating the same byte in memory,
2860 /// return the i8 value that it is represented with. This is
2861 /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
2862 /// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
2863 /// byte store (e.g. i16 0x1234), return null.
2864 Value *llvm::isBytewiseValue(Value *V) {
2865  // All byte-wide stores are splatable, even of arbitrary variables.
2866  if (V->getType()->isIntegerTy(8)) return V;
2867 
2868  // Handle 'null' ConstantArrayZero etc.
2869  if (Constant *C = dyn_cast<Constant>(V))
2870  if (C->isNullValue())
2871  return Constant::getNullValue(Type::getInt8Ty(V->getContext()));
2872 
2873  // Constant float and double values can be handled as integer values if the
2874  // corresponding integer value is "byteable". An important case is 0.0.
2875  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2876  if (CFP->getType()->isFloatTy())
2877  V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(V->getContext()));
2878  if (CFP->getType()->isDoubleTy())
2879  V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(V->getContext()));
2880  // Don't handle long double formats, which have strange constraints.
2881  }
2882 
2883  // We can handle constant integers that are multiple of 8 bits.
2884  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
2885  if (CI->getBitWidth() % 8 == 0) {
2886  assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
2887 
2888  if (!CI->getValue().isSplat(8))
2889  return nullptr;
2890  return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
2891  }
2892  }
2893 
2894  // A ConstantDataArray/Vector is splatable if all its members are equal and
2895  // also splatable.
2896  if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(V)) {
2897  Value *Elt = CA->getElementAsConstant(0);
2898  Value *Val = isBytewiseValue(Elt);
2899  if (!Val)
2900  return nullptr;
2901 
2902  for (unsigned I = 1, E = CA->getNumElements(); I != E; ++I)
2903  if (CA->getElementAsConstant(I) != Elt)
2904  return nullptr;
2905 
2906  return Val;
2907  }
2908 
2909  // Conceptually, we could handle things like:
2910  // %a = zext i8 %X to i16
2911  // %b = shl i16 %a, 8
2912  // %c = or i16 %a, %b
2913  // but until there is an example that actually needs this, it doesn't seem
2914  // worth worrying about.
2915  return nullptr;
2916 }
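// Usage sketch (illustrative): i32 0xAAAAAAAA is a splat of the byte 0xAA,
// so isBytewiseValue returns i8 0xAA via the isSplat(8) path above, while
// i16 0x1234 is not byte-splattable and yields null.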
2917 
2918 // This is the recursive version of BuildSubAggregate. It takes a few different
2919 // arguments. Idxs is the index within the nested struct From that we are
2920 // looking at now (which is of type IndexedType). IdxSkip is the number of
2921 // indices from Idxs that should be left out when inserting into the resulting
2922 // struct. To is the result struct built so far; new insertvalue instructions
2923 // build on it.
2924 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
2925  SmallVectorImpl<unsigned> &Idxs,
2926  unsigned IdxSkip,
2927  Instruction *InsertBefore) {
2928  StructType *STy = dyn_cast<StructType>(IndexedType);
2929  if (STy) {
2930  // Save the original To argument so we can modify it
2931  Value *OrigTo = To;
2932  // General case, the type indexed by Idxs is a struct
2933  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2934  // Process each struct element recursively
2935  Idxs.push_back(i);
2936  Value *PrevTo = To;
2937  To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
2938  InsertBefore);
2939  Idxs.pop_back();
2940  if (!To) {
2941  // Couldn't find any inserted value for this index? Cleanup
2942  while (PrevTo != OrigTo) {
2943  InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
2944  PrevTo = Del->getAggregateOperand();
2945  Del->eraseFromParent();
2946  }
2947  // Stop processing elements
2948  break;
2949  }
2950  }
2951  // If we successfully found a value for each of our subaggregates
2952  if (To)
2953  return To;
2954  }
2955  // Base case, the type indexed by Idxs is not a struct, or not all of
2956  // the struct's elements had a value that was inserted directly. In the latter
2957  // case, perhaps we can't determine each of the subelements individually, but
2958  // we might be able to find the complete struct somewhere.
2959 
2960  // Find the value that is at that particular spot
2961  Value *V = FindInsertedValue(From, Idxs);
2962 
2963  if (!V)
2964  return nullptr;
2965 
2966  // Insert the value into the new (sub) aggregate
2967  return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
2968  "tmp", InsertBefore);
2969 }
2970 
2971 // This helper takes a nested struct and extracts a part of it (which is again a
2972 // struct) into a new value. For example, given the struct:
2973 // { a, { b, { c, d }, e } }
2974 // and the indices "1, 1" this returns
2975 // { c, d }.
2976 //
2977 // It does this by inserting an insertvalue for each element in the resulting
2978 // struct, as opposed to just inserting a single struct. This will only work if
2979 // each of the elements of the substruct is known (i.e., inserted into From by an
2980 // insertvalue instruction somewhere).
2981 //
2982 // All inserted insertvalue instructions are inserted before InsertBefore
2983 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
2984  Instruction *InsertBefore) {
2985  assert(InsertBefore && "Must have someplace to insert!");
2986  Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
2987  idx_range);
2988  Value *To = UndefValue::get(IndexedType);
2989  SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
2990  unsigned IdxSkip = Idxs.size();
2991 
2992  return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
2993 }
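// Illustrative sketch of the result (assuming values %b and %c were inserted
// into From by insertvalue instructions): extracting index range {1} from a
// value of type { i8, { i16, i32 } } produces, in front of InsertBefore,
//   %tmp = insertvalue { i16, i32 } undef, i16 %b, 0
//   %tmp1 = insertvalue { i16, i32 } %tmp, i32 %c, 1
// i.e. the substruct is rebuilt element by element from the values found by
// FindInsertedValue.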
2994 
2995 /// Given an aggregate and a sequence of indices, see if
2996 /// the scalar value indexed is already around as a register, for example if it
2997 /// were inserted directly into the aggregate.
2998 ///
2999 /// If InsertBefore is not null, this function will duplicate (modified)
3000 /// insertvalues when a part of a nested struct is extracted.
3001 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3002  Instruction *InsertBefore) {
3003  // Nothing to index? Just return V then (this is useful at the end of our
3004  // recursion).
3005  if (idx_range.empty())
3006  return V;
3007  // We have indices, so V should have an indexable type.
3008  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3009  "Not looking at a struct or array?");
3010  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3011  "Invalid indices for type?");
3012 
3013  if (Constant *C = dyn_cast<Constant>(V)) {
3014  C = C->getAggregateElement(idx_range[0]);
3015  if (!C) return nullptr;
3016  return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3017  }
3018 
3019  if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3020  // Loop the indices for the insertvalue instruction in parallel with the
3021  // requested indices
3022  const unsigned *req_idx = idx_range.begin();
3023  for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3024  i != e; ++i, ++req_idx) {
3025  if (req_idx == idx_range.end()) {
3026  // We can't handle this without inserting insertvalues
3027  if (!InsertBefore)
3028  return nullptr;
3029 
3030  // The requested index identifies a part of a nested aggregate. Handle
3031  // this specially. For example,
3032  // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3033  // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3034  // %C = extractvalue {i32, { i32, i32 } } %B, 1
3035  // This can be changed into
3036  // %A = insertvalue {i32, i32 } undef, i32 10, 0
3037  // %C = insertvalue {i32, i32 } %A, i32 11, 1
3038  // which allows the unused 0,0 element from the nested struct to be
3039  // removed.
3040  return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3041  InsertBefore);
3042  }
3043 
3044  // This insertvalue inserts something other than what we are looking for.
3045  // See if the (aggregate) value inserted into has the value we are
3046  // looking for.
3047  if (*req_idx != *i)
3048  return FindInsertedValue(I->getAggregateOperand(), idx_range,
3049  InsertBefore);
3050  }
3051  // If we end up here, the indices of the insertvalue match with those
3052  // requested (though possibly only partially). Now we recursively look at
3053  // the inserted value, passing any remaining indices.
3054  return FindInsertedValue(I->getInsertedValueOperand(),
3055  makeArrayRef(req_idx, idx_range.end()),
3056  InsertBefore);
3057  }
3058 
3059  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3060  // If we're extracting a value from an aggregate that was extracted from
3061  // something else, we can extract from that something else directly instead.
3062  // However, we will need to chain I's indices with the requested indices.
3063 
3064  // Calculate the number of indices required
3065  unsigned size = I->getNumIndices() + idx_range.size();
3066  // Allocate some space to put the new indices in
3067  SmallVector<unsigned, 5> Idxs;
3068  Idxs.reserve(size);
3069  // Add indices from the extract value instruction
3070  Idxs.append(I->idx_begin(), I->idx_end());
3071 
3072  // Add requested indices
3073  Idxs.append(idx_range.begin(), idx_range.end());
3074 
3075  assert(Idxs.size() == size
3076  && "Number of indices added not correct?");
3077 
3078  return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3079  }
3080  // Otherwise, we don't know (such as extracting from a function return
3081  // value or a load instruction).
3082  return nullptr;
3083 }
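// Illustrative example (with assumed values %x and %y):
//   %a = insertvalue { i32, i32 } undef, i32 %x, 0
//   %b = insertvalue { i32, i32 } %a, i32 %y, 1
// FindInsertedValue(%b, {0}) looks through %b (which inserts at index 1)
// into %a and returns %x, with no new instructions needed.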
3084 
3085 /// Analyze the specified pointer to see if it can be expressed as a base
3086 /// pointer plus a constant offset. Return the base and offset to the caller.
3087 Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
3088  const DataLayout &DL) {
3089  unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
3090  APInt ByteOffset(BitWidth, 0);
3091 
3092  // We walk up the defs but use a visited set to handle unreachable code. In
3093  // that case, we stop after accumulating the cycle once (not that it
3094  // matters).
3095  SmallPtrSet<Value *, 16> Visited;
3096  while (Visited.insert(Ptr).second) {
3097  if (Ptr->getType()->isVectorTy())
3098  break;
3099 
3100  if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
3101  // If one of the values we have visited is an addrspacecast, then
3102  // the pointer type of this GEP may be different from the type
3103  // of the Ptr parameter which was passed to this function. This
3104  // means when we construct GEPOffset, we need to use the size
3105  // of GEP's pointer type rather than the size of the original
3106  // pointer type.
3107  APInt GEPOffset(DL.getPointerTypeSizeInBits(Ptr->getType()), 0);
3108  if (!GEP->accumulateConstantOffset(DL, GEPOffset))
3109  break;
3110 
3111  ByteOffset += GEPOffset.getSExtValue();
3112 
3113  Ptr = GEP->getPointerOperand();
3114  } else if (Operator::getOpcode(Ptr) == Instruction::BitCast ||
3115  Operator::getOpcode(Ptr) == Instruction::AddrSpaceCast) {
3116  Ptr = cast<Operator>(Ptr)->getOperand(0);
3117  } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(Ptr)) {
3118  if (GA->isInterposable())
3119  break;
3120  Ptr = GA->getAliasee();
3121  } else {
3122  break;
3123  }
3124  }
3125  Offset = ByteOffset.getSExtValue();
3126  return Ptr;
3127 }
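// Illustrative example (assuming 64-bit pointers and an assumed base %p):
//   %q = getelementptr inbounds i8, i8* %p, i64 12
// GetPointerBaseWithConstantOffset(%q, Offset, DL) returns %p with Offset
// set to 12; a GEP with a non-constant index stops the walk at that GEP
// instead.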
3128 
3129 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3130  unsigned CharSize) {
3131  // Make sure the GEP has exactly three arguments.
3132  if (GEP->getNumOperands() != 3)
3133  return false;
3134 
3135  // Make sure the index-ee is a pointer to an array of
3136  // \p CharSize integers.
3137  ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3138  if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3139  return false;
3140 
3141  // Check to make sure that the first operand of the GEP is an integer and
3142  // has value 0 so that we are sure we're indexing into the initializer.
3143  const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3144  if (!FirstIdx || !FirstIdx->isZero())
3145  return false;
3146 
3147  return true;
3148 }
3149 
3150 bool llvm::getConstantDataArrayInfo(const Value *V,
3151  ConstantDataArraySlice &Slice,
3152  unsigned ElementSize, uint64_t Offset) {
3153  assert(V);
3154 
3155  // Look through bitcast instructions and geps.
3156  V = V->stripPointerCasts();
3157 
3158  // If the value is a GEP instruction or constant expression, treat it as an
3159  // offset.
3160  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3161  // The GEP operator should be based on a pointer to string constant, and is
3162  // indexing into the string constant.
3163  if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3164  return false;
3165 
3166  // If the second index isn't a ConstantInt, then this is a variable index
3167  // into the array. If this occurs, we can't say anything meaningful about
3168  // the string.
3169  uint64_t StartIdx = 0;
3170  if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3171  StartIdx = CI->getZExtValue();
3172  else
3173  return false;
3174  return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3175  StartIdx + Offset);
3176  }
3177 
3178  // The GEP, whether a constant expression or an instruction, must reference
3179  // a global variable that is a constant and is initialized. The referenced
3180  // constant initializer is the array that we'll use for optimization.
3181  const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3182  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3183  return false;
3184 
3185  const ConstantDataArray *Array;
3186  ArrayType *ArrayTy;
3187  if (GV->getInitializer()->isNullValue()) {
3188  Type *GVTy = GV->getValueType();
3189  if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3190  // A zeroinitializer for the array; there is no ConstantDataArray.
3191  Array = nullptr;
3192  } else {
3193  const DataLayout &DL = GV->getParent()->getDataLayout();
3194  uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3195  uint64_t Length = SizeInBytes / (ElementSize / 8);
3196  if (Length <= Offset)
3197  return false;
3198 
3199  Slice.Array = nullptr;
3200  Slice.Offset = 0;
3201  Slice.Length = Length - Offset;
3202  return true;
3203  }
3204  } else {
3205  // This must be a ConstantDataArray.
3206  Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3207  if (!Array)
3208  return false;
3209  ArrayTy = Array->getType();
3210  }
3211  if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3212  return false;
3213 
3214  uint64_t NumElts = ArrayTy->getArrayNumElements();
3215  if (Offset > NumElts)
3216  return false;
3217 
3218  Slice.Array = Array;
3219  Slice.Offset = Offset;
3220  Slice.Length = NumElts - Offset;
3221  return true;
3222 }
3223 
3224 /// This function computes the length of a null-terminated C string pointed to
3225 /// by V. If successful, it returns true and stores the string in Str.
3226 /// If unsuccessful, it returns false.
3227 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3228  uint64_t Offset, bool TrimAtNul) {
3229  ConstantDataArraySlice Slice;
3230  if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3231  return false;
3232 
3233  if (Slice.Array == nullptr) {
3234  if (TrimAtNul) {
3235  Str = StringRef();
3236  return true;
3237  }
3238  if (Slice.Length == 1) {
3239  Str = StringRef("", 1);
3240  return true;
3241  }
3242  // We cannot instantiate a StringRef as we do not have an appropriate string
3243  // of 0s at hand.
3244  return false;
3245  }
3246 
3247  // Start out with the entire array in the StringRef.
3248  Str = Slice.Array->getAsString();
3249  // Skip over 'offset' bytes.
3250  Str = Str.substr(Slice.Offset);
3251 
3252  if (TrimAtNul) {
3253  // Trim off the \0 and anything after it. If the array is not nul
3254  // terminated, we just return the remainder of the array. The client may know
3255  // some other way that the string is length-bound.
3256  Str = Str.substr(0, Str.find('\0'));
3257  }
3258  return true;
3259 }
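// Illustrative example, assuming this global:
//   @s = constant [4 x i8] c"hi\00\00"
// With TrimAtNul set, Str becomes "hi"; with TrimAtNul false, Str holds all
// four bytes, including both trailing nuls.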
3260 
3261 // These next two are very similar to the above, but also look through PHI
3262 // nodes.
3263 // TODO: See if we can integrate these two together.
3264 
3265 /// If we can compute the length of the string pointed to by
3266 /// the specified pointer, return 'len+1'. If we can't, return 0.
3267 static uint64_t GetStringLengthH(const Value *V,
3268  SmallPtrSetImpl<const PHINode*> &PHIs,
3269  unsigned CharSize) {
3270  // Look through noop bitcast instructions.
3271  V = V->stripPointerCasts();
3272 
3273  // If this is a PHI node, there are two cases: either we have already seen it
3274  // or we haven't.
3275  if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3276  if (!PHIs.insert(PN).second)
3277  return ~0ULL; // already in the set.
3278 
3279  // If it was new, see if all the input strings are the same length.
3280  uint64_t LenSoFar = ~0ULL;
3281  for (Value *IncValue : PN->incoming_values()) {
3282  uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3283  if (Len == 0) return 0; // Unknown length -> unknown.
3284 
3285  if (Len == ~0ULL) continue;
3286 
3287  if (Len != LenSoFar && LenSoFar != ~0ULL)
3288  return 0; // Disagree -> unknown.
3289  LenSoFar = Len;
3290  }
3291 
3292  // Success, all agree.
3293  return LenSoFar;
3294  }
3295 
3296  // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
3297  if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3298  uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3299  if (Len1 == 0) return 0;
3300  uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3301  if (Len2 == 0) return 0;
3302  if (Len1 == ~0ULL) return Len2;
3303  if (Len2 == ~0ULL) return Len1;
3304  if (Len1 != Len2) return 0;
3305  return Len1;
3306  }
3307 
3308  // Otherwise, see if we can read the string.
3309  ConstantDataArraySlice Slice;
3310  if (!getConstantDataArrayInfo(V, Slice, CharSize))
3311  return 0;
3312 
3313  if (Slice.Array == nullptr)
3314  return 1;
3315 
3316  // Search for nul characters
3317  unsigned NullIndex = 0;
3318  for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3319  if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3320  break;
3321  }
3322 
3323  return NullIndex + 1;
3324 }
3325 
3326 /// If we can compute the length of the string pointed to by
3327 /// the specified pointer, return 'len+1'. If we can't, return 0.
3328 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3329  if (!V->getType()->isPointerTy()) return 0;
3330 
3331  SmallPtrSet<const PHINode*, 32> PHIs;
3332  uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3333  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
3334  // return 1, the length of an empty string.
3335  return Len == ~0ULL ? 1 : Len;
3336 }
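// Illustrative example, assuming this global:
//   @s = constant [3 x i8] c"hi\00"
// GetStringLength(@s) returns 3, i.e. strlen("hi") + 1; a pointer whose
// contents cannot be analyzed yields 0.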
3337 
3338 /// \brief \p PN defines a loop-variant pointer to an object. Check if the
3339 /// previous iteration of the loop was referring to the same object as \p PN.
3340 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3341  const LoopInfo *LI) {
3342  // Find the loop-defined value.
3343  Loop *L = LI->getLoopFor(PN->getParent());
3344  if (PN->getNumIncomingValues() != 2)
3345  return true;
3346 
3347  // Find the value from previous iteration.
3348  auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3349  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3350  PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3351  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3352  return true;
3353 
3354  // If a new pointer is loaded in the loop, the pointer references a different
3355  // object in every iteration. E.g.:
3356  // for (i)
3357  // int *p = a[i];
3358  // ...
3359  if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3360  if (!L->isLoopInvariant(Load->getPointerOperand()))
3361  return false;
3362  return true;
3363 }
3364 
3365 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3366  unsigned MaxLookup) {
3367  if (!V->getType()->isPointerTy())
3368  return V;
3369  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3370  if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3371  V = GEP->getPointerOperand();
3372  } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3373  Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3374  V = cast<Operator>(V)->getOperand(0);
3375  } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3376  if (GA->isInterposable())
3377  return V;
3378  V = GA->getAliasee();
3379  } else if (isa<AllocaInst>(V)) {
3380  // An alloca can't be further simplified.
3381  return V;
3382  } else {
3383  if (auto CS = CallSite(V))
3384  if (Value *RV = CS.getReturnedArgOperand()) {
3385  V = RV;
3386  continue;
3387  }
3388 
3389  // See if InstructionSimplify knows any relevant tricks.
3390  if (Instruction *I = dyn_cast<Instruction>(V))
3391  // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3392  if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3393  V = Simplified;
3394  continue;
3395  }
3396 
3397  return V;
3398  }
3399  assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3400  }
3401  return V;
3402 }
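// Illustrative example (assumed IR):
//   %a = alloca [4 x i32]
//   %p = getelementptr [4 x i32], [4 x i32]* %a, i64 0, i64 2
//   %q = bitcast i32* %p to i8*
// GetUnderlyingObject(%q, DL) strips the bitcast and the GEP and returns %a.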
3403 
3404 void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
3405  const DataLayout &DL, LoopInfo *LI,
3406  unsigned MaxLookup) {
3407  SmallPtrSet<Value *, 4> Visited;
3408  SmallVector<Value *, 4> Worklist;
3409  Worklist.push_back(V);
3410  do {
3411  Value *P = Worklist.pop_back_val();
3412  P = GetUnderlyingObject(P, DL, MaxLookup);
3413 
3414  if (!Visited.insert(P).second)
3415  continue;
3416 
3417  if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
3418  Worklist.push_back(SI->getTrueValue());
3419  Worklist.push_back(SI->getFalseValue());
3420  continue;
3421  }
3422 
3423  if (PHINode *PN = dyn_cast<PHINode>(P)) {
3424  // If this PHI changes the underlying object in every iteration of the
3425  // loop, don't look through it. Consider:
3426  // int **A;
3427  // for (i) {
3428  // Prev = Curr; // Prev = PHI (Prev_0, Curr)
3429  // Curr = A[i];
3430  // *Prev, *Curr;
3431  //
3432  // Prev is tracking Curr one iteration behind so they refer to different
3433  // underlying objects.
3434  if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3435  isSameUnderlyingObjectInLoop(PN, LI))
3436  for (Value *IncValue : PN->incoming_values())
3437  Worklist.push_back(IncValue);
3438  continue;
3439  }
3440 
3441  Objects.push_back(P);
3442  } while (!Worklist.empty());
3443 }
3444 
3445 /// This is the function that does the work of looking through basic
3446 /// ptrtoint+arithmetic+inttoptr sequences.
3447 static const Value *getUnderlyingObjectFromInt(const Value *V) {
3448  do {
3449  if (const Operator *U = dyn_cast<Operator>(V)) {
3450  // If we find a ptrtoint, we can transfer control back to the
3451  // regular getUnderlyingObjectFromInt.
3452  if (U->getOpcode() == Instruction::PtrToInt)
3453  return U->getOperand(0);
3454  // If we find an add of a constant, a multiplied value, or a phi, it's
3455  // likely that the other operand will lead us to the base
3456  // object. We don't have to worry about the case where the
3457  // object address is somehow being computed by the multiply,
3458  // because our callers only care when the result is an
3459  // identifiable object.
3460  if (U->getOpcode() != Instruction::Add ||
3461  (!isa<ConstantInt>(U->getOperand(1)) &&
3462  Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3463  !isa<PHINode>(U->getOperand(1))))
3464  return V;
3465  V = U->getOperand(0);
3466  } else {
3467  return V;
3468  }
3469  assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3470  } while (true);
3471 }
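// Illustrative example (assumed IR):
//   %i = ptrtoint i8* %obj to i64
//   %j = add i64 %i, 16
// Starting from %j, the walk looks through the add (its second operand is a
// constant) and stops at the ptrtoint, returning %obj.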
3472 
3473 /// This is a wrapper around GetUnderlyingObjects and adds support for basic
3474 /// ptrtoint+arithmetic+inttoptr sequences.
3475 /// It returns false if an unidentified object is found in GetUnderlyingObjects.
3476 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3477  SmallVectorImpl<Value *> &Objects,
3478  const DataLayout &DL) {
3479  SmallPtrSet<const Value *, 4> Visited;
3480  SmallVector<const Value *, 4> Working(1, V);
3481  do {
3482  V = Working.pop_back_val();
3483 
3484  SmallVector<Value *, 4> Objs;
3485  GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL);
3486 
3487  for (Value *V : Objs) {
3488  if (!Visited.insert(V).second)
3489  continue;
3490  if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3491  const Value *O =
3492  getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3493  if (O->getType()->isPointerTy()) {
3494  Working.push_back(O);
3495  continue;
3496  }
3497  }
3498  // If GetUnderlyingObjects fails to find an identifiable object,
3499  // getUnderlyingObjectsForCodeGen also fails for safety.
3500  if (!isIdentifiedObject(V)) {
3501  Objects.clear();
3502  return false;
3503  }
3504  Objects.push_back(const_cast<Value *>(V));
3505  }
3506  } while (!Working.empty());
3507  return true;
3508 }
3509 
3510 /// Return true if the only users of this pointer are lifetime markers.
3511 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3512  for (const User *U : V->users()) {
3513  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3514  if (!II) return false;
3515 
3516  if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
3517  II->getIntrinsicID() != Intrinsic::lifetime_end)
3518  return false;
3519  }
3520  return true;
3521 }
3522 
3523 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3524  const Instruction *CtxI,
3525  const DominatorTree *DT) {
3526  const Operator *Inst = dyn_cast<Operator>(V);
3527  if (!Inst)
3528  return false;
3529 
3530  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3531  if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3532  if (C->canTrap())
3533  return false;
3534 
3535  switch (Inst->getOpcode()) {
3536  default:
3537  return true;
3538  case Instruction::UDiv:
3539  case Instruction::URem: {
3540  // x / y is undefined if y == 0.
3541  const APInt *V;
3542  if (match(Inst->getOperand(1), m_APInt(V)))
3543  return *V != 0;
3544  return false;
3545  }
3546  case Instruction::SDiv:
3547  case Instruction::SRem: {
3548  // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
3549  const APInt *Numerator, *Denominator;
3550  if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3551  return false;
3552  // We cannot hoist this division if the denominator is 0.
3553  if (*Denominator == 0)
3554  return false;
3555  // It's safe to hoist if the denominator is not 0 or -1.
3556  if (*Denominator != -1)
3557  return true;
3558  // At this point we know that the denominator is -1. It is safe to hoist as
3559  // long we know that the numerator is not INT_MIN.
3560  if (match(Inst->getOperand(0), m_APInt(Numerator)))
3561  return !Numerator->isMinSignedValue();
3562  // The numerator *might* be MinSignedValue.
3563  return false;
3564  }
3565  case Instruction::Load: {
3566  const LoadInst *LI = cast<LoadInst>(Inst);
3567  if (!LI->isUnordered() ||
3568  // Speculative load may create a race that did not exist in the source.
3569  LI->getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
3570  // Speculative load may load data from dirty regions.
3571  LI->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
3572  LI->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
3573  return false;
3574  const DataLayout &DL = LI->getModule()->getDataLayout();
3575  return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3576  LI->getAlignment(), DL, CtxI, DT);
3577  }
3578  case Instruction::Call: {
3579  auto *CI = cast<const CallInst>(Inst);
3580  const Function *Callee = CI->getCalledFunction();
3581 
3582  // The called function could have undefined behavior or side-effects, even
3583  // if marked readnone nounwind.
3584  return Callee && Callee->isSpeculatable();
3585  }
3586  case Instruction::VAArg:
3587  case Instruction::Alloca:
3588  case Instruction::Invoke:
3589  case Instruction::PHI:
3590  case Instruction::Store:
3591  case Instruction::Ret:
3592  case Instruction::Br:
3593  case Instruction::IndirectBr:
3594  case Instruction::Switch:
3595  case Instruction::Unreachable:
3596  case Instruction::Fence:
3597  case Instruction::AtomicRMW:
3598  case Instruction::AtomicCmpXchg:
3599  case Instruction::LandingPad:
3600  case Instruction::Resume:
3601  case Instruction::CatchSwitch:
3602  case Instruction::CatchPad:
3603  case Instruction::CatchRet:
3604  case Instruction::CleanupPad:
3605  case Instruction::CleanupRet:
3606  return false; // Misc instructions which have effects
3607  }
3608 }
3609 
3610 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3611  return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3612 }
3613 
3614 OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
3615  const Value *RHS,
3616  const DataLayout &DL,
3617  AssumptionCache *AC,
3618  const Instruction *CxtI,
3619  const DominatorTree *DT) {
3620  // Multiplying n * m significant bits yields a result of n + m significant
3621  // bits. If the total number of significant bits does not exceed the
3622  // result bit width (minus 1), there is no overflow.
3623  // This means if we have enough leading zero bits in the operands
3624  // we can guarantee that the result does not overflow.
3625  // Ref: "Hacker's Delight" by Henry Warren
3626  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
3627  KnownBits LHSKnown(BitWidth);
3628  KnownBits RHSKnown(BitWidth);
3629  computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3630  computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
3631  // Note that underestimating the number of zero bits gives a more
3632  // conservative answer.
3633  unsigned ZeroBits = LHSKnown.countMinLeadingZeros() +
3634  RHSKnown.countMinLeadingZeros();
3635  // First handle the easy case: if we have enough zero bits there's
3636  // definitely no overflow.
3637  if (ZeroBits >= BitWidth)
3638  return OverflowResult::NeverOverflows;
3639 
3640  // Get the largest possible values for each operand.
3641  APInt LHSMax = ~LHSKnown.Zero;
3642  APInt RHSMax = ~RHSKnown.Zero;
3643 
3644  // We know the multiply operation doesn't overflow if the maximum values for
3645  // each operand will not overflow after we multiply them together.
3646  bool MaxOverflow;
3647  (void)LHSMax.umul_ov(RHSMax, MaxOverflow);
3648  if (!MaxOverflow)
3649  return OverflowResult::NeverOverflows;
3650 
3651  // We know it always overflows if multiplying the smallest possible values for
3652  // the operands also results in overflow.
3653  bool MinOverflow;
3654  (void)LHSKnown.One.umul_ov(RHSKnown.One, MinOverflow);
3655  if (MinOverflow)
3656  return OverflowResult::AlwaysOverflows;
3657 
3658  return OverflowResult::MayOverflow;
3659 }
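// Worked example (illustrative): for two i8 operands each known to be at
// most 15, both sides contribute at least four leading zero bits, so
// ZeroBits = 4 + 4 = 8 >= BitWidth and NeverOverflows is returned; indeed
// the largest possible product, 15 * 15 = 225, still fits in 8 bits.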
3660 
3661 OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
3662  const Value *RHS,
3663  const DataLayout &DL,
3664  AssumptionCache *AC,
3665  const Instruction *CxtI,
3666  const DominatorTree *DT) {
3667  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3668  if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
3669  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3670 
3671  if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
3672  // The sign bit is set in both cases: this MUST overflow.
3674  return OverflowResult::AlwaysOverflows;
3675  }
3676 
3677  if (LHSKnown.isNonNegative() && RHSKnown.isNonNegative()) {
3678  // The sign bit is clear in both cases: this CANNOT overflow.
3680  return OverflowResult::NeverOverflows;
3681  }
3682  }
3683 
3684  return OverflowResult::MayOverflow;
3685 }
3686 
3687 /// \brief Return true if we can prove from the operands' known bits that
3688 /// adding the two values will never overflow in the signed sense.
3689 /// Otherwise return false.
3690 static bool checkRippleForSignedAdd(const KnownBits &LHSKnown,
3691  const KnownBits &RHSKnown) {
3692  // Addition of two 2's complement numbers having opposite signs will never
3693  // overflow.
3694  if ((LHSKnown.isNegative() && RHSKnown.isNonNegative()) ||
3695  (LHSKnown.isNonNegative() && RHSKnown.isNegative()))
3696  return true;
3697 
3698  // If either of the values is known to be non-negative, adding them can only
3699  // overflow if the second is also non-negative, so we can assume that.
3700  // Two non-negative numbers will only overflow if there is a carry to the
3701  // sign bit, so we can check if even when the values are as big as possible
3702  // there is no overflow to the sign bit.
3703  if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
3704  APInt MaxLHS = ~LHSKnown.Zero;
3705  MaxLHS.clearSignBit();
3706  APInt MaxRHS = ~RHSKnown.Zero;
3707  MaxRHS.clearSignBit();
3708  APInt Result = std::move(MaxLHS) + std::move(MaxRHS);
3709  return Result.isSignBitClear();
3710  }
3711 
3712  // If either of the values is known to be negative, adding them can only
3713  // overflow if the second is also negative, so we can assume that.
3714  // Two negative numbers will only overflow if there is no carry to the sign
3715  // bit, so we can check if even when the values are as small as possible
3716  // there is overflow to the sign bit.
3717  if (LHSKnown.isNegative() || RHSKnown.isNegative()) {
3718  APInt MinLHS = LHSKnown.One;
3719  MinLHS.clearSignBit();
3720  APInt MinRHS = RHSKnown.One;
3721  MinRHS.clearSignBit();
3722  APInt Result = std::move(MinLHS) + std::move(MinRHS);
3723  return Result.isSignBitSet();
3724  }
3725 
3726  // If we reached here it means that we know nothing about the sign bits.
3727  // In this case we can't know if there will be an overflow, since by
3728  // changing the sign bits any two values can be made to overflow.
3729  return false;
3730 }
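// Worked example (illustrative, i8): if both operands are known
// non-negative and at most 0x3f (so KnownBits.Zero covers the top two
// bits), then MaxLHS + MaxRHS = 0x3f + 0x3f = 0x7e keeps the sign bit
// clear, no carry can ripple into it, and the function returns true.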
3731 
3732 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
3733  const Value *RHS,
3734  const AddOperator *Add,
3735  const DataLayout &DL,
3736  AssumptionCache *AC,
3737  const Instruction *CxtI,
3738  const DominatorTree *DT) {
3739  if (Add && Add->hasNoSignedWrap()) {
3740  return OverflowResult::NeverOverflows;
3741  }
3742 
3743  // If LHS and RHS each have at least two sign bits, the addition will look
3744  // like
3745  //
3746  // XX..... +
3747  // YY.....
3748  //
3749  // If the carry into the most significant position is 0, X and Y can't both
3750  // be 1 and therefore the carry out of the addition is also 0.
3751  //
3752  // If the carry into the most significant position is 1, X and Y can't both
3753  // be 0 and therefore the carry out of the addition is also 1.
3754  //
3755  // Since the carry into the most significant position is always equal to
3756  // the carry out of the addition, there is no signed overflow.
3757  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
3758  ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
3759  return OverflowResult::NeverOverflows;
3760 
3761  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
3762  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
3763 
3764  if (checkRippleForSignedAdd(LHSKnown, RHSKnown))
3765  return OverflowResult::NeverOverflows;
3766 
3767  // The remaining code needs Add to be available. Return early if it is not.
3768  if (!Add)
3769  return OverflowResult::MayOverflow;
3770 
3771  // If the sign of Add is the same as at least one of the operands, this add
3772  // CANNOT overflow. This is particularly useful when the sum is
3773  // @llvm.assume'ed non-negative rather than proved so from analyzing its
3774  // operands.
3775  bool LHSOrRHSKnownNonNegative =
3776  (LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
3777  bool LHSOrRHSKnownNegative =
3778  (LHSKnown.isNegative() || RHSKnown.isNegative());
3779  if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
3780  KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT);
3781  if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
3782  (AddKnown.isNegative() && LHSOrRHSKnownNegative)) {
3783  return OverflowResult::NeverOverflows;
3784  }
3785  }
3786 
3787  return OverflowResult::MayOverflow;
3788 }
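// Illustrative example for the sign-bit argument above: if both operands
// are of the form 'sext i8 %x to i16', each has at least 9 sign bits, so
// the ComputeNumSignBits check returns NeverOverflows; indeed the sums stay
// within [-256, 254], far from the i16 limits.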
3789 
3790 bool llvm::isOverflowIntrinsicNoWrap(const IntrinsicInst *II,
3791  const DominatorTree &DT) {
3792 #ifndef NDEBUG
3793  auto IID = II->getIntrinsicID();
3794  assert((IID == Intrinsic::sadd_with_overflow ||
3795  IID == Intrinsic::uadd_with_overflow ||
3796  IID == Intrinsic::ssub_with_overflow ||
3797  IID == Intrinsic::usub_with_overflow ||
3798  IID == Intrinsic::smul_with_overflow ||
3799  IID == Intrinsic::umul_with_overflow) &&
3800  "Not an overflow intrinsic!");
3801 #endif
3802 
3803  SmallVector<const BranchInst *, 2> GuardingBranches;
3804  SmallVector<const ExtractValueInst *, 2> Results;
3805 
3806  for (const User *U : II->users()) {
3807  if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
3808  assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
3809 
3810  if (EVI->getIndices()[0] == 0)
3811  Results.push_back(EVI);
3812  else {
3813  assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
3814 
3815  for (const auto *U : EVI->users())
3816  if (const auto *B = dyn_cast<BranchInst>(U)) {
3817  assert(B->isConditional() && "How else is it using an i1?");
3818  GuardingBranches.push_back(B);
3819  }
3820  }
3821  } else {
3822  // We are using the aggregate directly in a way we don't want to analyze
3823  // here (storing it to a global, say).
3824  return false;
3825  }
3826  }
3827 
3828  auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
3829  BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
3830  if (!NoWrapEdge.isSingleEdge())
3831  return false;
3832 
3833  // Check if all users of the add are provably no-wrap.
3834  for (const auto *Result : Results) {
3835  // If the extractvalue itself is not executed on overflow, then we don't
3836  // need to check each use separately, since domination is transitive.
3837  if (DT.dominates(NoWrapEdge, Result->getParent()))
3838  continue;
3839 
3840  for (auto &RU : Result->uses())
3841  if (!DT.dominates(NoWrapEdge, RU))
3842  return false;
3843  }
3844 
3845  return true;
3846  };
3847 
3848  return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
3849 }
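// Illustrative IR shape (assumed names) that this recognizes:
//   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %sum = extractvalue { i32, i1 } %res, 0
//   %ovf = extractvalue { i32, i1 } %res, 1
//   br i1 %ovf, label %trap, label %cont
// If every use of %sum is dominated by the edge to %cont, the sum is only
// observed when the addition did not wrap.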
3850 
3851 
3852 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
3853  const DataLayout &DL,
3854  AssumptionCache *AC,
3855  const Instruction *CxtI,
3856  const DominatorTree *DT) {
3857  return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
3858  Add, DL, AC, CxtI, DT);
3859 }
3860 
3861 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
3862  const Value *RHS,
3863  const DataLayout &DL,
3864  AssumptionCache *AC,
3865  const Instruction *CxtI,
3866  const DominatorTree *DT) {
3867  return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
3868 }
3869 
3870 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
3871  // A memory operation returns normally if it isn't volatile. A volatile
3872  // operation is allowed to trap.
3873  //
3874  // An atomic operation isn't guaranteed to return in a reasonable amount of
3875  // time because it's possible for another thread to interfere with it for an
3876  // arbitrary length of time, but programs aren't allowed to rely on that.
3877  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
3878  return !LI->isVolatile();
3879  if (const StoreInst *SI = dyn_cast<StoreInst>(I))
3880  return !SI->isVolatile();
3881  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
3882  return !CXI->isVolatile();
3883  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
3884  return !RMWI->isVolatile();
3885  if (const MemIntrinsic *MII = dyn_cast<MemIntrinsic>(I))
3886  return !MII->isVolatile();
3887 
3888  // If there is no successor, then execution can't transfer to it.
3889  if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
3890  return !CRI->unwindsToCaller();
3891  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
3892  return !CatchSwitch->unwindsToCaller();
3893  if (isa<ResumeInst>(I))
3894  return false;
3895  if (isa<ReturnInst>(I))
3896  return false;
3897  if (isa<UnreachableInst>(I))
3898  return false;
3899 
3900  // Calls can throw, or contain an infinite loop, or kill the process.
3901  if (auto CS = ImmutableCallSite(I)) {
3902  // Call sites that throw have implicit non-local control flow.
3903  if (!CS.doesNotThrow())
3904  return false;
3905 
3906  // Non-throwing call sites can loop infinitely, call exit/pthread_exit
3907  // etc. and thus not return. However, LLVM already assumes that
3908  //
3909  // - Thread exiting actions are modeled as writes to memory invisible to
3910  // the program.
3911  //
3912  // - Loops that don't have side effects (side effects are volatile/atomic
3913  // stores and IO) always terminate (see http://llvm.org/PR965).
3914  // Furthermore IO itself is also modeled as writes to memory invisible to
3915  // the program.
3916  //
3917  // We rely on those assumptions here, and use the memory effects of the call
3918  // target as a proxy for checking that it always returns.
3919 
3920  // FIXME: This isn't aggressive enough; a call which only writes to a global
3921  // is guaranteed to return.
3922  return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory() ||
3923  match(I, m_Intrinsic<Intrinsic::assume>()) ||
3924  match(I, m_Intrinsic<Intrinsic::sideeffect>());
3925  }
3926 
3927  // Other instructions return normally.
3928  return true;
3929 }
3930 
3931 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
3932  const Loop *L) {
3933  // The loop header is guaranteed to be executed for every iteration.
3934  //
3935  // FIXME: Relax this constraint to cover all basic blocks that are
3936  // guaranteed to be executed at every iteration.
3937  if (I->getParent() != L->getHeader()) return false;
3938 
3939  for (const Instruction &LI : *L->getHeader()) {
3940  if (&LI == I) return true;
3941  if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
3942  }
3943  llvm_unreachable("Instruction not contained in its own parent basic block.");
3944 }
3945 
3946 bool llvm::propagatesFullPoison(const Instruction *I) {
3947  switch (I->getOpcode()) {
3948  case Instruction::Add:
3949  case Instruction::Sub:
3950  case Instruction::Xor:
3951  case Instruction::Trunc:
3952  case Instruction::BitCast:
3953  case Instruction::AddrSpaceCast:
3954  case Instruction::Mul:
3955  case Instruction::Shl:
3956  case Instruction::GetElementPtr:
3957  // These operations all propagate poison unconditionally. Note that poison
3958  // is not any particular value, so xor or subtraction of poison with
3959  // itself still yields poison, not zero.
3960  return true;
3961 
3962  case Instruction::AShr:
3963  case Instruction::SExt:
3964  // For these operations, one bit of the input is replicated across
3965  // multiple output bits. A replicated poison bit is still poison.
3966  return true;
3967 
3968  case Instruction::ICmp:
3969  // Comparing poison with any value yields poison. This is why, for
3970  // instance, x s< (x +nsw 1) can be folded to true.
3971  return true;
3972 
3973  default:
3974  return false;
3975  }
3976 }
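// Illustrative example: if %p is full poison, then both
//   %q = add i32 %p, 1
//   %r = icmp slt i32 %q, %n
// are poison as well; a 'select i1 %c, i32 %p, i32 0' need not be, since
// select is deliberately absent from the list above.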
3977 
3978 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
3979  switch (I->getOpcode()) {
3980  case Instruction::Store:
3981  return cast<StoreInst>(I)->getPointerOperand();
3982 
3983  case Instruction::Load:
3984  return cast<LoadInst>(I)->getPointerOperand();
3985 
3986  case Instruction::AtomicCmpXchg:
3987  return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
3988 
3989  case Instruction::AtomicRMW:
3990  return cast<AtomicRMWInst>(I)->getPointerOperand();
3991 
3992  case Instruction::UDiv:
3993  case Instruction::SDiv:
3994  case Instruction::URem:
3995  case Instruction::SRem:
3996  return I->getOperand(1);
3997 
3998  default:
3999  return nullptr;
4000  }
4001 }
4002 
4003 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
4004  // We currently only look for uses of poison values within the same basic
4005  // block, as that makes it easier to guarantee that the uses will be
4006  // executed given that PoisonI is executed.
4007  //
4008  // FIXME: Expand this to consider uses beyond the same basic block. To do
4009  // this, look out for the distinction between post-dominance and strong
4010  // post-dominance.
4011  const BasicBlock *BB = PoisonI->getParent();
4012 
4013  // Set of instructions that we have proved will yield poison if PoisonI
4014  // does.
4015  SmallSet<const Value *, 16> YieldsPoison;
4016  SmallSet<const BasicBlock *, 4> Visited;
4017  YieldsPoison.insert(PoisonI);
4018  Visited.insert(PoisonI->getParent());
4019 
4020  BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
4021 
4022  unsigned Iter = 0;
4023  while (Iter++ < MaxDepth) {
4024  for (auto &I : make_range(Begin, End)) {
4025  if (&I != PoisonI) {
4026  const Value *NotPoison = getGuaranteedNonFullPoisonOp(&I);
4027  if (NotPoison != nullptr && YieldsPoison.count(NotPoison))
4028  return true;
4029  if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4030  return false;
4031  }
4032 
4033  // Mark poison that propagates from I through uses of I.
4034  if (YieldsPoison.count(&I)) {
4035  for (const User *User : I.users()) {
4036  const Instruction *UserI = cast<Instruction>(User);
4037  if (propagatesFullPoison(UserI))
4038  YieldsPoison.insert(User);
4039  }
4040  }
4041  }
4042 
4043  if (auto *NextBB = BB->getSingleSuccessor()) {
4044  if (Visited.insert(NextBB).second) {
4045  BB = NextBB;
4046  Begin = BB->getFirstNonPHI()->getIterator();
4047  End = BB->end();
4048  continue;
4049  }
4050  }
4051 
4052  break;
4053  }
4054  return false;
4055 }
4056 
4057 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4058  if (FMF.noNaNs())
4059  return true;
4060 
4061  if (auto *C = dyn_cast<ConstantFP>(V))
4062  return !C->isNaN();
4063  return false;
4064 }
4065 
4066 static bool isKnownNonZero(const Value *V) {
4067  if (auto *C = dyn_cast<ConstantFP>(V))
4068  return !C->isZero();
4069  return false;
4070 }
4071 
4072 /// Match clamp pattern for float types without regard to NaNs or signed
4073 /// zeros. Given a non-min/max outer cmp/select from the clamp pattern, this
4074 /// function recognizes if it can be substituted by a "canonical" min/max
4075 /// pattern.
4076 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4077  Value *CmpLHS, Value *CmpRHS,
4078  Value *TrueVal, Value *FalseVal,
4079  Value *&LHS, Value *&RHS) {
4080  // Try to match
4081  // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4082  // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4083  // and return description of the outer Max/Min.
4084 
4085  // First, check if select has inverse order:
4086  if (CmpRHS == FalseVal) {
4087  std::swap(TrueVal, FalseVal);
4088  Pred = CmpInst::getInversePredicate(Pred);
4089  }
4090 
4091  // Assume success now. If there's no match, callers should not use these anyway.
4092  LHS = TrueVal;
4093  RHS = FalseVal;
4094 
4095  const APFloat *FC1;
4096  if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4097  return {SPF_UNKNOWN, SPNB_NA, false};
4098 
4099  const APFloat *FC2;
4100  switch (Pred) {
4101  case CmpInst::FCMP_OLT:
4102  case CmpInst::FCMP_OLE:
4103  case CmpInst::FCMP_ULT:
4104  case CmpInst::FCMP_ULE:
4105  if (match(FalseVal,
4106  m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4107  m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4108  FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4109  return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4110  break;
4111  case CmpInst::FCMP_OGT:
4112  case CmpInst::FCMP_OGE:
4113  case CmpInst::FCMP_UGT:
4114  case CmpInst::FCMP_UGE:
4115  if (match(FalseVal,
4116  m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4117  m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4118  FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4119  return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4120  break;
4121  default:
4122  break;
4123  }
4124 
4125  return {SPF_UNKNOWN, SPNB_NA, false};
4126 }
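// Illustrative IR (assumed fast-math context) matched by this routine:
//   %c2 = fcmp olt float %x, 2.0
//   %min = select i1 %c2, float %x, float 2.0   ; min(%x, 2.0)
//   %c1 = fcmp olt float %x, 1.0
//   %r = select i1 %c1, float 1.0, float %min
// Since 1.0 < 2.0, %r is reported as the outer FMAXNUM of the clamp
// max(1.0, min(%x, 2.0)).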
4127 
4128 /// Recognize variations of:
4129 /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
4130 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
4131  Value *CmpLHS, Value *CmpRHS,
4132  Value *TrueVal, Value *FalseVal) {
4133  // Swap the select operands and predicate to match the patterns below.
4134  if (CmpRHS != TrueVal) {
4135  Pred = ICmpInst::getSwappedPredicate(Pred);
4136  std::swap(TrueVal, FalseVal);
4137  }
4138  const APInt *C1;
4139  if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
4140  const APInt *C2;
4141  // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
4142  if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4143  C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
4144  return {SPF_SMAX, SPNB_NA, false};
4145 
4146  // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
4147  if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4148  C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
4149  return {SPF_SMIN, SPNB_NA, false};
4150 
4151  // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
4152  if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4153  C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
4154  return {SPF_UMAX, SPNB_NA, false};
4155 
4156  // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
4157  if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4158  C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
4159  return {SPF_UMIN, SPNB_NA, false};
4160  }
4161  return {SPF_UNKNOWN, SPNB_NA, false};
4162 }
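// Illustrative IR for the first pattern (assumed value %x):
//   %cond2 = icmp slt i32 %x, 255
//   %min = select i1 %cond2, i32 %x, i32 255    ; smin(%x, 255)
//   %cond1 = icmp slt i32 %x, 0
//   %r = select i1 %cond1, i32 0, i32 %min
// This clamps %x to [0, 255]; since C1 = 0 <s C2 = 255, the outer select is
// reported as SPF_SMAX.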
4163 
4164 /// Recognize variations of:
4165 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
4166 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
4167  Value *CmpLHS, Value *CmpRHS,
4168  Value *TrueVal, Value *FalseVal) {
4169  // TODO: Allow FP min/max with nnan/nsz.
4170  assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
4171 
4172  Value *A, *B;
4173  SelectPatternResult L = matchSelectPattern(TrueVal, A, B);
4174  if (!SelectPatternResult::isMinOrMax(L.Flavor))
4175  return {SPF_UNKNOWN, SPNB_NA, false};
4176 
4177  Value *C, *D;
4178  SelectPatternResult R = matchSelectPattern(FalseVal, C, D);
4179  if (L.Flavor != R.Flavor)
4180  return {SPF_UNKNOWN, SPNB_NA, false};
4181 
4182  // We have something like: x Pred y ? min(a, b) : min(c, d).
4183  // Try to match the compare to the min/max operations of the select operands.
4184  // First, make sure we have the right compare predicate.
4185  switch (L.Flavor) {
4186  case SPF_SMIN:
4187  if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
4188  Pred = ICmpInst::getSwappedPredicate(Pred);
4189  std::swap(CmpLHS, CmpRHS);
4190  }
4191  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
4192  break;
4193  return {SPF_UNKNOWN, SPNB_NA, false};
4194  case SPF_SMAX:
4195  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
4196  Pred = ICmpInst::getSwappedPredicate(Pred);
4197  std::swap(CmpLHS, CmpRHS);
4198  }
4199  if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
4200  break;
4201  return {SPF_UNKNOWN, SPNB_NA, false};
4202  case SPF_UMIN:
4203  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
4204  Pred = ICmpInst::getSwappedPredicate(Pred);
4205  std::swap(CmpLHS, CmpRHS);
4206  }
4207  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
4208  break;
4209  return {SPF_UNKNOWN, SPNB_NA, false};
4210  case SPF_UMAX:
4211  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
4212  Pred = ICmpInst::getSwappedPredicate(Pred);
4213  std::swap(CmpLHS, CmpRHS);
4214  }
4215  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
4216  break;
4217  return {SPF_UNKNOWN, SPNB_NA, false};
4218  default:
4219  return {SPF_UNKNOWN, SPNB_NA, false};
4220  }
4221 
4222  // If there is a common operand in the already matched min/max and the other
4223  // min/max operands match the compare operands (either directly or inverted),
4224  // then this is min/max of the same flavor.
4225 
4226  // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4227  // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4228  if (D == B) {
4229  if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4230  match(A, m_Not(m_Specific(CmpRHS)))))
4231  return {L.Flavor, SPNB_NA, false};
4232  }
4233  // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4234  // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4235  if (C == B) {
4236  if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4237  match(A, m_Not(m_Specific(CmpRHS)))))
4238  return {L.Flavor, SPNB_NA, false};
4239  }
4240  // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4241  // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4242  if (D == A) {
4243  if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4244  match(B, m_Not(m_Specific(CmpRHS)))))
4245  return {L.Flavor, SPNB_NA, false};
4246  }
4247  // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4248  // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4249  if (C == A) {
4250  if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4251  match(B, m_Not(m_Specific(CmpRHS)))))
4252  return {L.Flavor, SPNB_NA, false};
4253  }
4254 
4255  return {SPF_UNKNOWN, SPNB_NA, false};
4256 }
4257 
4258 /// Match non-obvious integer minimum and maximum sequences.
4259 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
4260  Value *CmpLHS, Value *CmpRHS,
4261  Value *TrueVal, Value *FalseVal,
4262  Value *&LHS, Value *&RHS) {
4263  // Assume success. If there's no match, callers should not use these anyway.
4264  LHS = TrueVal;
4265  RHS = FalseVal;
4266 
4267  SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
4268  if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4269  return SPR;
4270 
4271  SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
4272  if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
4273  return SPR;
4274 
4275  if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
4276  return {SPF_UNKNOWN, SPNB_NA, false};
4277 
4278  // Z = X -nsw Y
4279  // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
4280  // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
4281  if (match(TrueVal, m_Zero()) &&
4282  match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4283  return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4284 
4285  // Z = X -nsw Y
4286  // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
4287  // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
4288  if (match(FalseVal, m_Zero()) &&
4289  match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4290  return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4291 
4292  const APInt *C1;
4293  if (!match(CmpRHS, m_APInt(C1)))
4294  return {SPF_UNKNOWN, SPNB_NA, false};
4295 
4296  // An unsigned min/max can be written with a signed compare.
4297  const APInt *C2;
4298  if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
4299  (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
4300  // Is the sign bit set?
4301  // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
4302  // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
4303  if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
4304  C2->isMaxSignedValue())
4305  return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4306 
4307  // Is the sign bit clear?
4308  // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
4309  // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
4310  if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
4311  C2->isMinSignedValue())
4312  return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4313  }
4314 
4315  // Look through 'not' ops to find disguised signed min/max.
4316  // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
4317  // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
4318  if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
4319  match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
4320  return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4321 
4322  // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
4323  // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
4324  if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
4325  match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
4326  return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
4327 
4328  return {SPF_UNKNOWN, SPNB_NA, false};
4329 }
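// Illustrative example of the sign-bit trick above (assumed value %x):
//   %cmp = icmp slt i32 %x, 0
//   %r = select i1 %cmp, i32 %x, i32 2147483647
// A negative %x is exactly one that is >u INT_MAX, so this is recognized as
// UMAX(%x, INT_MAX) even though the compare is signed.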
4330 
4331 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
4332  FastMathFlags FMF,
4333  Value *CmpLHS, Value *CmpRHS,
4334  Value *TrueVal, Value *FalseVal,
4335  Value *&LHS, Value *&RHS) {
4336  LHS = CmpLHS;
4337  RHS = CmpRHS;
4338 
4339  // Signed zero may return inconsistent results between implementations.
4340  // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
4341  // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
4342  // Therefore, we behave conservatively and only proceed if at least one of the
4343  // operands is known to not be zero or if we don't care about signed zero.
4344  switch (Pred) {
4345  default: break;
4346  // FIXME: Include OGT/OLT/UGT/ULT.
4347  case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
4348  case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
4349  if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4350  !isKnownNonZero(CmpRHS))
4351  return {SPF_UNKNOWN, SPNB_NA, false};
4352  }
4353 
4354  SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
4355  bool Ordered = false;
4356 
4357  // When given one NaN and one non-NaN input:
4358  // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
4359  // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
4360  // ordered comparison fails), which could be NaN or non-NaN.
4361  // so here we discover exactly what NaN behavior is required/accepted.
4362  if (CmpInst::isFPPredicate(Pred)) {
4363  bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
4364  bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
4365 
4366  if (LHSSafe && RHSSafe) {
4367  // Both operands are known non-NaN.
4368  NaNBehavior = SPNB_RETURNS_ANY;
4369  } else if (CmpInst::isOrdered(Pred)) {
4370  // An ordered comparison will return false when given a NaN, so it
4371  // returns the RHS.
4372  Ordered = true;
4373  if (LHSSafe)
4374  // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
4375  NaNBehavior = SPNB_RETURNS_NAN;
4376  else if (RHSSafe)
4377  NaNBehavior = SPNB_RETURNS_OTHER;
4378  else
4379  // Completely unsafe.
4380  return {SPF_UNKNOWN, SPNB_NA, false};
4381  } else {
4382  Ordered = false;
4383  // An unordered comparison will return true when given a NaN, so it
4384  // returns the LHS.
4385  if (LHSSafe)
4386  // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
4387  NaNBehavior = SPNB_RETURNS_OTHER;
4388  else if (RHSSafe)
4389  NaNBehavior = SPNB_RETURNS_NAN;
4390  else
4391  // Completely unsafe.
4392  return {SPF_UNKNOWN, SPNB_NA, false};
4393  }
4394  }
4395 
4396  if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
4397  std::swap(CmpLHS, CmpRHS);
4398  Pred = CmpInst::getSwappedPredicate(Pred);
4399  if (NaNBehavior == SPNB_RETURNS_NAN)
4400  NaNBehavior = SPNB_RETURNS_OTHER;
4401  else if (NaNBehavior == SPNB_RETURNS_OTHER)
4402  NaNBehavior = SPNB_RETURNS_NAN;
4403  Ordered = !Ordered;
4404  }
4405 
4406  // ([if]cmp X, Y) ? X : Y
4407  if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
4408  switch (Pred) {
4409  default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
4410  case ICmpInst::ICMP_UGT:
4411  case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
4412  case ICmpInst::ICMP_SGT:
4413  case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
4414  case ICmpInst::ICMP_ULT:
4415  case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
4416  case ICmpInst::ICMP_SLT:
4417  case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
4418  case FCmpInst::FCMP_UGT:
4419  case FCmpInst::FCMP_UGE:
4420  case FCmpInst::FCMP_OGT:
4421  case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
4422  case FCmpInst::FCMP_ULT:
4423  case FCmpInst::FCMP_ULE:
4424  case FCmpInst::FCMP_OLT:
4425  case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
4426  }
4427  }
4428 
4429  const APInt *C1;
4430  if (match(CmpRHS, m_APInt(C1))) {
4431  if ((CmpLHS == TrueVal && match(FalseVal, m_Neg(m_Specific(CmpLHS)))) ||
4432  (CmpLHS == FalseVal && match(TrueVal, m_Neg(m_Specific(CmpLHS))))) {
4433 
4434  // ABS(X) ==> (X >s 0) ? X : -X and (X >s -1) ? X : -X
4435  // NABS(X) ==> (X >s 0) ? -X : X and (X >s -1) ? -X : X
4436  if (Pred == ICmpInst::ICMP_SGT &&
4437  (C1->isNullValue() || C1->isAllOnesValue())) {
4438  return {(CmpLHS == TrueVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
4439  }
4440 
4441  // ABS(X) ==> (X <s 0) ? -X : X and (X <s 1) ? -X : X
4442  // NABS(X) ==> (X <s 0) ? X : -X and (X <s 1) ? X : -X
4443  if (Pred == ICmpInst::ICMP_SLT &&
4444  (C1->isNullValue() || C1->isOneValue())) {
4445  return {(CmpLHS == FalseVal) ? SPF_ABS : SPF_NABS, SPNB_NA, false};
4446  }
4447  }
4448  }
4449 
4450  if (CmpInst::isIntPredicate(Pred))
4451  return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4452 
4453  // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
4454  // may return either -0.0 or 0.0, so fcmp/select pair has stricter
4455  // semantics than minNum. Be conservative in such case.
4456  if (NaNBehavior != SPNB_RETURNS_ANY ||
4457  (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4458  !isKnownNonZero(CmpRHS)))
4459  return {SPF_UNKNOWN, SPNB_NA, false};
4460 
4461  return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4462 }
4463 
4464 /// Helps to match a select pattern in case of a type mismatch.
4465 ///
4466 /// The function handles the case when the type of the true and false values of
4467 /// a select instruction differs from the type of the cmp instruction operands
4468 /// because of a cast instruction. The function checks if it is legal to move
4469 /// the cast operation after the "select". If yes, it returns the new second
4470 /// value of the "select" (with the assumption that the cast is moved):
4471 /// 1. As the operand of the cast instruction when both values of the "select"
4472 /// are the same cast instruction.
4473 /// 2. As a restored constant (by applying the reverse cast operation) when the
4474 /// first value of the "select" is a cast operation and the second value is a
4475 /// constant.
4476 /// NOTE: We return only the new second value because the first value could be
4477 /// accessed as operand of cast instruction.
4478 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
4479  Instruction::CastOps *CastOp) {
4480  auto *Cast1 = dyn_cast<CastInst>(V1);
4481  if (!Cast1)
4482  return nullptr;
4483 
4484  *CastOp = Cast1->getOpcode();
4485  Type *SrcTy = Cast1->getSrcTy();
4486  if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
4487  // If V1 and V2 are both the same cast from the same type, look through V1.
4488  if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
4489  return Cast2->getOperand(0);
4490  return nullptr;
4491  }
4492 
4493  auto *C = dyn_cast<Constant>(V2);
4494  if (!C)
4495  return nullptr;
4496 
4497  Constant *CastedTo = nullptr;
4498  switch (*CastOp) {
4499  case Instruction::ZExt:
4500  if (CmpI->isUnsigned())
4501  CastedTo = ConstantExpr::getTrunc(C, SrcTy);
4502  break;
4503  case Instruction::SExt:
4504  if (CmpI->isSigned())
4505  CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
4506  break;
4507  case Instruction::Trunc:
4508  Constant *CmpConst;
4509  if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
4510  CmpConst->getType() == SrcTy) {
4511  // Here we have the following case:
4512  //
4513  // %cond = cmp iN %x, CmpConst
4514  // %tr = trunc iN %x to iK
4515  // %narrowsel = select i1 %cond, iK %tr, iK C
4516  //
4517  // We can always move trunc after select operation:
4518  //
4519  // %cond = cmp iN %x, CmpConst
4520  // %widesel = select i1 %cond, iN %x, iN CmpConst
4521  // %tr = trunc iN %widesel to iK
4522  //
4523  // Note that C could be extended in any way because we don't care about
4524  // upper bits after truncation. It can't be an abs pattern, because that
4525  // would look like:
4526  //
4527  // select i1 %cond, x, -x.
4528  //
4529  // So only a min/max pattern could be matched. Such a match requires the
4530  // widened C == CmpConst, which is why we set the widened C = CmpConst; the
4531  // condition trunc CmpConst == C is checked below.
4532  CastedTo = CmpConst;
4533  } else {
4534  CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
4535  }
4536  break;
4537  case Instruction::FPTrunc:
4538  CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
4539  break;
4540  case Instruction::FPExt:
4541  CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
4542  break;
4543  case Instruction::FPToUI:
4544  CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
4545  break;
4546  case Instruction::FPToSI:
4547  CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
4548  break;
4549  case Instruction::UIToFP:
4550  CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
4551  break;
4552  case Instruction::SIToFP:
4553  CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
4554  break;
4555  default:
4556  break;
4557  }
4558 
4559  if (!CastedTo)
4560  return nullptr;
4561 
4562  // Make sure the cast doesn't lose any information.
4563  Constant *CastedBack =
4564  ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
4565  if (CastedBack != C)
4566  return nullptr;
4567 
4568  return CastedTo;
4569 }
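To see the "cast doesn't lose information" check in isolation: the constant may only be moved through the cast if applying the reverse cast restores it exactly. A minimal standalone sketch (plain C++, not the LLVM API; the values are illustrative):

#include <cstdint>
#include <iostream>

int main() {
  int32_t C = 300;                                      // select constant
  int8_t CastedTo = static_cast<int8_t>(C);             // "trunc" to i8
  int32_t CastedBack = static_cast<int32_t>(CastedTo);  // reverse cast (sext)
  // 300 truncates to 44, which extends back to 44 != 300, so the fold is
  // rejected, just as the CastedBack != C bailout above rejects it.
  std::cout << (CastedBack == C ? "cast preserves C\n"
                                : "cast loses information\n");
  return 0;
}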
4570 
4571 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
4572  Instruction::CastOps *CastOp) {
4573  SelectInst *SI = dyn_cast<SelectInst>(V);
4574  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
4575 
4576  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
4577  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
4578 
4579  CmpInst::Predicate Pred = CmpI->getPredicate();
4580  Value *CmpLHS = CmpI->getOperand(0);
4581  Value *CmpRHS = CmpI->getOperand(1);
4582  Value *TrueVal = SI->getTrueValue();
4583  Value *FalseVal = SI->getFalseValue();
4584  FastMathFlags FMF;
4585  if (isa<FPMathOperator>(CmpI))
4586  FMF = CmpI->getFastMathFlags();
4587 
4588  // Bail out early.
4589  if (CmpI->isEquality())
4590  return {SPF_UNKNOWN, SPNB_NA, false};
4591 
4592  // Deal with type mismatches.
4593  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
4594  if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
4595  // If this is a potential fmin/fmax with a cast to integer, then ignore
4596  // -0.0 because there is no corresponding integer value.
4597  if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
4598  FMF.setNoSignedZeros();
4599  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4600  cast<CastInst>(TrueVal)->getOperand(0), C,
4601  LHS, RHS);
4602  }
4603  if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
4604  // If this is a potential fmin/fmax with a cast to integer, then ignore
4605  // -0.0 because there is no corresponding integer value.
4606  if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
4607  FMF.setNoSignedZeros();
4608  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
4609  C, cast<CastInst>(FalseVal)->getOperand(0),
4610  LHS, RHS);
4611  }
4612  }
4613  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
4614  LHS, RHS);
4615 }
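A hedged usage sketch of this entry point (it is declared in llvm/Analysis/ValueTracking.h); the helper name isSignedMin is hypothetical:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// Returns true if V computes a signed minimum of two values.
static bool isSignedMin(Value *V) {
  Value *LHS, *RHS;
  SelectPatternResult SPR = matchSelectPattern(V, LHS, RHS);
  return SPR.Flavor == SPF_SMIN; // on success, LHS/RHS name the min's operands
}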
4616 
4617 /// Return true if "icmp Pred LHS RHS" is always true.
4618 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
4619  const Value *RHS, const DataLayout &DL,
4620  unsigned Depth) {
4621  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
4622  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
4623  return true;
4624 
4625  switch (Pred) {
4626  default:
4627  return false;
4628 
4629  case CmpInst::ICMP_SLE: {
4630  const APInt *C;
4631 
4632  // LHS s<= LHS +_{nsw} C if C >= 0
4633  if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
4634  return !C->isNegative();
4635  return false;
4636  }
4637 
4638  case CmpInst::ICMP_ULE: {
4639  const APInt *C;
4640 
4641  // LHS u<= LHS +_{nuw} C for any C
4642  if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
4643  return true;
4644 
4645  // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
4646  auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
4647  const Value *&X,
4648  const APInt *&CA, const APInt *&CB) {
4649  if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
4650  match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
4651  return true;
4652 
4653  // If X & C == 0 then (X | C) == X +_{nuw} C
4654  if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
4655  match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
4656  KnownBits Known(CA->getBitWidth());
4657  computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
4658  /*CxtI*/ nullptr, /*DT*/ nullptr);
4659  if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
4660  return true;
4661  }
4662 
4663  return false;
4664  };
4665 
4666  const Value *X;
4667  const APInt *CLHS, *CRHS;
4668  if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
4669  return CLHS->ule(*CRHS);
4670 
4671  return false;
4672  }
4673  }
4674 }
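The second ULE rule leans on the identity noted above: when X & C == 0, the 'or' behaves as an nuw add. A standalone sketch with illustrative constants:

#include <cassert>
#include <cstdint>

int main() {
  // If X & C == 0, then (X | C) == X + C with no unsigned wrap, so for
  // disjoint constants CA u<= CB implies (X | CA) u<= (X | CB).
  uint8_t X = 0xA0, CA = 0x05, CB = 0x0D;
  assert((X & CA) == 0 && (X & CB) == 0);          // disjoint bits
  assert((uint8_t)(X | CA) == (uint8_t)(X + CA));  // 'or' acts as an nuw add
  assert(CA <= CB && (X | CA) <= (X | CB));        // one instance of the rule
  return 0;
}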
4675 
4676 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
4677 /// ALHS ARHS" is true. Otherwise, return None.
4678 static Optional<bool>
4679 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
4680  const Value *ARHS, const Value *BLHS, const Value *BRHS,
4681  const DataLayout &DL, unsigned Depth) {
4682  switch (Pred) {
4683  default:
4684  return None;
4685 
4686  case CmpInst::ICMP_SLT:
4687  case CmpInst::ICMP_SLE:
4688  if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
4689  isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
4690  return true;
4691  return None;
4692 
4693  case CmpInst::ICMP_ULT:
4694  case CmpInst::ICMP_ULE:
4695  if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
4696  isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
4697  return true;
4698  return None;
4699  }
4700 }
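Both cases widen the known interval on each side. A plain-integer instance of the signed rule (the constants are chosen only for illustration):

#include <cassert>

int main() {
  // Rule instance for ICMP_SLT: "ALHS s< ARHS" implies "BLHS s< BRHS"
  // whenever BLHS s<= ALHS and ARHS s<= BRHS (both bounds are widened).
  int ALHS = 5, ARHS = 9;   // premise: 5 s< 9
  int BLHS = 4, BRHS = 10;  // BLHS s<= ALHS and ARHS s<= BRHS
  assert(ALHS < ARHS);
  assert(BLHS <= ALHS && ARHS <= BRHS);
  assert(BLHS < BRHS);      // the conclusion follows
  return 0;
}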
4701 
4702 /// Return true if the operands of the two compares match. IsSwappedOps is true
4703 /// when the operands match but are swapped.
4704 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
4705  const Value *BLHS, const Value *BRHS,
4706  bool &IsSwappedOps) {
4707 
4708  bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
4709  IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
4710  return IsMatchingOps || IsSwappedOps;
4711 }
4712 
4713 /// Return true if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS BRHS" is
4714 /// true. Return false if "icmp1 APred ALHS ARHS" implies "icmp2 BPred BLHS
4715 /// BRHS" is false. Otherwise, return None if we can't infer anything.
4716 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
4717  const Value *ALHS,
4718  const Value *ARHS,
4719  CmpInst::Predicate BPred,
4720  const Value *BLHS,
4721  const Value *BRHS,
4722  bool IsSwappedOps) {
4723  // Canonicalize the operands so they're matching.
4724  if (IsSwappedOps) {
4725  std::swap(BLHS, BRHS);
4726  BPred = ICmpInst::getSwappedPredicate(BPred);
4727  }
4728  if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
4729  return true;
4730  if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
4731  return false;
4732 
4733  return None;
4734 }
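A condensed sketch of the canonicalization above, using the real CmpInst helpers; the wrapper name impliesWhenSwapped is hypothetical:

#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// "a u< b" and "b u> a" state the same fact: swapping a compare's operands
// while swapping its predicate preserves its meaning, which is what the
// IsSwappedOps canonicalization relies on before the matching-cmp tables
// are consulted.
static bool impliesWhenSwapped(CmpInst::Predicate APred,
                               CmpInst::Predicate BPred) {
  CmpInst::Predicate Canon = CmpInst::getSwappedPredicate(BPred);
  return CmpInst::isImpliedTrueByMatchingCmp(APred, Canon);
}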
4735 
4736 /// Return true if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS C2" is
4737 /// true. Return false if "icmp1 APred ALHS C1" implies "icmp2 BPred BLHS
4738 /// C2" is false. Otherwise, return None if we can't infer anything.
4739 static Optional<bool>
4740 isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, const Value *ALHS,
4741  const ConstantInt *C1,
4742  CmpInst::Predicate BPred,
4743  const Value *BLHS, const ConstantInt *C2) {
4744  assert(ALHS == BLHS && "LHS operands must match.");
4745  ConstantRange DomCR =
4746  ConstantRange::makeExactICmpRegion(APred, C1->getValue());
4747  ConstantRange CR =
4748  ConstantRange::makeExactICmpRegion(BPred, C2->getValue());
4749  ConstantRange Intersection = DomCR.intersectWith(CR);
4750  ConstantRange Difference = DomCR.difference(CR);
4751  if (Intersection.isEmptySet())
4752  return false;
4753  if (Difference.isEmptySet())
4754  return true;
4755  return None;
4756 }
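A hedged sketch of the same range reasoning on a concrete pair of compares; demoImplication is a hypothetical name, while the ConstantRange calls are the real API:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// "x u< 8" implies "x u< 16": the dominating range [0, 8) lies entirely
// inside [0, 16), so the set difference is empty and the answer is true.
static bool demoImplication() {
  ConstantRange DomCR = ConstantRange::makeExactICmpRegion(
      CmpInst::ICMP_ULT, APInt(32, 8));  // x in [0, 8)
  ConstantRange CR = ConstantRange::makeExactICmpRegion(
      CmpInst::ICMP_ULT, APInt(32, 16)); // x in [0, 16)
  return DomCR.difference(CR).isEmptySet();
}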
4757 
4758 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
4759 /// false. Otherwise, return None if we can't infer anything.
4760 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
4761  const ICmpInst *RHS,
4762  const DataLayout &DL, bool LHSIsTrue,
4763  unsigned Depth) {
4764  Value *ALHS = LHS->getOperand(0);
4765  Value *ARHS = LHS->getOperand(1);
4766  // The rest of the logic assumes the LHS condition is true. If that's not the
4767  // case, invert the predicate to make it so.
4768  ICmpInst::Predicate APred =
4769  LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
4770 
4771  Value *BLHS = RHS->getOperand(0);
4772  Value *BRHS = RHS->getOperand(1);
4773  ICmpInst::Predicate BPred = RHS->getPredicate();
4774 
4775  // Can we infer anything when the two compares have matching operands?
4776  bool IsSwappedOps;
4777  if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, IsSwappedOps)) {
4778  if (Optional<bool> Implication = isImpliedCondMatchingOperands(
4779  APred, ALHS, ARHS, BPred, BLHS, BRHS, IsSwappedOps))
4780  return Implication;
4781  // No amount of additional analysis will infer the second condition, so
4782  // early exit.
4783  return None;
4784  }
4785 
4786  // Can we infer anything when the LHS operands match and the RHS operands are
4787  // constants (not necessarily matching)?
4788  if (ALHS == BLHS && isa<ConstantInt>(ARHS) && isa<ConstantInt>(BRHS)) {
4789  if (Optional<bool> Implication = isImpliedCondMatchingImmOperands(
4790  APred, ALHS, cast<ConstantInt>(ARHS), BPred, BLHS,
4791  cast<ConstantInt>(BRHS)))
4792  return Implication;
4793  // No amount of additional analysis will infer the second condition, so
4794  // early exit.
4795  return None;
4796  }
4797 
4798  if (APred == BPred)
4799  return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
4800  return None;
4801 }
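The predicate inversion at the top mirrors ordinary logic: a premise known to be false is reasoned about through its negation. A trivial standalone check (values are illustrative):

#include <cassert>

int main() {
  int x = 7, y = 3;
  bool Premise = (x < y);   // "x s< y" is known false here
  bool Inverse = (x >= y);  // so its inverse is a true premise
  assert(!Premise && Inverse);
  (void)Premise; (void)Inverse;
  return 0;
}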
4802 
4803 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
4804 /// false. Otherwise, return None if we can't infer anything. We expect the
4805 /// RHS to be an icmp and the LHS to be an 'and' or an 'or' instruction.
4806 static Optional<bool> isImpliedCondAndOr(const BinaryOperator *LHS,
4807  const ICmpInst *RHS,
4808  const DataLayout &DL, bool LHSIsTrue,
4809  unsigned Depth) {
4810  // The LHS must be an 'or' or an 'and' instruction.
4811  assert((LHS->getOpcode() == Instruction::And ||
4812  LHS->getOpcode() == Instruction::Or) &&
4813  "Expected LHS to be 'and' or 'or'.");
4814 
4815  assert(Depth <= MaxDepth && "Hit recursion limit");
4816 
4817  // If the result of an 'or' is false, then we know both legs of the 'or' are
4818  // false. Similarly, if the result of an 'and' is true, then we know both
4819  // legs of the 'and' are true.
4820  Value *ALHS, *ARHS;
4821  if ((!LHSIsTrue && match(LHS, m_Or(m_Value(ALHS), m_Value(ARHS)))) ||
4822  (LHSIsTrue && match(LHS, m_And(m_Value(ALHS), m_Value(ARHS))))) {
4823  // FIXME: Make this non-recursive.
4824  if (Optional<bool> Implication =
4825  isImpliedCondition(ALHS, RHS, DL, LHSIsTrue, Depth + 1))
4826  return Implication;
4827  if (Optional<bool> Implication =
4828  isImpliedCondition(ARHS, RHS, DL, LHSIsTrue, Depth + 1))
4829  return Implication;
4830  return None;
4831  }
4832  return None;
4833 }
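The decomposition rests on two truth-table facts, shown standalone below: a true 'and' makes both legs true, and a false 'or' makes both legs false, so a single leg can serve as the premise for RHS.

#include <cassert>

int main() {
  bool A = true, B = true;
  if (A && B) {      // a true 'and' forces both legs true
    assert(A && B);
  }
  bool C = false, D = false;
  if (!(C || D)) {   // a false 'or' forces both legs false
    assert(!C && !D);
  }
  return 0;
}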
4834 
4835 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
4836  const DataLayout &DL, bool LHSIsTrue,
4837  unsigned Depth) {
4838  // Bail out when we hit the limit.
4839  if (Depth == MaxDepth)
4840  return None;
4841 
4842  // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
4843  // example.
4844  if (LHS->getType() != RHS->getType())
4845  return None;
4846 
4847  Type *OpTy = LHS->getType();
4848  assert(OpTy->isIntOrIntVectorTy(1) && "Expected integer type only!");
4849 
4850  // LHS ==> RHS by definition
4851  if (LHS == RHS)
4852  return LHSIsTrue;
4853 
4854  // FIXME: Extend the code below to handle vectors.
4855  if (OpTy->isVectorTy())
4856  return None;
4857 
4858  assert(OpTy->isIntegerTy(1) && "implied by above");
4859 
4860  // Both LHS and RHS are icmps.
4861  const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
4862  const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS);
4863  if (LHSCmp && RHSCmp)
4864  return isImpliedCondICmps(LHSCmp, RHSCmp, DL, LHSIsTrue, Depth);
4865 
4866  // The LHS should be an 'or' or an 'and' instruction. We expect the RHS to be
4867  // an icmp. FIXME: Add support for and/or on the RHS.
4868  const BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHS);
4869  if (LHSBO && RHSCmp) {
4870  if ((LHSBO->getOpcode() == Instruction::And ||
4871  LHSBO->getOpcode() == Instruction::Or))
4872  return isImpliedCondAndOr(LHSBO, RHSCmp, DL, LHSIsTrue, Depth);
4873  }
4874  return None;
4875 }
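A hedged sketch of how a client might call this public entry point; queryImplication and its parameters are hypothetical, while the isImpliedCondition signature is the one defined above:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// Given that Cond was just branched on and taken, ask whether another i1
// value Other is thereby decided. None means "nothing can be inferred".
static void queryImplication(Value *Cond, Value *Other, const DataLayout &DL) {
  if (Optional<bool> Implied =
          isImpliedCondition(Cond, Other, DL, /*LHSIsTrue=*/true)) {
    // *Implied is true if Cond being true forces Other to be true, and
    // false if it forces Other to be false.
    (void)*Implied;
  }
}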