//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

const unsigned MaxDepth = 6;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getIndexTypeSizeInBits(Ty);
}
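
// For example (illustrative): getBitWidth(i32) == 32, and for <4 x i32>
// getScalarSizeInBits() also yields the element width 32. Only for pointer
// (or vector-of-pointer) types, where the scalar size is 0, do we fall back
// to the DataLayout's index type size.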

namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// Set of assumptions that should be excluded from further queries.
  /// This is because of the potential for mutual recursion to cause
  /// computeKnownBits to repeatedly visit the same assume intrinsic. The
  /// classic case of this is assume(x = y), which will attempt to determine
  /// bits in x from bits in y, which will attempt to determine bits in y from
  /// bits in x, etc. Regarding the mutual recursion, computeKnownBits can call
  /// isKnownNonZero, which calls computeKnownBits and isKnownToBeAPowerOfTwo
  /// (all of which can call computeKnownBits), and so on.
  std::array<const Value *, MaxDepth> Excluded;

  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ;

  unsigned NumExcluded = 0;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, bool UseInstrInfo,
        OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}

  Query(const Query &Q, const Value *NewExcl)
      : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
        NumExcluded(Q.NumExcluded) {
    Excluded = Q.Excluded;
    Excluded[NumExcluded++] = NewExcl;
    assert(NumExcluded <= Excluded.size());
  }

  bool isExcluded(const Value *Value) const {
    if (NumExcluded == 0)
      return false;
    auto End = Excluded.begin() + NumExcluded;
    return std::find(Excluded.begin(), End, Value) != End;
  }
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static void computeKnownBits(const Value *V, KnownBits &Known,
                             unsigned Depth, const Query &Q);

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
  Value *M;
  if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(RHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
      match(LHS, m_c_And(m_Specific(M), m_Value())))
    return true;
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}
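
// Example (illustrative): with LHS = (X & ~M) and RHS = (Y & M) the two
// operands can never have a set bit in common, so a caller such as
// InstCombine may rewrite LHS + RHS as the cheaper LHS | RHS. The known-bits
// fallback above catches the same property when it is only visible through
// computeKnownBits, e.g. LHS known to be a multiple of 16 and RHS known to
// be at most 15.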

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI) {
  for (const User *U : CxtI->users()) {
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
      if (IC->isEquality())
        if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
          if (C->isNullValue())
            continue;
    return false;
  }
  return true;
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2,
                           Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q);

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  unsigned BitWidth = KnownOut.getBitWidth();

  // If an initial sequence of bits in the result is not needed, the
  // corresponding bits in the operands are not needed.
  KnownBits LHSKnown(BitWidth);
  computeKnownBits(Op0, LHSKnown, Depth + 1, Q);
  computeKnownBits(Op1, Known2, Depth + 1, Q);

  KnownOut = KnownBits::computeForAddSub(Add, NSW, LHSKnown, Known2);
}
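
// Worked example (illustrative): if Op0 has known low bits ...00 and Op1 has
// known low bits ..100, no carry can be produced below bit 2, so
// computeForAddSub reports the sum's low three bits as 100. Unknown higher
// bits of either operand leave the corresponding result bits unknown.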

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                KnownBits &Known, KnownBits &Known2,
                                unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(Op1, Known, Depth + 1, Q);
  computeKnownBits(Op0, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative = (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
                           isKnownNonZero(Op0, Depth, Q)) ||
                          (isKnownNegativeOp0 && isKnownNonNegativeOp1 &&
                           isKnownNonZero(Op1, Depth, Q));
    }
  }

  assert(!Known.hasConflict() && !Known2.hasConflict());
  // Compute a conservative estimate for high known-0 bits.
  unsigned LeadZ = std::max(Known.countMinLeadingZeros() +
                            Known2.countMinLeadingZeros(),
                            BitWidth) - BitWidth;
  LeadZ = std::min(LeadZ, BitWidth);

  // The result of the bottom bits of an integer multiply can be
  // inferred by looking at the bottom bits of both operands and
  // multiplying them together.
  // We can infer at least the minimum number of known trailing bits
  // of both operands. Depending on number of trailing zeros, we can
  // infer more bits, because (a*b) <=> ((a/m) * (b/n)) * (m*n) assuming
  // a and b are divisible by m and n respectively.
  // We then calculate how many of those bits are inferrable and set
  // the output. For example, the i8 mul:
  //  a = XXXX1100 (12)
  //  b = XXXX1110 (14)
  // We know the bottom 3 bits are zero since the first can be divided by
  // 4 and the second by 2, thus having ((12/4) * (14/2)) * (2*4).
  // Applying the multiplication to the trimmed arguments gets:
  //    XX11 (3)
  //    X111 (7)
  // -------
  //    XX11
  //   XX11
  //  XX11
  // XX11
  // -------
  // XXXXX01
  // Which allows us to infer the 2 LSBs. Since we're multiplying the result
  // by 8, the bottom 3 bits will be 0, so we can infer a total of 5 bits.
  // The proof for this can be described as:
  // Pre: (C1 >= 0) && (C1 < (1 << C5)) && (C2 >= 0) && (C2 < (1 << C6)) &&
  //      (C7 == (1 << (umin(countTrailingZeros(C1), C5) +
  //                    umin(countTrailingZeros(C2), C6) +
  //                    umin(C5 - umin(countTrailingZeros(C1), C5),
  //                         C6 - umin(countTrailingZeros(C2), C6)))) - 1)
  // %aa = shl i8 %a, C5
  // %bb = shl i8 %b, C6
  // %aaa = or i8 %aa, C1
  // %bbb = or i8 %bb, C2
  // %mul = mul i8 %aaa, %bbb
  // %mask = and i8 %mul, C7
  //   =>
  // %mask = i8 ((C1*C2)&C7)
  // Where C5, C6 describe the known bits of %a, %b
  // C1, C2 describe the known bottom bits of %a, %b.
  // C7 describes the mask of the known bits of the result.
  APInt Bottom0 = Known.One;
  APInt Bottom1 = Known2.One;

  // How many times we'd be able to divide each argument by 2 (shr by 1).
  // This gives us the number of trailing zeros on the multiplication result.
  unsigned TrailBitsKnown0 = (Known.Zero | Known.One).countTrailingOnes();
  unsigned TrailBitsKnown1 = (Known2.Zero | Known2.One).countTrailingOnes();
  unsigned TrailZero0 = Known.countMinTrailingZeros();
  unsigned TrailZero1 = Known2.countMinTrailingZeros();
  unsigned TrailZ = TrailZero0 + TrailZero1;

  // Figure out the fewest known-bits operand.
  unsigned SmallestOperand = std::min(TrailBitsKnown0 - TrailZero0,
                                      TrailBitsKnown1 - TrailZero1);
  unsigned ResultBitsKnown = std::min(SmallestOperand + TrailZ, BitWidth);

  APInt BottomKnown = Bottom0.getLoBits(TrailBitsKnown0) *
                      Bottom1.getLoBits(TrailBitsKnown1);

  Known.resetAll();
  Known.Zero.setHighBits(LeadZ);
  Known.Zero |= (~BottomKnown).getLoBits(ResultBitsKnown);
  Known.One |= BottomKnown.getLoBits(ResultBitsKnown);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();

    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    Known.One &= Range.getUnsignedMax() & Mask;
    Known.Zero &= ~Range.getUnsignedMax() & Mask;
  }
}
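
// Example (illustrative): !range metadata {i8 0, i8 16} describes the
// half-open interval [0, 16). Its max (15) and min (0) share the common
// prefix 0000, so the loop above sets the top four bits of Known.Zero and
// the value is known to be at most 15.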

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
                       return EphValues.count(U);
                     })) {
      if (V == E)
        return true;

      if (V == I || isSafeToSpeculativelyExecute(V)) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
               J != JE; ++J)
            WorkSet.push_back(*J);
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      switch (F->getIntrinsicID()) {
      default: break;
      // FIXME: This list is repeated from NoTTI::getIntrinsicCost.
      case Intrinsic::assume:
      case Intrinsic::sideeffect:
      case Intrinsic::dbg_declare:
      case Intrinsic::dbg_value:
      case Intrinsic::dbg_label:
      case Intrinsic::invariant_start:
      case Intrinsic::invariant_end:
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
      case Intrinsic::objectsize:
      case Intrinsic::ptr_annotation:
      case Intrinsic::var_annotation:
        return true;
      }

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  // With or without a DT, the only remaining case we will check is if the
  // instructions are in the same BB. Give up if that is not the case.
  if (Inv->getParent() != CxtI->getParent())
    return false;

  // If we have a dom tree, then we now know that the assume doesn't dominate
  // the other instruction. If we don't have a dom tree then we can check if
  // the assume is first in the BB.
  if (!DT) {
    // Search forward from the assume until we reach the context (or the end
    // of the block); the common case is that the assume will come first.
    for (auto I = std::next(BasicBlock::const_iterator(Inv)),
              IE = Inv->getParent()->end(); I != IE; ++I)
      if (&*I == CxtI)
        return true;
  }

  // Don't let an assume affect itself - this would cause the problems
  // `isEphemeralValueOf` is trying to prevent, and it would also make
  // the loop below go out of bounds.
  if (Inv == CxtI)
    return false;

  // The context comes first, but they're both in the same block. Make sure
  // there is nothing in between that might interrupt the control flow.
  for (BasicBlock::const_iterator I =
         std::next(BasicBlock::const_iterator(CxtI)), IE(Inv);
       I != IE; ++I)
    if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
      return false;

  return !isEphemeralValueOf(Inv, CxtI);
}
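
// Example (illustrative IR): in
//   %cmp = icmp sgt i32 %x, 0
//   call void @llvm.assume(i1 %cmp)
//   %use = add i32 %x, 1
// the assume is valid at %use (same block, nothing in between), but querying
// it at %cmp itself fails the ephemeral-value check: %cmp only feeds the
// assume, so using it there would let the assume justify itself.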

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");
    if (Q.isExcluded(I))
      continue;

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxDepth)
      continue;

    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));

    CmpInst::Predicate Pred;
    uint64_t C;
    switch (Cmp->getPredicate()) {
    default:
      break;
    case ICmpInst::ICMP_EQ:
      // assume(v = a)
      if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        Known.Zero |= RHSKnown.Zero;
        Known.One |= RHSKnown.One;
      // assume(v & b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        KnownBits MaskKnown(BitWidth);
        computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

        // For those bits in the mask that are known to be one, we can propagate
        // known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & MaskKnown.One;
        Known.One |= RHSKnown.One & MaskKnown.One;
      // assume(~(v & b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        KnownBits MaskKnown(BitWidth);
        computeKnownBits(B, MaskKnown, Depth+1, Query(Q, I));

        // For those bits in the mask that are known to be one, we can propagate
        // inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & MaskKnown.One;
        Known.One |= RHSKnown.Zero & MaskKnown.One;
      // assume(v | b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        KnownBits BKnown(BitWidth);
        computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

        // For those bits in B that are known to be zero, we can propagate known
        // bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One |= RHSKnown.One & BKnown.Zero;
      // assume(~(v | b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        KnownBits BKnown(BitWidth);
        computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & BKnown.Zero;
        Known.One |= RHSKnown.Zero & BKnown.Zero;
      // assume(v ^ b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        KnownBits BKnown(BitWidth);
        computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

        // For those bits in B that are known to be zero, we can propagate known
        // bits from the RHS to V. For those bits in B that are known to be one,
        // we can propagate inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One |= RHSKnown.One & BKnown.Zero;
        Known.Zero |= RHSKnown.One & BKnown.One;
        Known.One |= RHSKnown.Zero & BKnown.One;
      // assume(~(v ^ b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        KnownBits BKnown(BitWidth);
        computeKnownBits(B, BKnown, Depth+1, Query(Q, I));

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V. For those bits in B that are
        // known to be one, we can propagate known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & BKnown.Zero;
        Known.One |= RHSKnown.Zero & BKnown.Zero;
        Known.Zero |= RHSKnown.Zero & BKnown.One;
        Known.One |= RHSKnown.One & BKnown.One;
      // assume(v << c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the right by C.
        RHSKnown.Zero.lshrInPlace(C);
        Known.Zero |= RHSKnown.Zero;
        RHSKnown.One.lshrInPlace(C);
        Known.One |= RHSKnown.One;
      // assume(~(v << c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the right by C.
        RHSKnown.One.lshrInPlace(C);
        Known.Zero |= RHSKnown.One;
        RHSKnown.Zero.lshrInPlace(C);
        Known.One |= RHSKnown.Zero;
      // assume(v >> c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the right by C.
        Known.Zero |= RHSKnown.Zero << C;
        Known.One |= RHSKnown.One << C;
      // assume(~(v >> c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the right by C.
        Known.Zero |= RHSKnown.One << C;
        Known.One |= RHSKnown.Zero << C;
      }
      break;
    case ICmpInst::ICMP_SGE:
      // assume(v >=_s c) where c is non-negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));

        if (RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SGT:
      // assume(v >_s c) where c is at least -1.
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));

        if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLE:
      // assume(v <=_s c) where c is negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth + 1, Query(Q, I));

        if (RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLT:
      // assume(v <_s c) where c is non-positive
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

        if (RHSKnown.isZero() || RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_ULE:
      // assume(v <=_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

        // Whatever high bits in c are zero are known to be zero.
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    case ICmpInst::ICMP_ULT:
      // assume(v <_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown(BitWidth);
        computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));

        // If the RHS is known zero, then this assumption must be wrong (nothing
        // is unsigned less than zero). Signal a conflict and get out of here.
        if (RHSKnown.isZero()) {
          Known.Zero.setAllBits();
          Known.One.setAllBits();
          break;
        }

        // Whatever high bits in c are zero are known to be zero (if c is a power
        // of 2, then one more).
        if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, Query(Q, I)))
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
        else
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}
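
// Example (illustrative): given assume((v & 0xF0) == 0x50) for an i8 %v, the
// "assume(v & b = a)" arm above computes MaskKnown.One = 0xF0 and
// RHSKnown.One = 0x50, and so concludes Known.One = 0x50 and
// Known.Zero = 0xA0: the high nibble of %v must be 0101.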

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known. KZF and KOF are
/// operator-specific functions that, given the known-zero or known-one bits
/// respectively, and a shift amount, compute the implied known-zero or
/// known-one bits of the shift operator's result respectively for that shift
/// amount. The results from calling KZF and KOF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, KnownBits &Known, KnownBits &Known2,
    unsigned Depth, const Query &Q,
    function_ref<APInt(const APInt &, unsigned)> KZF,
    function_ref<APInt(const APInt &, unsigned)> KOF) {
  unsigned BitWidth = Known.getBitWidth();

  if (auto *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
    unsigned ShiftAmt = SA->getLimitedValue(BitWidth-1);

    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known.Zero = KZF(Known.Zero, ShiftAmt);
    Known.One = KOF(Known.One, ShiftAmt);
    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive. TODO: Should we just carry on?
  if ((~Known.Zero).uge(BitWidth)) {
    Known.resetAll();
    return;
  }

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();

  // It would be more-clearly correct to use the two temporaries for this
  // calculation. Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero = isKnownNonZero(I->getOperand(1), Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero.hasValue())
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known.Zero &= KZF(Known2.Zero, ShiftAmt);
    Known.One &= KOF(Known2.One, ShiftAmt);
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}
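
// Example (illustrative): for a shl on i8 whose shift amount has known bits
// Zero = 1111'1100 (so the amount lies in [0, 3]), the loop above intersects
// the known bits implied by each of the four feasible amounts; only bits
// that survive every KZF/KOF combination remain known in the result.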

static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
                                         unsigned Depth, const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(Known);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-1 bits are only known if set in both the LHS & RHS.
    Known.One &= Known2.One;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    Known.Zero |= Known2.Zero;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form add(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
    Value *X = nullptr, *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
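
  // Example for the And case above (illustrative): in
  //   %m = add i32 %x, -1
  //   %r = and i32 %x, %m
  // the added constant -1 is odd, so the match fires and bit 0 of %r is
  // known zero -- the classic clear-lowest-set-bit idiom x & (x - 1).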
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  case Instruction::Xor: {
    computeKnownBits(I->getOperand(1), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt KnownZeroOut = (Known.Zero & Known2.Zero) | (Known.One & Known2.One);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    Known.One = (Known.Zero & Known2.One) | (Known.One & Known2.Zero);
    Known.Zero = std::move(KnownZeroOut);
    break;
  }
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
                        Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    // For the purposes of computing leading zeros we can conservatively
    // treat a udiv as a logical right shift by the power of 2 known to
    // be less than the denominator.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    unsigned LeadZ = Known2.countMinLeadingZeros();

    Known2.resetAll();
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    unsigned RHSMaxLeadingZeros = Known2.countMaxLeadingZeros();
    if (RHSMaxLeadingZeros != BitWidth)
      LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSMaxLeadingZeros - 1);

    Known.Zero.setHighBits(LeadZ);
    break;
  }
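
  // Example for the UDiv case above (illustrative): on i8, if the numerator
  // has 3 known leading zeros (it is < 32) and the denominator's highest
  // possibly-set bit is bit 1 (RHSMaxLeadingZeros == 6, so it is >= 2), then
  // LeadZ = min(8, 3 + 8 - 6 - 1) = 4: the quotient is less than 16.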
  case Instruction::Select: {
    const Value *LHS = nullptr, *RHS = nullptr;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
    } else {
      computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
      computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    }

    unsigned MaxHighOnes = 0;
    unsigned MaxHighZeros = 0;
    if (SPF == SPF_SMAX) {
      // If both sides are negative, the result is negative.
      if (Known.isNegative() && Known2.isNegative())
        // We can derive a lower bound on the result by taking the max of the
        // leading one bits.
        MaxHighOnes =
            std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
      // If either side is non-negative, the result is non-negative.
      else if (Known.isNonNegative() || Known2.isNonNegative())
        MaxHighZeros = 1;
    } else if (SPF == SPF_SMIN) {
      // If both sides are non-negative, the result is non-negative.
      if (Known.isNonNegative() && Known2.isNonNegative())
        // We can derive an upper bound on the result by taking the max of the
        // leading zero bits.
        MaxHighZeros = std::max(Known.countMinLeadingZeros(),
                                Known2.countMinLeadingZeros());
      // If either side is negative, the result is negative.
      else if (Known.isNegative() || Known2.isNegative())
        MaxHighOnes = 1;
    } else if (SPF == SPF_UMAX) {
      // We can derive a lower bound on the result by taking the max of the
      // leading one bits.
      MaxHighOnes =
          std::max(Known.countMinLeadingOnes(), Known2.countMinLeadingOnes());
    } else if (SPF == SPF_UMIN) {
      // We can derive an upper bound on the result by taking the max of the
      // leading zero bits.
      MaxHighZeros =
          std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    } else if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (match(RHS, m_Neg(m_Specific(LHS))) &&
          Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
        MaxHighZeros = 1;
    }

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    if (MaxHighOnes > 0)
      Known.One.setHighBits(MaxHighOnes);
    if (MaxHighZeros > 0)
      Known.Zero.setHighBits(MaxHighZeros);
    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getIndexTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.zextOrTrunc(SrcBitWidth, false);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth, true /* ExtendedBitsAreKnownZero */);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
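
  // Example for the SExt case above (illustrative): for sext i8 %x to i32,
  // if %x's sign bit is known one, Known.sext(32) marks bits 8..31 of the
  // result as known one as well; if the sign bit is known zero, those bits
  // become known zero.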
  case Instruction::Shl: {
    // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero << ShiftAmt;
      KZResult.setLowBits(ShiftAmt); // Low bits known 0.
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW && KnownZero.isSignBitSet())
        KZResult.setSignBit();
      return KZResult;
    };

    auto KOF = [NSW](const APInt &KnownOne, unsigned ShiftAmt) {
      APInt KOResult = KnownOne << ShiftAmt;
      if (NSW && KnownOne.isSignBitSet())
        KOResult.setSignBit();
      return KOResult;
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::LShr: {
    // (lshr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      APInt KZResult = KnownZero.lshr(ShiftAmt);
      // High bits known zero.
      KZResult.setHighBits(ShiftAmt);
      return KZResult;
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.lshr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::AShr: {
    // (ashr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
      return KnownZero.ashr(ShiftAmt);
    };

    auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
      return KnownOne.ashr(ShiftAmt);
    };

    computeKnownBitsFromShiftOperator(I, Known, Known2, Depth, Q, KZF, KOF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        APInt LowBits = RA - 1;
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);

        // The low bits of the first operand are unchanged by the srem.
        Known.Zero = Known2.Zero & LowBits;
        Known.One = Known2.One & LowBits;

        // If the first operand is non-negative or has all low bits zero, then
        // the upper bits are all zero.
        if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
          Known.Zero |= ~LowBits;

        // If the first operand is negative and not all low bits are zero, then
        // the upper bits are all one.
        if (Known2.isNegative() && LowBits.intersects(Known2.One))
          Known.One |= ~LowBits;

        assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
    // If it's known zero, our sign bit is also zero.
    if (Known2.isNonNegative())
      Known.makeNonNegative();

    break;
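
  // Example for the SRem case above (illustrative): for srem i32 %x, 4 with
  // %x known non-negative, the result keeps %x's known low two bits and
  // every higher bit is known zero, since the remainder lies in [0, 3].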
  case Instruction::URem: {
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      const APInt &RA = Rem->getValue();
      if (RA.isPowerOf2()) {
        APInt LowBits = (RA - 1);
        computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
        Known.Zero |= ~LowBits;
        Known.One &= LowBits;
        break;
      }
    }

    // Since the result is less than or equal to either operand, any leading
    // zero bits in either operand must also exist in the result.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    unsigned Leaders =
        std::max(Known.countMinLeadingZeros(), Known2.countMinLeadingZeros());
    Known.resetAll();
    Known.Zero.setHighBits(Leaders);
    break;
  }

  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(I);
    unsigned Align = AI->getAlignment();
    if (Align == 0)
      Align = Q.DL.getABITypeAlignment(AI->getAllocatedType());

    if (Align > 0)
      Known.Zero.setLowBits(countTrailingZeros(Align));
    break;
  }
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    KnownBits LocalKnown(BitWidth);
    computeKnownBits(I->getOperand(0), LocalKnown, Depth + 1, Q);
    unsigned TrailZ = LocalKnown.countMinTrailingZeros();

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      Value *Index = I->getOperand(i);
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.

        // Handle case when index is vector zeroinitializer
        Constant *CIndex = cast<Constant>(Index);
        if (CIndex->isZeroValue())
          continue;

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        TrailZ = std::min<unsigned>(TrailZ,
                                    countTrailingZeros(Offset));
      } else {
        // Handle array index arithmetic.
        Type *IndexedTy = GTI.getIndexedType();
        if (!IndexedTy->isSized()) {
          TrailZ = 0;
          break;
        }
        unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
        uint64_t TypeSize = Q.DL.getTypeAllocSize(IndexedTy);
        LocalKnown.Zero = LocalKnown.One = APInt(GEPOpiBits, 0);
        computeKnownBits(Index, LocalKnown, Depth + 1, Q);
        TrailZ = std::min(TrailZ,
                          unsigned(countTrailingZeros(TypeSize) +
                                   LocalKnown.countMinTrailingZeros()));
      }
    }

    Known.Zero.setLowBits(TrailZ);
    break;
  }
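
  // Example for the GetElementPtr case above (illustrative): for
  //   %p = getelementptr i32, i32* %base, i64 %i
  // with a 16-byte-aligned %base, the 4-byte element size contributes
  // countTrailingZeros(4) = 2 trailing zero bits, so %p is known to be at
  // least 4-byte aligned even though %i is unknown.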
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    // Handle the case of a simple two-predecessor recurrence PHI.
    // There's a lot more that could theoretically be done here, but
    // this is sufficient to catch some interesting cases.
    if (P->getNumIncomingValues() == 2) {
      for (unsigned i = 0; i != 2; ++i) {
        Value *L = P->getIncomingValue(i);
        Value *R = P->getIncomingValue(!i);
        Operator *LU = dyn_cast<Operator>(L);
        if (!LU)
          continue;
        unsigned Opcode = LU->getOpcode();
        // Check for operations that have the property that if
        // both their operands have low zero bits, the result
        // will have low zero bits.
        if (Opcode == Instruction::Add ||
            Opcode == Instruction::Sub ||
            Opcode == Instruction::And ||
            Opcode == Instruction::Or ||
            Opcode == Instruction::Mul) {
          Value *LL = LU->getOperand(0);
          Value *LR = LU->getOperand(1);
          // Find a recurrence.
          if (LL == I)
            L = LR;
          else if (LR == I)
            L = LL;
          else
            continue; // Check for recurrence with L and R flipped.
          // Ok, we have a PHI of the form L op= R. Check for low
          // zero bits.
          computeKnownBits(R, Known2, Depth + 1, Q);

          // We need to take the minimum number of known bits
          KnownBits Known3(Known);
          computeKnownBits(L, Known3, Depth + 1, Q);

          Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                         Known3.countMinTrailingZeros()));

          auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
          if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
            // If initial value of recurrence is nonnegative, and we are adding
            // a nonnegative number with nsw, the result can only be nonnegative
            // or poison value regardless of the number of times we execute the
            // add in phi recurrence. If initial value is negative and we are
            // adding a negative number with nsw, the result can only be
            // negative or poison value. Similar arguments apply to sub and mul.
            //
            // (add non-negative, non-negative) --> non-negative
            // (add negative, negative) --> negative
            if (Opcode == Instruction::Add) {
              if (Known2.isNonNegative() && Known3.isNonNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNegative())
                Known.makeNegative();
            }

            // (sub nsw non-negative, negative) --> non-negative
            // (sub nsw negative, non-negative) --> negative
            else if (Opcode == Instruction::Sub && LL == I) {
              if (Known2.isNonNegative() && Known3.isNegative())
                Known.makeNonNegative();
              else if (Known2.isNegative() && Known3.isNonNegative())
                Known.makeNegative();
            }

            // (mul nsw non-negative, non-negative) --> non-negative
            else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
                     Known3.isNonNegative())
              Known.makeNonNegative();
          }

          break;
        }
      }
    }

    // Unreachable blocks may have zero-operand PHI nodes.
    if (P->getNumIncomingValues() == 0)
      break;

    // Otherwise take the unions of the known bit sets of the operands,
    // taking conservative care to avoid excessive recursion.
    if (Depth < MaxDepth - 1 && !Known.Zero && !Known.One) {
      // Skip if every incoming value refers to ourself.
      if (dyn_cast_or_null<UndefValue>(P->hasConstantValue()))
        break;

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      for (Value *IncValue : P->incoming_values()) {
        // Skip direct self references.
        if (IncValue == P) continue;

        Known2 = KnownBits(BitWidth);
        // Recurse, but cap the recursion to one level, because we don't
        // want to waste time spinning around in loops.
        computeKnownBits(IncValue, Known2, MaxDepth - 1, Q);
        Known.Zero &= Known2.Zero;
        Known.One &= Known2.One;
        // If all bits have been ruled out, there's no need to check
        // more operands.
        if (!Known.Zero && !Known.One)
          break;
      }
    }
    break;
  }
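
  // Example for the PHI recurrence above (illustrative): in
  //   %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
  //   %i.next = add nsw i32 %i, 4
  // both the start value and the step have at least two trailing zero bits,
  // so %i is known to be a multiple of 4, and the nsw
  // (add non-negative, non-negative) rule also marks %i as non-negative.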
  case Instruction::Call:
  case Instruction::Invoke:
    // If range metadata is attached to this call, set known bits from that,
    // and then intersect with known bits based on other properties of the
    // function.
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
      computeKnownBits(RV, Known2, Depth + 1, Q);
      Known.Zero |= Known2.Zero;
      Known.One |= Known2.One;
    }
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bitreverse:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.reverseBits();
        Known.One |= Known2.One.reverseBits();
        break;
      case Intrinsic::bswap:
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        Known.Zero |= Known2.Zero.byteSwap();
        Known.One |= Known2.One.byteSwap();
        break;
      case Intrinsic::ctlz: {
        computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
        // If we have a known 1, its position is our upper bound.
        unsigned PossibleLZ = Known2.One.countLeadingZeros();
        // If this call is undefined for 0, the result will be less than 2^n.
        if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
          PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
        unsigned LowBits = Log2_32(PossibleLZ)+1;
        Known.Zero.setBitsFrom(LowBits);
        break;
      }
1494  case Intrinsic::cttz: {
1495  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1496  // If we have a known 1, its position is our upper bound.
1497  unsigned PossibleTZ = Known2.One.countTrailingZeros();
1498  // If this call is undefined for 0, the result will be less than 2^n.
1499  if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1500  PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1501  unsigned LowBits = Log2_32(PossibleTZ)+1;
1502  Known.Zero.setBitsFrom(LowBits);
1503  break;
1504  }
1505  case Intrinsic::ctpop: {
1506  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1507  // We can bound the space the count needs. Also, bits known to be zero
1508  // can't contribute to the population.
1509  unsigned BitsPossiblySet = Known2.countMaxPopulation();
1510  unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1511  Known.Zero.setBitsFrom(LowBits);
1512  // TODO: we could bound Known.One using the lower bound on the number
1513  // of bits which might be set, i.e. Known2.countMinPopulation().
1514  break;
1515  }
1516  case Intrinsic::fshr:
1517  case Intrinsic::fshl: {
1518  const APInt *SA;
1519  if (!match(I->getOperand(2), m_APInt(SA)))
1520  break;
1521 
1522  // Normalize to funnel shift left.
1523  uint64_t ShiftAmt = SA->urem(BitWidth);
1524  if (II->getIntrinsicID() == Intrinsic::fshr)
1525  ShiftAmt = BitWidth - ShiftAmt;
1526 
1527  KnownBits Known3(Known);
1528  computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1529  computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1530 
1531  Known.Zero =
1532  Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1533  Known.One =
1534  Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1535  break;
1536  }
1537  case Intrinsic::uadd_sat:
1538  case Intrinsic::usub_sat: {
1539  bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1540  computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1541  computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1542 
1543  // Add: Leading ones of either operand are preserved.
1544  // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1545  // as leading zeros in the result.
1546  unsigned LeadingKnown;
1547  if (IsAdd)
1548  LeadingKnown = std::max(Known.countMinLeadingOnes(),
1549  Known2.countMinLeadingOnes());
1550  else
1551  LeadingKnown = std::max(Known.countMinLeadingZeros(),
1552  Known2.countMinLeadingOnes());
1553 
1554  Known = KnownBits::computeForAddSub(
1555  IsAdd, /* NSW */ false, Known, Known2);
1556 
1557  // We select between the operation result and all-ones/zero
1558  // respectively, so we can preserve known ones/zeros.
1559  if (IsAdd) {
1560  Known.One.setHighBits(LeadingKnown);
1561  Known.Zero.clearAllBits();
1562  } else {
1563  Known.Zero.setHighBits(LeadingKnown);
1564  Known.One.clearAllBits();
1565  }
1566  break;
1567  }
1568  case Intrinsic::x86_sse42_crc32_64_64:
1569  Known.Zero.setBitsFrom(32);
1570  break;
1571  }
1572  }
1573  break;
1574  case Instruction::ExtractElement:
1575  // Look through extract element. At the moment we keep this simple and skip
1576  // tracking the specific element. But at least we might find information
1577  // valid for all elements of the vector (for example if the vector is sign
1578  // extended, shifted, etc).
1579  computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1580  break;
1581  case Instruction::ExtractValue:
1582  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1583  const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1584  if (EVI->getNumIndices() != 1) break;
1585  if (EVI->getIndices()[0] == 0) {
1586  switch (II->getIntrinsicID()) {
1587  default: break;
1588  case Intrinsic::uadd_with_overflow:
1589  case Intrinsic::sadd_with_overflow:
1590  computeKnownBitsAddSub(true, II->getArgOperand(0),
1591  II->getArgOperand(1), false, Known, Known2,
1592  Depth, Q);
1593  break;
1594  case Intrinsic::usub_with_overflow:
1595  case Intrinsic::ssub_with_overflow:
1596  computeKnownBitsAddSub(false, II->getArgOperand(0),
1597  II->getArgOperand(1), false, Known, Known2,
1598  Depth, Q);
1599  break;
1600  case Intrinsic::umul_with_overflow:
1601  case Intrinsic::smul_with_overflow:
1602  computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1603  Known, Known2, Depth, Q);
1604  break;
1605  }
1606  }
1607  }
1608  }
1609 }
1610 
1611 /// Determine which bits of V are known to be either zero or one and return
1612 /// them.
1613 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1614  KnownBits Known(getBitWidth(V->getType(), Q.DL));
1615  computeKnownBits(V, Known, Depth, Q);
1616  return Known;
1617 }
1618 
1619 /// Determine which bits of V are known to be either zero or one and return
1620 /// them in the Known bit set.
1621 ///
1622 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1623 /// we cannot optimize based on the assumption that it is zero without changing
1624 /// it to be an explicit zero. If we don't change it to zero, other code could
1625  /// be optimized based on the contradictory assumption that it is non-zero.
1626 /// Because instcombine aggressively folds operations with undef args anyway,
1627 /// this won't lose us code quality.
1628 ///
1629 /// This function is defined on values with integer type, values with pointer
1630 /// type, and vectors of integers. In the case
1631  /// where V is a vector, the known zero and known one values are the
1632 /// same width as the vector element, and the bit is set only if it is true
1633 /// for all of the elements in the vector.
1634 void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
1635  const Query &Q) {
1636  assert(V && "No Value?");
1637  assert(Depth <= MaxDepth && "Limit Search Depth");
1638  unsigned BitWidth = Known.getBitWidth();
1639 
1640  assert((V->getType()->isIntOrIntVectorTy(BitWidth) ||
1641  V->getType()->isPtrOrPtrVectorTy()) &&
1642  "Not integer or pointer type!");
1643 
1644  Type *ScalarTy = V->getType()->getScalarType();
1645  unsigned ExpectedWidth = ScalarTy->isPointerTy() ?
1646  Q.DL.getIndexTypeSizeInBits(ScalarTy) : Q.DL.getTypeSizeInBits(ScalarTy);
1647  assert(ExpectedWidth == BitWidth && "V and Known should have same BitWidth");
1648  (void)BitWidth;
1649  (void)ExpectedWidth;
1650 
1651  const APInt *C;
1652  if (match(V, m_APInt(C))) {
1653  // We know all of the bits for a scalar constant or a splat vector constant!
1654  Known.One = *C;
1655  Known.Zero = ~Known.One;
1656  return;
1657  }
1658  // Null and aggregate-zero are all-zeros.
1659  if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1660  Known.setAllZero();
1661  return;
1662  }
1663  // Handle a constant vector by taking the intersection of the known bits of
1664  // each element.
1665  if (const ConstantDataSequential *CDS = dyn_cast<ConstantDataSequential>(V)) {
1666  // We know that CDS must be a vector of integers. Take the intersection of
1667  // each element.
1668  Known.Zero.setAllBits(); Known.One.setAllBits();
1669  for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1670  APInt Elt = CDS->getElementAsAPInt(i);
1671  Known.Zero &= ~Elt;
1672  Known.One &= Elt;
1673  }
1674  return;
1675  }
1676 
1677  if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1678  // We know that CV must be a vector of integers. Take the intersection of
1679  // each element.
1680  Known.Zero.setAllBits(); Known.One.setAllBits();
1681  for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1682  Constant *Element = CV->getAggregateElement(i);
1683  auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1684  if (!ElementCI) {
1685  Known.resetAll();
1686  return;
1687  }
1688  const APInt &Elt = ElementCI->getValue();
1689  Known.Zero &= ~Elt;
1690  Known.One &= Elt;
1691  }
1692  return;
1693  }
1694 
1695  // Start out not knowing anything.
1696  Known.resetAll();
1697 
1698  // We can't imply anything about undefs.
1699  if (isa<UndefValue>(V))
1700  return;
1701 
1702  // There's no point in looking through other users of ConstantData for
1703  // assumptions. Confirm that we've handled them all.
1704  assert(!isa<ConstantData>(V) && "Unhandled constant data!");
1705 
1706  // Limit search depth.
1707  // All recursive calls that increase depth must come after this.
1708  if (Depth == MaxDepth)
1709  return;
1710 
1711  // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
1712  // the bits of its aliasee.
1713  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
1714  if (!GA->isInterposable())
1715  computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
1716  return;
1717  }
1718 
1719  if (const Operator *I = dyn_cast<Operator>(V))
1720  computeKnownBitsFromOperator(I, Known, Depth, Q);
1721 
1722  // Aligned pointers have trailing zeros - refine Known.Zero set
1723  if (V->getType()->isPointerTy()) {
1724  const MaybeAlign Align = V->getPointerAlignment(Q.DL);
1725  if (Align)
1726  Known.Zero.setLowBits(countTrailingZeros(Align->value()));
1727  }
1728 
1729  // computeKnownBitsFromAssume strictly refines Known.
1730  // Therefore, we run it after computeKnownBitsFromOperator.
1731 
1732  // Check whether a nearby assume intrinsic can determine some known bits.
1733  computeKnownBitsFromAssume(V, Known, Depth, Q);
1734 
1735  assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
1736 }
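
// A minimal usage sketch: callers outside this file normally reach the
// analysis through the public wrapper declared in
// llvm/Analysis/ValueTracking.h rather than constructing a Query directly.
// Here V, DL, AC, CxtI and DT are assumed to come from the caller's own
// analysis results.
//
//   KnownBits Known = computeKnownBits(V, DL, /*Depth=*/0, AC, CxtI, DT);
//   if (Known.isNonNegative())
//     ...; // the sign bit of V is known to be clear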
1737 
1738 /// Return true if the given value is known to have exactly one
1739 /// bit set when defined. For vectors, return true if every element is known to
1740 /// be a power of two when defined. Supports values with integer or pointer
1741 /// types and vectors of integers.
1742 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
1743  const Query &Q) {
1744  assert(Depth <= MaxDepth && "Limit Search Depth");
1745 
1746  // Attempt to match against constants.
1747  if (OrZero && match(V, m_Power2OrZero()))
1748  return true;
1749  if (match(V, m_Power2()))
1750  return true;
1751 
1752  // 1 << X is clearly a power of two if the one is not shifted off the end. If
1753  // it is shifted off the end then the result is undefined.
1754  if (match(V, m_Shl(m_One(), m_Value())))
1755  return true;
1756 
1757  // (signmask) >>l X is clearly a power of two if the one is not shifted off
1758  // the bottom. If it is shifted off the bottom then the result is undefined.
1759  if (match(V, m_LShr(m_SignMask(), m_Value())))
1760  return true;
1761 
1762  // The remaining tests are all recursive, so bail out if we hit the limit.
1763  if (Depth++ == MaxDepth)
1764  return false;
1765 
1766  Value *X = nullptr, *Y = nullptr;
1767  // A shift left or a logical shift right of a power of two is a power of two
1768  // or zero.
1769  if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
1770  match(V, m_LShr(m_Value(X), m_Value()))))
1771  return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
1772 
1773  if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
1774  return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
1775 
1776  if (const SelectInst *SI = dyn_cast<SelectInst>(V))
1777  return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
1778  isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
1779 
1780  if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
1781  // A power of two and'd with anything is a power of two or zero.
1782  if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
1783  isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
1784  return true;
1785  // X & (-X) is always a power of two or zero.
1786  if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
1787  return true;
1788  return false;
1789  }
1790 
1791  // Adding a power-of-two or zero to the same power-of-two or zero yields
1792  // either the original power-of-two, a larger power-of-two or zero.
1793  if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
1794  const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
1795  if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
1796  Q.IIQ.hasNoSignedWrap(VOBO)) {
1797  if (match(X, m_And(m_Specific(Y), m_Value())) ||
1798  match(X, m_And(m_Value(), m_Specific(Y))))
1799  if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
1800  return true;
1801  if (match(Y, m_And(m_Specific(X), m_Value())) ||
1802  match(Y, m_And(m_Value(), m_Specific(X))))
1803  if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
1804  return true;
1805 
1806  unsigned BitWidth = V->getType()->getScalarSizeInBits();
1807  KnownBits LHSBits(BitWidth);
1808  computeKnownBits(X, LHSBits, Depth, Q);
1809 
1810  KnownBits RHSBits(BitWidth);
1811  computeKnownBits(Y, RHSBits, Depth, Q);
1812  // If i8 V is a power of two or zero:
1813  // ZeroBits: 1 1 1 0 1 1 1 1
1814  // ~ZeroBits: 0 0 0 1 0 0 0 0
1815  if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
1816  // If OrZero isn't set, we cannot give back a zero result.
1817  // Make sure either the LHS or RHS has a bit set.
1818  if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
1819  return true;
1820  }
1821  }
1822 
1823  // An exact divide or right shift can only shift off zero bits, so the result
1824  // is a power of two only if the first operand is a power of two and not
1825  // copying a sign bit (sdiv int_min, 2).
1826  if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
1827  match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
1828  return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
1829  Depth, Q);
1830  }
1831 
1832  return false;
1833 }
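
// A few IR shapes the matchers above recognize, as a sketch:
//
//   %a = shl i32 1, %n            ; 1 << n: a power of two (or undefined)
//   %b = lshr i32 -2147483648, %n ; signmask >> n: a power of two
//   %c = and i32 %x, %negx        ; with %negx = sub i32 0, %x: x & -x,
//                                 ; a power of two or zero (OrZero case)
//
// The public entry point for this query is isKnownToBeAPowerOfTwo(V, DL,
// OrZero) from llvm/Analysis/ValueTracking.h.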
1834 
1835 /// Test whether a GEP's result is known to be non-null.
1836 ///
1837 /// Uses properties inherent in a GEP to try to determine whether it is known
1838 /// to be non-null.
1839 ///
1840 /// Currently this routine does not support vector GEPs.
1841 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
1842  const Query &Q) {
1843  const Function *F = nullptr;
1844  if (const Instruction *I = dyn_cast<Instruction>(GEP))
1845  F = I->getFunction();
1846 
1847  if (!GEP->isInBounds() ||
1848  NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
1849  return false;
1850 
1851  // FIXME: Support vector-GEPs.
1852  assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
1853 
1854  // If the base pointer is non-null, we cannot walk to a null address with an
1855  // inbounds GEP in address space zero.
1856  if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
1857  return true;
1858 
1859  // Walk the GEP operands and see if any operand introduces a non-zero offset.
1860  // If so, then the GEP cannot produce a null pointer, as doing so would
1861  // inherently violate the inbounds contract within address space zero.
1862  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
1863  GTI != GTE; ++GTI) {
1864  // Struct types are easy -- they must always be indexed by a constant.
1865  if (StructType *STy = GTI.getStructTypeOrNull()) {
1866  ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
1867  unsigned ElementIdx = OpC->getZExtValue();
1868  const StructLayout *SL = Q.DL.getStructLayout(STy);
1869  uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
1870  if (ElementOffset > 0)
1871  return true;
1872  continue;
1873  }
1874 
1875  // If we have a zero-sized type, the index doesn't matter. Keep looping.
1876  if (Q.DL.getTypeAllocSize(GTI.getIndexedType()) == 0)
1877  continue;
1878 
1879  // Fast path the constant operand case both for efficiency and so we don't
1880  // increment Depth when just zipping down an all-constant GEP.
1881  if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
1882  if (!OpC->isZero())
1883  return true;
1884  continue;
1885  }
1886 
1887  // We post-increment Depth here because while isKnownNonZero increments it
1888  // as well, when we pop back up that increment won't persist. We don't want
1889  // to recurse 10k times just because we have 10k GEP operands. We don't
1890  // bail completely out because we want to handle constant GEPs regardless
1891  // of depth.
1892  if (Depth++ >= MaxDepth)
1893  continue;
1894 
1895  if (isKnownNonZero(GTI.getOperand(), Depth, Q))
1896  return true;
1897  }
1898 
1899  return false;
1900 }
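
// Sketch of the walk above on a concrete inbounds GEP:
//
//   %p = getelementptr inbounds { i32, i32 }, { i32, i32 }* %base, i64 0, i32 1
//
// The second struct field sits at a non-zero offset (4 bytes under the usual
// layout), so in address space zero %p cannot be null even when nothing is
// known about %base itself.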
1901 
1902 static bool isKnownNonNullFromDominatingCondition(const Value *V,
1903  const Instruction *CtxI,
1904  const DominatorTree *DT) {
1905  assert(V->getType()->isPointerTy() && "V must be pointer type");
1906  assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
1907 
1908  if (!CtxI || !DT)
1909  return false;
1910 
1911  unsigned NumUsesExplored = 0;
1912  for (auto *U : V->users()) {
1913  // Avoid massive lists
1914  if (NumUsesExplored >= DomConditionsMaxUses)
1915  break;
1916  NumUsesExplored++;
1917 
1918  // If the value is used as an argument to a call or invoke, then argument
1919  // attributes may provide an answer about null-ness.
1920  if (auto CS = ImmutableCallSite(U))
1921  if (auto *CalledFunc = CS.getCalledFunction())
1922  for (const Argument &Arg : CalledFunc->args())
1923  if (CS.getArgOperand(Arg.getArgNo()) == V &&
1924  Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
1925  return true;
1926 
1927  // Consider only compare instructions uniquely controlling a branch
1928  CmpInst::Predicate Pred;
1929  if (!match(const_cast<User *>(U),
1930  m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
1931  (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
1932  continue;
1933 
1934  SmallVector<const User *, 4> WorkList;
1935  SmallPtrSet<const User *, 4> Visited;
1936  for (auto *CmpU : U->users()) {
1937  assert(WorkList.empty() && "Should be!");
1938  if (Visited.insert(CmpU).second)
1939  WorkList.push_back(CmpU);
1940 
1941  while (!WorkList.empty()) {
1942  auto *Curr = WorkList.pop_back_val();
1943 
1944  // If a user is an AND, add all its users to the work list. We only
1945  // propagate "pred != null" condition through AND because it is only
1946  // correct to assume that all conditions of AND are met in true branch.
1947  // TODO: Support similar logic of OR and EQ predicate?
1948  if (Pred == ICmpInst::ICMP_NE)
1949  if (auto *BO = dyn_cast<BinaryOperator>(Curr))
1950  if (BO->getOpcode() == Instruction::And) {
1951  for (auto *BOU : BO->users())
1952  if (Visited.insert(BOU).second)
1953  WorkList.push_back(BOU);
1954  continue;
1955  }
1956 
1957  if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
1958  assert(BI->isConditional() && "uses a comparison!");
1959 
1960  BasicBlock *NonNullSuccessor =
1961  BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
1962  BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
1963  if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
1964  return true;
1965  } else if (Pred == ICmpInst::ICMP_NE && isGuard(Curr) &&
1966  DT->dominates(cast<Instruction>(Curr), CtxI)) {
1967  return true;
1968  }
1969  }
1970  }
1971  }
1972 
1973  return false;
1974 }
1975 
1976 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
1977 /// ensure that the value it's attached to is never Value? 'RangeType'
1978 /// is the type of the value described by the range.
1979 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
1980  const unsigned NumRanges = Ranges->getNumOperands() / 2;
1981  assert(NumRanges >= 1);
1982  for (unsigned i = 0; i < NumRanges; ++i) {
1983  ConstantInt *Lower =
1984  mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
1985  ConstantInt *Upper =
1986  mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
1987  ConstantRange Range(Lower->getValue(), Upper->getValue());
1988  if (Range.contains(Value))
1989  return false;
1990  }
1991  return true;
1992 }
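
// Example of the metadata shape this helper consumes. A load annotated as
//
//   %v = load i8, i8* %p, !range !0
//   !0 = !{i8 1, i8 41}
//
// carries the half-open range [1, 41), which excludes 0, so the query for
// the value 0 returns true and isKnownNonZero below can succeed.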
1993 
1994 /// Return true if the given value is known to be non-zero when defined. For
1995 /// vectors, return true if every element is known to be non-zero when
1996 /// defined. For pointers, if the context instruction and dominator tree are
1997 /// specified, perform context-sensitive analysis and return true if the
1998 /// pointer couldn't possibly be null at the specified instruction.
1999 /// Supports values with integer or pointer type and vectors of integers.
2000 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
2001  if (auto *C = dyn_cast<Constant>(V)) {
2002  if (C->isNullValue())
2003  return false;
2004  if (isa<ConstantInt>(C))
2005  // Must be non-zero due to null test above.
2006  return true;
2007 
2008  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2009  // See the comment for IntToPtr/PtrToInt instructions below.
2010  if (CE->getOpcode() == Instruction::IntToPtr ||
2011  CE->getOpcode() == Instruction::PtrToInt)
2012  if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType()) <=
2013  Q.DL.getTypeSizeInBits(CE->getType()))
2014  return isKnownNonZero(CE->getOperand(0), Depth, Q);
2015  }
2016 
2017  // For constant vectors, check that all elements are undefined or known
2018  // non-zero to determine that the whole vector is known non-zero.
2019  if (auto *VecTy = dyn_cast<VectorType>(C->getType())) {
2020  for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2021  Constant *Elt = C->getAggregateElement(i);
2022  if (!Elt || Elt->isNullValue())
2023  return false;
2024  if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2025  return false;
2026  }
2027  return true;
2028  }
2029 
2030  // A global variable in address space 0 is non null unless extern weak
2031  // or an absolute symbol reference. Other address spaces may have null as a
2032  // valid address for a global, so we can't assume anything.
2033  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2034  if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2035  GV->getType()->getAddressSpace() == 0)
2036  return true;
2037  } else
2038  return false;
2039  }
2040 
2041  if (auto *I = dyn_cast<Instruction>(V)) {
2042  if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2043  // If the possible ranges don't contain zero, then the value is
2044  // definitely non-zero.
2045  if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2046  const APInt ZeroValue(Ty->getBitWidth(), 0);
2047  if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2048  return true;
2049  }
2050  }
2051  }
2052 
2053  // Some of the tests below are recursive, so bail out if we hit the limit.
2054  if (Depth++ >= MaxDepth)
2055  return false;
2056 
2057  // Check for pointer simplifications.
2058  if (V->getType()->isPointerTy()) {
2059  // Alloca never returns null, malloc might.
2060  if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2061  return true;
2062 
2063  // A byval, inalloca, or nonnull argument is never null.
2064  if (const Argument *A = dyn_cast<Argument>(V))
2065  if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
2066  return true;
2067 
2068  // A Load tagged with nonnull metadata is never null.
2069  if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2070  if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2071  return true;
2072 
2073  if (const auto *Call = dyn_cast<CallBase>(V)) {
2074  if (Call->isReturnNonNull())
2075  return true;
2076  if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2077  return isKnownNonZero(RP, Depth, Q);
2078  }
2079  }
2080 
2081 
2082  // Check for recursive pointer simplifications.
2083  if (V->getType()->isPointerTy()) {
2084  if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2085  return true;
2086 
2087  // Look through bitcast operations, GEPs, and int2ptr instructions as they
2088  // do not alter the value, or at least not the nullness property of the
2089  // value, e.g., int2ptr is allowed to zero/sign extend the value.
2090  //
2091  // Note that we have to take special care to avoid looking through
2092  // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2093  // as casts that can alter the value, e.g., AddrSpaceCasts.
2094  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2095  if (isGEPKnownNonNull(GEP, Depth, Q))
2096  return true;
2097 
2098  if (auto *BCO = dyn_cast<BitCastOperator>(V))
2099  return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2100 
2101  if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2102  if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()) <=
2103  Q.DL.getTypeSizeInBits(I2P->getDestTy()))
2104  return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2105  }
2106 
2107  // Similar to int2ptr above, we can look through ptr2int here if the cast
2108  // is a no-op or an extend and not a truncate.
2109  if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2110  if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()) <=
2111  Q.DL.getTypeSizeInBits(P2I->getDestTy()))
2112  return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2113 
2114  unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2115 
2116  // X | Y != 0 if X != 0 or Y != 0.
2117  Value *X = nullptr, *Y = nullptr;
2118  if (match(V, m_Or(m_Value(X), m_Value(Y))))
2119  return isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q);
2120 
2121  // ext X != 0 if X != 0.
2122  if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2123  return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2124 
2125  // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
2126  // if the lowest bit is shifted off the end.
2127  if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2128  // shl nuw can't remove any non-zero bits.
2129  const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2130  if (Q.IIQ.hasNoUnsignedWrap(BO))
2131  return isKnownNonZero(X, Depth, Q);
2132 
2133  KnownBits Known(BitWidth);
2134  computeKnownBits(X, Known, Depth, Q);
2135  if (Known.One[0])
2136  return true;
2137  }
2138  // shr X, Y != 0 if X is negative. Note that the value of the shift is not
2139  // defined if the sign bit is shifted off the end.
2140  else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2141  // shr exact can only shift out zero bits.
2142  const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2143  if (BO->isExact())
2144  return isKnownNonZero(X, Depth, Q);
2145 
2146  KnownBits Known = computeKnownBits(X, Depth, Q);
2147  if (Known.isNegative())
2148  return true;
2149 
2150  // If the shifter operand is a constant, and all of the bits shifted
2151  // out are known to be zero, and X is known non-zero then at least one
2152  // non-zero bit must remain.
2153  if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2154  auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2155  // Is there a known one in the portion not shifted out?
2156  if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2157  return true;
2158  // Are all the bits to be shifted out known zero?
2159  if (Known.countMinTrailingZeros() >= ShiftVal)
2160  return isKnownNonZero(X, Depth, Q);
2161  }
2162  }
2163  // div exact can only produce a zero if the dividend is zero.
2164  else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2165  return isKnownNonZero(X, Depth, Q);
2166  }
2167  // X + Y.
2168  else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2169  KnownBits XKnown = computeKnownBits(X, Depth, Q);
2170  KnownBits YKnown = computeKnownBits(Y, Depth, Q);
2171 
2172  // If X and Y are both non-negative (as signed values) then their sum is not
2173  // zero unless both X and Y are zero.
2174  if (XKnown.isNonNegative() && YKnown.isNonNegative())
2175  if (isKnownNonZero(X, Depth, Q) || isKnownNonZero(Y, Depth, Q))
2176  return true;
2177 
2178  // If X and Y are both negative (as signed values) then their sum is not
2179  // zero unless both X and Y equal INT_MIN.
2180  if (XKnown.isNegative() && YKnown.isNegative()) {
2181  APInt Mask = APInt::getSignedMaxValue(BitWidth);
2182  // The sign bit of X is set. If some other bit is set then X is not equal
2183  // to INT_MIN.
2184  if (XKnown.One.intersects(Mask))
2185  return true;
2186  // The sign bit of Y is set. If some other bit is set then Y is not equal
2187  // to INT_MIN.
2188  if (YKnown.One.intersects(Mask))
2189  return true;
2190  }
2191 
2192  // The sum of a non-negative number and a power of two is not zero.
2193  if (XKnown.isNonNegative() &&
2194  isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2195  return true;
2196  if (YKnown.isNonNegative() &&
2197  isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2198  return true;
2199  }
2200  // X * Y.
2201  else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2202  const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2203  // If X and Y are non-zero then so is X * Y as long as the multiplication
2204  // does not overflow.
2205  if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2206  isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
2207  return true;
2208  }
2209  // (C ? X : Y) != 0 if X != 0 and Y != 0.
2210  else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2211  if (isKnownNonZero(SI->getTrueValue(), Depth, Q) &&
2212  isKnownNonZero(SI->getFalseValue(), Depth, Q))
2213  return true;
2214  }
2215  // PHI
2216  else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2217  // Try to detect a recurrence that monotonically increases from a
2218  // starting value, as these are common as induction variables.
2219  if (PN->getNumIncomingValues() == 2) {
2220  Value *Start = PN->getIncomingValue(0);
2221  Value *Induction = PN->getIncomingValue(1);
2222  if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
2223  std::swap(Start, Induction);
2224  if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
2225  if (!C->isZero() && !C->isNegative()) {
2226  ConstantInt *X;
2227  if (Q.IIQ.UseInstrInfo &&
2228  (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
2229  match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
2230  !X->isNegative())
2231  return true;
2232  }
2233  }
2234  }
2235  // Check if all incoming values are non-zero constants.
2236  bool AllNonZeroConstants = llvm::all_of(PN->operands(), [](Value *V) {
2237  return isa<ConstantInt>(V) && !cast<ConstantInt>(V)->isZero();
2238  });
2239  if (AllNonZeroConstants)
2240  return true;
2241  }
2242 
2243  KnownBits Known(BitWidth);
2244  computeKnownBits(V, Known, Depth, Q);
2245  return Known.One != 0;
2246 }
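
// A minimal usage sketch via the public wrapper from
// llvm/Analysis/ValueTracking.h, with DL, AC, CxtI and DT assumed to come
// from the caller:
//
//   if (isKnownNonZero(Divisor, DL, /*Depth=*/0, AC, CxtI, DT))
//     ...; // e.g. a udiv by Divisor cannot divide by zero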
2247 
2248 /// Return true if V2 == V1 + X, where X is known non-zero.
2249 static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) {
2250  const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2251  if (!BO || BO->getOpcode() != Instruction::Add)
2252  return false;
2253  Value *Op = nullptr;
2254  if (V2 == BO->getOperand(0))
2255  Op = BO->getOperand(1);
2256  else if (V2 == BO->getOperand(1))
2257  Op = BO->getOperand(0);
2258  else
2259  return false;
2260  return isKnownNonZero(Op, 0, Q);
2261 }
2262 
2263 /// Return true if it is known that V1 != V2.
2264 static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) {
2265  if (V1 == V2)
2266  return false;
2267  if (V1->getType() != V2->getType())
2268  // We can't look through casts yet.
2269  return false;
2270  if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q))
2271  return true;
2272 
2273  if (V1->getType()->isIntOrIntVectorTy()) {
2274  // Are any known bits in V1 contradictory to known bits in V2? If V1
2275  // has a known zero where V2 has a known one, they must not be equal.
2276  KnownBits Known1 = computeKnownBits(V1, 0, Q);
2277  KnownBits Known2 = computeKnownBits(V2, 0, Q);
2278 
2279  if (Known1.Zero.intersects(Known2.One) ||
2280  Known2.Zero.intersects(Known1.One))
2281  return true;
2282  }
2283  return false;
2284 }
2285 
2286 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
2287 /// simplify operations downstream. Mask is known to be zero for bits that V
2288 /// cannot have.
2289 ///
2290 /// This function is defined on values with integer type, values with pointer
2291 /// type, and vectors of integers. In the case
2292 /// where V is a vector, the mask, known zero, and known one values are the
2293 /// same width as the vector element, and the bit is set only if it is true
2294 /// for all of the elements in the vector.
2295 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2296  const Query &Q) {
2297  KnownBits Known(Mask.getBitWidth());
2298  computeKnownBits(V, Known, Depth, Q);
2299  return Mask.isSubsetOf(Known.Zero);
2300 }
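
// Sketch, assuming V is an i32: ask whether the low byte of V is known to
// be zero. The mask must have the same bit width as V's scalar type.
//
//   APInt LowByte = APInt::getLowBitsSet(/*numBits=*/32, /*loBitsSet=*/8);
//   if (MaskedValueIsZero(V, LowByte, DL))
//     ...; // V is known to be a multiple of 256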
2301 
2302 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2303 // Returns the input and lower/upper bounds.
2304 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2305  const APInt *&CLow, const APInt *&CHigh) {
2306  assert(isa<Operator>(Select) &&
2307  cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2308  "Input should be a Select!");
2309 
2310  const Value *LHS = nullptr, *RHS = nullptr;
2311  SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2312  if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2313  return false;
2314 
2315  if (!match(RHS, m_APInt(CLow)))
2316  return false;
2317 
2318  const Value *LHS2 = nullptr, *RHS2 = nullptr;
2319  SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2320  if (getInverseMinMaxFlavor(SPF) != SPF2)
2321  return false;
2322 
2323  if (!match(RHS2, m_APInt(CHigh)))
2324  return false;
2325 
2326  if (SPF == SPF_SMIN)
2327  std::swap(CLow, CHigh);
2328 
2329  In = LHS2;
2330  return CLow->sle(*CHigh);
2331 }
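
// The clamp shape this matches, written out as IR for smax(smin(In, 100), -50):
//
//   %lt  = icmp slt i32 %in, 100
//   %min = select i1 %lt, i32 %in, i32 100   ; smin(%in, 100)
//   %gt  = icmp sgt i32 %min, -50
//   %max = select i1 %gt, i32 %min, i32 -50  ; smax(..., -50)
//
// Here CLow = -50 and CHigh = 100, so %max is known to lie in [-50, 100].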
2332 
2333 /// For vector constants, loop over the elements and find the constant with the
2334 /// minimum number of sign bits. Return 0 if the value is not a vector constant
2335 /// or if any element was not analyzed; otherwise, return the count for the
2336 /// element with the minimum number of sign bits.
2337 static unsigned computeNumSignBitsVectorConstant(const Value *V,
2338  unsigned TyBits) {
2339  const auto *CV = dyn_cast<Constant>(V);
2340  if (!CV || !CV->getType()->isVectorTy())
2341  return 0;
2342 
2343  unsigned MinSignBits = TyBits;
2344  unsigned NumElts = CV->getType()->getVectorNumElements();
2345  for (unsigned i = 0; i != NumElts; ++i) {
2346  // If we find a non-ConstantInt, bail out.
2347  auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
2348  if (!Elt)
2349  return 0;
2350 
2351  MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
2352  }
2353 
2354  return MinSignBits;
2355 }
2356 
2357 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2358  const Query &Q);
2359 
2360 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
2361  const Query &Q) {
2362  unsigned Result = ComputeNumSignBitsImpl(V, Depth, Q);
2363  assert(Result > 0 && "At least one sign bit needs to be present!");
2364  return Result;
2365 }
2366 
2367 /// Return the number of times the sign bit of the register is replicated into
2368 /// the other bits. We know that at least 1 bit is always equal to the sign bit
2369 /// (itself), but other cases can give us information. For example, immediately
2370 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
2371 /// other, so we return 3. For vectors, return the number of sign bits for the
2372 /// vector element with the minimum number of known sign bits.
2373 static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
2374  const Query &Q) {
2375  assert(Depth <= MaxDepth && "Limit Search Depth");
2376 
2377  // We return the minimum number of sign bits that are guaranteed to be present
2378  // in V, so for undef we have to conservatively return 1. We don't have the
2379  // same behavior for poison though -- that's a FIXME today.
2380 
2381  Type *ScalarTy = V->getType()->getScalarType();
2382  unsigned TyBits = ScalarTy->isPointerTy() ?
2383  Q.DL.getIndexTypeSizeInBits(ScalarTy) :
2384  Q.DL.getTypeSizeInBits(ScalarTy);
2385 
2386  unsigned Tmp, Tmp2;
2387  unsigned FirstAnswer = 1;
2388 
2389  // Note that ConstantInt is handled by the general computeKnownBits case
2390  // below.
2391 
2392  if (Depth == MaxDepth)
2393  return 1; // Limit search depth.
2394 
2395  if (auto *U = dyn_cast<Operator>(V)) {
2396  switch (Operator::getOpcode(V)) {
2397  default: break;
2398  case Instruction::SExt:
2399  Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
2400  return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
2401 
2402  case Instruction::SDiv: {
2403  const APInt *Denominator;
2404  // sdiv X, C -> adds log(C) sign bits.
2405  if (match(U->getOperand(1), m_APInt(Denominator))) {
2406 
2407  // Ignore non-positive denominator.
2408  if (!Denominator->isStrictlyPositive())
2409  break;
2410 
2411  // Calculate the incoming numerator bits.
2412  unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2413 
2414  // Add floor(log(C)) bits to the numerator bits.
2415  return std::min(TyBits, NumBits + Denominator->logBase2());
2416  }
2417  break;
2418  }
2419 
2420  case Instruction::SRem: {
2421  const APInt *Denominator;
2422  // srem X, C -> we know that the result is within [-C+1,C) when C is a
2423  // positive constant. This lets us put a lower bound on the number of sign
2424  // bits.
2425  if (match(U->getOperand(1), m_APInt(Denominator))) {
2426 
2427  // Ignore non-positive denominator.
2428  if (!Denominator->isStrictlyPositive())
2429  break;
2430 
2431  // Calculate the incoming numerator bits. SRem by a positive constant
2432  // can't lower the number of sign bits.
2433  unsigned NumrBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2434 
2435  // Calculate the leading sign bit constraints by examining the
2436  // denominator. Given that the denominator is positive, there are two
2437  // cases:
2438  //
2439  // 1. the numerator is positive. The result range is [0,C) and [0,C) u<
2440  // (1 << ceilLogBase2(C)).
2441  //
2442  // 2. the numerator is negative. Then the result range is (-C,0] and
2443  // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
2444  //
2445  // Thus a lower bound on the number of sign bits is `TyBits -
2446  // ceilLogBase2(C)`.
2447 
2448  unsigned ResBits = TyBits - Denominator->ceilLogBase2();
2449  return std::max(NumrBits, ResBits);
2450  }
2451  break;
2452  }
2453 
2454  case Instruction::AShr: {
2455  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2456  // ashr X, C -> adds C sign bits. Vectors too.
2457  const APInt *ShAmt;
2458  if (match(U->getOperand(1), m_APInt(ShAmt))) {
2459  if (ShAmt->uge(TyBits))
2460  break; // Bad shift.
2461  unsigned ShAmtLimited = ShAmt->getZExtValue();
2462  Tmp += ShAmtLimited;
2463  if (Tmp > TyBits) Tmp = TyBits;
2464  }
2465  return Tmp;
2466  }
2467  case Instruction::Shl: {
2468  const APInt *ShAmt;
2469  if (match(U->getOperand(1), m_APInt(ShAmt))) {
2470  // shl destroys sign bits.
2471  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2472  if (ShAmt->uge(TyBits) || // Bad shift.
2473  ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
2474  Tmp2 = ShAmt->getZExtValue();
2475  return Tmp - Tmp2;
2476  }
2477  break;
2478  }
2479  case Instruction::And:
2480  case Instruction::Or:
2481  case Instruction::Xor: // NOT is handled here.
2482  // Logical binary ops preserve the number of sign bits at the worst.
2483  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2484  if (Tmp != 1) {
2485  Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2486  FirstAnswer = std::min(Tmp, Tmp2);
2487  // We computed what we know about the sign bits as our first
2488  // answer. Now proceed to the generic code that uses
2489  // computeKnownBits, and pick whichever answer is better.
2490  }
2491  break;
2492 
2493  case Instruction::Select: {
2494  // If we have a clamp pattern, we know that the number of sign bits will
2495  // be the minimum of the clamp min/max range.
2496  const Value *X;
2497  const APInt *CLow, *CHigh;
2498  if (isSignedMinMaxClamp(U, X, CLow, CHigh))
2499  return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
2500 
2501  Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2502  if (Tmp == 1) break;
2503  Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
2504  return std::min(Tmp, Tmp2);
2505  }
2506 
2507  case Instruction::Add:
2508  // Add can have at most one carry bit. Thus we know that the output
2509  // is, at worst, one more bit than the inputs.
2510  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2511  if (Tmp == 1) break;
2512 
2513  // Special case decrementing a value (ADD X, -1):
2514  if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
2515  if (CRHS->isAllOnesValue()) {
2516  KnownBits Known(TyBits);
2517  computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
2518 
2519  // If the input is known to be 0 or 1, the output is 0/-1, which is
2520  // all sign bits set.
2521  if ((Known.Zero | 1).isAllOnesValue())
2522  return TyBits;
2523 
2524  // If we are subtracting one from a positive number, there is no carry
2525  // out of the result.
2526  if (Known.isNonNegative())
2527  return Tmp;
2528  }
2529 
2530  Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2531  if (Tmp2 == 1) break;
2532  return std::min(Tmp, Tmp2) - 1;
2533 
2534  case Instruction::Sub:
2535  Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2536  if (Tmp2 == 1) break;
2537 
2538  // Handle NEG.
2539  if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
2540  if (CLHS->isNullValue()) {
2541  KnownBits Known(TyBits);
2542  computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
2543  // If the input is known to be 0 or 1, the output is 0/-1, which is
2544  // all sign bits set.
2545  if ((Known.Zero | 1).isAllOnesValue())
2546  return TyBits;
2547 
2548  // If the input is known to be positive (the sign bit is known clear),
2549  // the output of the NEG has the same number of sign bits as the
2550  // input.
2551  if (Known.isNonNegative())
2552  return Tmp2;
2553 
2554  // Otherwise, we treat this like a SUB.
2555  }
2556 
2557  // Sub can have at most one carry bit. Thus we know that the output
2558  // is, at worst, one more bit than the inputs.
2559  Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2560  if (Tmp == 1) break;
2561  return std::min(Tmp, Tmp2) - 1;
2562 
2563  case Instruction::Mul: {
2564  // The output of the Mul can be at most twice the valid bits in the
2565  // inputs.
2566  unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2567  if (SignBitsOp0 == 1) break;
2568  unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
2569  if (SignBitsOp1 == 1) break;
2570  unsigned OutValidBits =
2571  (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
2572  return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
2573  }
2574 
2575  case Instruction::PHI: {
2576  const PHINode *PN = cast<PHINode>(U);
2577  unsigned NumIncomingValues = PN->getNumIncomingValues();
2578  // Don't analyze large in-degree PHIs.
2579  if (NumIncomingValues > 4) break;
2580  // Unreachable blocks may have zero-operand PHI nodes.
2581  if (NumIncomingValues == 0) break;
2582 
2583  // Take the minimum of all incoming values. This can't infinitely loop
2584  // because of our depth threshold.
2585  Tmp = ComputeNumSignBits(PN->getIncomingValue(0), Depth + 1, Q);
2586  for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
2587  if (Tmp == 1) return Tmp;
2588  Tmp = std::min(
2589  Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, Q));
2590  }
2591  return Tmp;
2592  }
2593 
2594  case Instruction::Trunc:
2595  // FIXME: it's tricky to do anything useful for this, but it is an
2596  // important case for targets like X86.
2597  break;
2598 
2599  case Instruction::ExtractElement:
2600  // Look through extract element. At the moment we keep this simple and
2601  // skip tracking the specific element. But at least we might find
2602  // information valid for all elements of the vector (for example if the
2603  // vector is sign extended, shifted, etc).
2604  return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
2605 
2606  case Instruction::ShuffleVector: {
2607  // TODO: This is copied almost directly from the SelectionDAG version of
2608  // ComputeNumSignBits. It would be better if we could share common
2609  // code. If not, make sure that changes are translated to the DAG.
2610 
2611  // Collect the minimum number of sign bits that are shared by every vector
2612  // element referenced by the shuffle.
2613  auto *Shuf = cast<ShuffleVectorInst>(U);
2614  int NumElts = Shuf->getOperand(0)->getType()->getVectorNumElements();
2615  int NumMaskElts = Shuf->getMask()->getType()->getVectorNumElements();
2616  APInt DemandedLHS(NumElts, 0), DemandedRHS(NumElts, 0);
2617  for (int i = 0; i != NumMaskElts; ++i) {
2618  int M = Shuf->getMaskValue(i);
2619  assert(M < NumElts * 2 && "Invalid shuffle mask constant");
2620  // For undef elements, we don't know anything about the common state of
2621  // the shuffle result.
2622  if (M == -1)
2623  return 1;
2624  if (M < NumElts)
2625  DemandedLHS.setBit(M % NumElts);
2626  else
2627  DemandedRHS.setBit(M % NumElts);
2628  }
2629  Tmp = std::numeric_limits<unsigned>::max();
2630  if (!!DemandedLHS)
2631  Tmp = ComputeNumSignBits(Shuf->getOperand(0), Depth + 1, Q);
2632  if (!!DemandedRHS) {
2633  Tmp2 = ComputeNumSignBits(Shuf->getOperand(1), Depth + 1, Q);
2634  Tmp = std::min(Tmp, Tmp2);
2635  }
2636  // If we don't know anything, early out and try computeKnownBits
2637  // fall-back.
2638  if (Tmp == 1)
2639  break;
2640  assert(Tmp <= V->getType()->getScalarSizeInBits() &&
2641  "Failed to determine minimum sign bits");
2642  return Tmp;
2643  }
2644  }
2645  }
2646 
2647  // Finally, if we can prove that the top bits of the result are 0's or 1's,
2648  // use this information.
2649 
2650  // If we can examine all elements of a vector constant successfully, we're
2651  // done (we can't do any better than that). If not, keep trying.
2652  if (unsigned VecSignBits = computeNumSignBitsVectorConstant(V, TyBits))
2653  return VecSignBits;
2654 
2655  KnownBits Known(TyBits);
2656  computeKnownBits(V, Known, Depth, Q);
2657 
2658  // If we know that the sign bit is either zero or one, determine the number of
2659  // identical bits in the top of the input value.
2660  return std::max(FirstAnswer, Known.countMinSignBits());
2661 }
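
// Worked example of the AShr case above: after
//
//   %s = ashr i32 %x, 2
//
// the top three bits of %s all equal the sign bit, so if %x was already
// known to have N sign bits, %s has min(N + 2, 32) of them.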
2662 
2663 /// This function computes the integer multiple of Base that equals V. If
2664 /// successful, it returns true and stores the multiple in Multiple. If
2665 /// unsuccessful, it returns false. It looks through SExt instructions
2666 /// only if LookThroughSExt is true.
2667 bool llvm::ComputeMultiple(Value *V, unsigned Base, Value *&Multiple,
2668  bool LookThroughSExt, unsigned Depth) {
2669  assert(V && "No Value?");
2670  assert(Depth <= MaxDepth && "Limit Search Depth");
2671  assert(V->getType()->isIntegerTy() && "Not integer type!");
2672 
2673  Type *T = V->getType();
2674 
2675  ConstantInt *CI = dyn_cast<ConstantInt>(V);
2676 
2677  if (Base == 0)
2678  return false;
2679 
2680  if (Base == 1) {
2681  Multiple = V;
2682  return true;
2683  }
2684 
2685  ConstantExpr *CO = dyn_cast<ConstantExpr>(V);
2686  Constant *BaseVal = ConstantInt::get(T, Base);
2687  if (CO && CO == BaseVal) {
2688  // Multiple is 1.
2689  Multiple = ConstantInt::get(T, 1);
2690  return true;
2691  }
2692 
2693  if (CI && CI->getZExtValue() % Base == 0) {
2694  Multiple = ConstantInt::get(T, CI->getZExtValue() / Base);
2695  return true;
2696  }
2697 
2698  if (Depth == MaxDepth) return false; // Limit search depth.
2699 
2700  Operator *I = dyn_cast<Operator>(V);
2701  if (!I) return false;
2702 
2703  switch (I->getOpcode()) {
2704  default: break;
2705  case Instruction::SExt:
2706  if (!LookThroughSExt) return false;
2707  // otherwise fall through to ZExt
2708  LLVM_FALLTHROUGH;
2709  case Instruction::ZExt:
2710  return ComputeMultiple(I->getOperand(0), Base, Multiple,
2711  LookThroughSExt, Depth+1);
2712  case Instruction::Shl:
2713  case Instruction::Mul: {
2714  Value *Op0 = I->getOperand(0);
2715  Value *Op1 = I->getOperand(1);
2716 
2717  if (I->getOpcode() == Instruction::Shl) {
2718  ConstantInt *Op1CI = dyn_cast<ConstantInt>(Op1);
2719  if (!Op1CI) return false;
2720  // Turn Op0 << Op1 into Op0 * 2^Op1
2721  APInt Op1Int = Op1CI->getValue();
2722  uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
2723  APInt API(Op1Int.getBitWidth(), 0);
2724  API.setBit(BitToSet);
2725  Op1 = ConstantInt::get(V->getContext(), API);
2726  }
2727 
2728  Value *Mul0 = nullptr;
2729  if (ComputeMultiple(Op0, Base, Mul0, LookThroughSExt, Depth+1)) {
2730  if (Constant *Op1C = dyn_cast<Constant>(Op1))
2731  if (Constant *MulC = dyn_cast<Constant>(Mul0)) {
2732  if (Op1C->getType()->getPrimitiveSizeInBits() <
2733  MulC->getType()->getPrimitiveSizeInBits())
2734  Op1C = ConstantExpr::getZExt(Op1C, MulC->getType());
2735  if (Op1C->getType()->getPrimitiveSizeInBits() >
2736  MulC->getType()->getPrimitiveSizeInBits())
2737  MulC = ConstantExpr::getZExt(MulC, Op1C->getType());
2738 
2739  // V == Base * (Mul0 * Op1), so return (Mul0 * Op1)
2740  Multiple = ConstantExpr::getMul(MulC, Op1C);
2741  return true;
2742  }
2743 
2744  if (ConstantInt *Mul0CI = dyn_cast<ConstantInt>(Mul0))
2745  if (Mul0CI->getValue() == 1) {
2746  // V == Base * Op1, so return Op1
2747  Multiple = Op1;
2748  return true;
2749  }
2750  }
2751 
2752  Value *Mul1 = nullptr;
2753  if (ComputeMultiple(Op1, Base, Mul1, LookThroughSExt, Depth+1)) {
2754  if (Constant *Op0C = dyn_cast<Constant>(Op0))
2755  if (Constant *MulC = dyn_cast<Constant>(Mul1)) {
2756  if (Op0C->getType()->getPrimitiveSizeInBits() <
2757  MulC->getType()->getPrimitiveSizeInBits())
2758  Op0C = ConstantExpr::getZExt(Op0C, MulC->getType());
2759  if (Op0C->getType()->getPrimitiveSizeInBits() >
2760  MulC->getType()->getPrimitiveSizeInBits())
2761  MulC = ConstantExpr::getZExt(MulC, Op0C->getType());
2762 
2763  // V == Base * (Mul1 * Op0), so return (Mul1 * Op0)
2764  Multiple = ConstantExpr::getMul(MulC, Op0C);
2765  return true;
2766  }
2767 
2768  if (ConstantInt *Mul1CI = dyn_cast<ConstantInt>(Mul1))
2769  if (Mul1CI->getValue() == 1) {
2770  // V == Base * Op0, so return Op0
2771  Multiple = Op0;
2772  return true;
2773  }
2774  }
2775  }
2776  }
2777 
2778  // We could not determine if V is a multiple of Base.
2779  return false;
2780 }
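
// A minimal usage sketch, assuming V was built from shifts and multiplies:
//
//   Value *Multiple = nullptr;
//   if (ComputeMultiple(V, /*Base=*/8, Multiple, /*LookThroughSExt=*/false))
//     ...; // V == 8 * Multiple; for V = shl i32 %x, 3 this yields %x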
2781 
2782 Intrinsic::ID llvm::getIntrinsicForCallSite(ImmutableCallSite ICS,
2783  const TargetLibraryInfo *TLI) {
2784  const Function *F = ICS.getCalledFunction();
2785  if (!F)
2786  return Intrinsic::not_intrinsic;
2787 
2788  if (F->isIntrinsic())
2789  return F->getIntrinsicID();
2790 
2791  if (!TLI)
2792  return Intrinsic::not_intrinsic;
2793 
2794  LibFunc Func;
2795  // We're going to make assumptions about the semantics of the functions, so
2796  // check that the target knows the function is available in this environment
2797  // and that it does not have local linkage.
2798  if (!F || F->hasLocalLinkage() || !TLI->getLibFunc(*F, Func))
2799  return Intrinsic::not_intrinsic;
2800 
2801  if (!ICS.onlyReadsMemory())
2802  return Intrinsic::not_intrinsic;
2803 
2804  // Otherwise check if we have a call to a function that can be turned into a
2805  // vector intrinsic.
2806  switch (Func) {
2807  default:
2808  break;
2809  case LibFunc_sin:
2810  case LibFunc_sinf:
2811  case LibFunc_sinl:
2812  return Intrinsic::sin;
2813  case LibFunc_cos:
2814  case LibFunc_cosf:
2815  case LibFunc_cosl:
2816  return Intrinsic::cos;
2817  case LibFunc_exp:
2818  case LibFunc_expf:
2819  case LibFunc_expl:
2820  return Intrinsic::exp;
2821  case LibFunc_exp2:
2822  case LibFunc_exp2f:
2823  case LibFunc_exp2l:
2824  return Intrinsic::exp2;
2825  case LibFunc_log:
2826  case LibFunc_logf:
2827  case LibFunc_logl:
2828  return Intrinsic::log;
2829  case LibFunc_log10:
2830  case LibFunc_log10f:
2831  case LibFunc_log10l:
2832  return Intrinsic::log10;
2833  case LibFunc_log2:
2834  case LibFunc_log2f:
2835  case LibFunc_log2l:
2836  return Intrinsic::log2;
2837  case LibFunc_fabs:
2838  case LibFunc_fabsf:
2839  case LibFunc_fabsl:
2840  return Intrinsic::fabs;
2841  case LibFunc_fmin:
2842  case LibFunc_fminf:
2843  case LibFunc_fminl:
2844  return Intrinsic::minnum;
2845  case LibFunc_fmax:
2846  case LibFunc_fmaxf:
2847  case LibFunc_fmaxl:
2848  return Intrinsic::maxnum;
2849  case LibFunc_copysign:
2850  case LibFunc_copysignf:
2851  case LibFunc_copysignl:
2852  return Intrinsic::copysign;
2853  case LibFunc_floor:
2854  case LibFunc_floorf:
2855  case LibFunc_floorl:
2856  return Intrinsic::floor;
2857  case LibFunc_ceil:
2858  case LibFunc_ceilf:
2859  case LibFunc_ceill:
2860  return Intrinsic::ceil;
2861  case LibFunc_trunc:
2862  case LibFunc_truncf:
2863  case LibFunc_truncl:
2864  return Intrinsic::trunc;
2865  case LibFunc_rint:
2866  case LibFunc_rintf:
2867  case LibFunc_rintl:
2868  return Intrinsic::rint;
2869  case LibFunc_nearbyint:
2870  case LibFunc_nearbyintf:
2871  case LibFunc_nearbyintl:
2872  return Intrinsic::nearbyint;
2873  case LibFunc_round:
2874  case LibFunc_roundf:
2875  case LibFunc_roundl:
2876  return Intrinsic::round;
2877  case LibFunc_pow:
2878  case LibFunc_powf:
2879  case LibFunc_powl:
2880  return Intrinsic::pow;
2881  case LibFunc_sqrt:
2882  case LibFunc_sqrtf:
2883  case LibFunc_sqrtl:
2884  return Intrinsic::sqrt;
2885  }
2886 
2887  return Intrinsic::not_intrinsic;
2888 }
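
// Sketch: a read-only call to the C library function sinf, with a
// TargetLibraryInfo that recognizes it as available, is reported as
// Intrinsic::sin and may then be treated like llvm.sin.f32:
//
//   if (getIntrinsicForCallSite(ICS, TLI) == Intrinsic::sin)
//     ...;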
2889 
2890 /// Return true if we can prove that the specified FP value is never equal to
2891 /// -0.0.
2892 ///
2893 /// NOTE: this function will need to be revisited when we support non-default
2894 /// rounding modes!
2895 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
2896  unsigned Depth) {
2897  if (auto *CFP = dyn_cast<ConstantFP>(V))
2898  return !CFP->getValueAPF().isNegZero();
2899 
2900  // Limit search depth.
2901  if (Depth == MaxDepth)
2902  return false;
2903 
2904  auto *Op = dyn_cast<Operator>(V);
2905  if (!Op)
2906  return false;
2907 
2908  // Check if the nsz fast-math flag is set.
2909  if (auto *FPO = dyn_cast<FPMathOperator>(Op))
2910  if (FPO->hasNoSignedZeros())
2911  return true;
2912 
2913  // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
2914  if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
2915  return true;
2916 
2917  // sitofp and uitofp turn into +0.0 for zero.
2918  if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
2919  return true;
2920 
2921  if (auto *Call = dyn_cast<CallInst>(Op)) {
2922  Intrinsic::ID IID = getIntrinsicForCallSite(Call, TLI);
2923  switch (IID) {
2924  default:
2925  break;
2926  // sqrt(-0.0) = -0.0, no other negative results are possible.
2927  case Intrinsic::sqrt:
2928  case Intrinsic::canonicalize:
2929  return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
2930  // fabs(x) != -0.0
2931  case Intrinsic::fabs:
2932  return true;
2933  }
2934  }
2935 
2936  return false;
2937 }
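
// IR examples of the facts used above:
//
//   %a = fadd float %x, 0.0                   ; fadd with +0.0 never yields -0.0
//   %b = sitofp i32 %i to float               ; integer conversions give +0.0
//   %c = call float @llvm.fabs.f32(float %x)  ; fabs never yields -0.0
//
// CannotBeNegativeZero returns true for each of %a, %b and %c.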
2938 
2939 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
2940 /// standard ordered compare. E.g., -0.0 olt 0.0 is treated as true because
2941 /// of the sign bit, despite the two values comparing equal.
2942 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
2943  const TargetLibraryInfo *TLI,
2944  bool SignBitOnly,
2945  unsigned Depth) {
2946  // TODO: This function does not do the right thing when SignBitOnly is true
2947  // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
2948  // which flips the sign bits of NaNs. See
2949  // https://llvm.org/bugs/show_bug.cgi?id=31702.
2950 
2951  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
2952  return !CFP->getValueAPF().isNegative() ||
2953  (!SignBitOnly && CFP->getValueAPF().isZero());
2954  }
2955 
2956  // Handle vector of constants.
2957  if (auto *CV = dyn_cast<Constant>(V)) {
2958  if (CV->getType()->isVectorTy()) {
2959  unsigned NumElts = CV->getType()->getVectorNumElements();
2960  for (unsigned i = 0; i != NumElts; ++i) {
2961  auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
2962  if (!CFP)
2963  return false;
2964  if (CFP->getValueAPF().isNegative() &&
2965  (SignBitOnly || !CFP->getValueAPF().isZero()))
2966  return false;
2967  }
2968 
2969  // All non-negative ConstantFPs.
2970  return true;
2971  }
2972  }
2973 
2974  if (Depth == MaxDepth)
2975  return false; // Limit search depth.
2976 
2977  const Operator *I = dyn_cast<Operator>(V);
2978  if (!I)
2979  return false;
2980 
2981  switch (I->getOpcode()) {
2982  default:
2983  break;
2984  // Unsigned integers are always nonnegative.
2985  case Instruction::UIToFP:
2986  return true;
2987  case Instruction::FMul:
2988  // x*x is always non-negative or a NaN.
2989  if (I->getOperand(0) == I->getOperand(1) &&
2990  (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
2991  return true;
2992 
2993  LLVM_FALLTHROUGH;
2994  case Instruction::FAdd:
2995  case Instruction::FDiv:
2996  case Instruction::FRem:
2997  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
2998  Depth + 1) &&
2999  cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3000  Depth + 1);
3001  case Instruction::Select:
3002  return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3003  Depth + 1) &&
3004  cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3005  Depth + 1);
3006  case Instruction::FPExt:
3007  case Instruction::FPTrunc:
3008  // Widening/narrowing never change sign.
3009  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3010  Depth + 1);
3011  case Instruction::ExtractElement:
3012  // Look through extract element. At the moment we keep this simple and skip
3013  // tracking the specific element. But at least we might find information
3014  // valid for all elements of the vector.
3015  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3016  Depth + 1);
3017  case Instruction::Call:
3018  const auto *CI = cast<CallInst>(I);
3019  Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
3020  switch (IID) {
3021  default:
3022  break;
3023  case Intrinsic::maxnum:
3024  return (isKnownNeverNaN(I->getOperand(0), TLI) &&
3025  cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI,
3026  SignBitOnly, Depth + 1)) ||
3027  (isKnownNeverNaN(I->getOperand(1), TLI) &&
3028  cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
3029  SignBitOnly, Depth + 1));
3030 
3031  case Intrinsic::maximum:
3032  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3033  Depth + 1) ||
3034  cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3035  Depth + 1);
3036  case Intrinsic::minnum:
3037  case Intrinsic::minimum:
3038  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3039  Depth + 1) &&
3040  cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3041  Depth + 1);
3042  case Intrinsic::exp:
3043  case Intrinsic::exp2:
3044  case Intrinsic::fabs:
3045  return true;
3046 
3047  case Intrinsic::sqrt:
3048  // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
3049  if (!SignBitOnly)
3050  return true;
3051  return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3052  CannotBeNegativeZero(CI->getOperand(0), TLI));
3053 
3054  case Intrinsic::powi:
3055  if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3056  // powi(x,n) is non-negative if n is even.
3057  if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3058  return true;
3059  }
3060  // TODO: This is not correct. Given that exp is an integer, here are the
3061  // ways that pow can return a negative value:
3062  //
3063  // pow(x, exp) --> negative if exp is odd and x is negative.
3064  // pow(-0, exp) --> -inf if exp is negative odd.
3065  // pow(-0, exp) --> -0 if exp is positive odd.
3066  // pow(-inf, exp) --> -0 if exp is negative odd.
3067  // pow(-inf, exp) --> -inf if exp is positive odd.
3068  //
3069  // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3070  // but we must return false if x == -0. Unfortunately we do not currently
3071  // have a way of expressing this constraint. See details in
3072  // https://llvm.org/bugs/show_bug.cgi?id=31702.
3073  return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3074  Depth + 1);
3075 
3076  case Intrinsic::fma:
3077  case Intrinsic::fmuladd:
3078  // x*x+y is non-negative if y is non-negative.
3079  return I->getOperand(0) == I->getOperand(1) &&
3080  (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3081  cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3082  Depth + 1);
3083  }
3084  break;
3085  }
3086  return false;
3087 }
3088 
3089 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3090  const TargetLibraryInfo *TLI) {
3091  return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3092 }
3093 
3094 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3095  return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3096 }
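// The two wrappers above differ only in SignBitOnly. For the constant -0.0,
// CannotBeOrderedLessThanZero returns true (-0.0 compares equal to 0.0 in an
// ordered compare), while SignBitMustBeZero returns false (the sign bit of
// -0.0 is set).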
3097 
3098 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3099  unsigned Depth) {
3100  assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3101 
3102  // If we're told that NaNs won't happen, assume they won't.
3103  if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3104  if (FPMathOp->hasNoNaNs())
3105  return true;
3106 
3107  // Handle scalar constants.
3108  if (auto *CFP = dyn_cast<ConstantFP>(V))
3109  return !CFP->isNaN();
3110 
3111  if (Depth == MaxDepth)
3112  return false;
3113 
3114  if (auto *Inst = dyn_cast<Instruction>(V)) {
3115  switch (Inst->getOpcode()) {
3116  case Instruction::FAdd:
3117  case Instruction::FMul:
3118  case Instruction::FSub:
3119  case Instruction::FDiv:
3120  case Instruction::FRem: {
3121  // TODO: Need isKnownNeverInfinity
3122  return false;
3123  }
3124  case Instruction::Select: {
3125  return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3126  isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3127  }
3128  case Instruction::SIToFP:
3129  case Instruction::UIToFP:
3130  return true;
3131  case Instruction::FPTrunc:
3132  case Instruction::FPExt:
3133  return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3134  default:
3135  break;
3136  }
3137  }
3138 
3139  if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3140  switch (II->getIntrinsicID()) {
3141  case Intrinsic::canonicalize:
3142  case Intrinsic::fabs:
3143  case Intrinsic::copysign:
3144  case Intrinsic::exp:
3145  case Intrinsic::exp2:
3146  case Intrinsic::floor:
3147  case Intrinsic::ceil:
3148  case Intrinsic::trunc:
3149  case Intrinsic::rint:
3150  case Intrinsic::nearbyint:
3151  case Intrinsic::round:
3152  return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3153  case Intrinsic::sqrt:
3154  return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3155  CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3156  case Intrinsic::minnum:
3157  case Intrinsic::maxnum:
3158  // If either operand is not NaN, the result is not NaN.
3159  return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3160  isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3161  default:
3162  return false;
3163  }
3164  }
3165 
3166  // Bail out for constant expressions, but try to handle vector constants.
3167  if (!V->getType()->isVectorTy() || !isa<Constant>(V))
3168  return false;
3169 
3170  // For vectors, verify that each element is not NaN.
3171  unsigned NumElts = V->getType()->getVectorNumElements();
3172  for (unsigned i = 0; i != NumElts; ++i) {
3173  Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3174  if (!Elt)
3175  return false;
3176  if (isa<UndefValue>(Elt))
3177  continue;
3178  auto *CElt = dyn_cast<ConstantFP>(Elt);
3179  if (!CElt || CElt->isNaN())
3180  return false;
3181  }
3182  // All elements were confirmed not-NaN or undefined.
3183  return true;
3184 }
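// For example (illustrative IR), isKnownNeverNaN returns true for
//   %i = sitofp i32 %x to float
// because integer conversions cannot produce NaN, but it conservatively
// returns false for a plain 'fadd float %a, %b' without the nnan flag, since
// (+inf) + (-inf) is NaN.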
3185 
3186 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3187 
3188  // All byte-wide stores are splatable, even of arbitrary variables.
3189  if (V->getType()->isIntegerTy(8))
3190  return V;
3191 
3192  LLVMContext &Ctx = V->getContext();
3193 
3194  // Undef bytes don't care; any value matches.
3195  auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3196  if (isa<UndefValue>(V))
3197  return UndefInt8;
3198 
3199  const uint64_t Size = DL.getTypeStoreSize(V->getType());
3200  if (!Size)
3201  return UndefInt8;
3202 
3203  Constant *C = dyn_cast<Constant>(V);
3204  if (!C) {
3205  // Conceptually, we could handle things like:
3206  // %a = zext i8 %X to i16
3207  // %b = shl i16 %a, 8
3208  // %c = or i16 %a, %b
3209  // but until there is an example that actually needs this, it doesn't seem
3210  // worth worrying about.
3211  return nullptr;
3212  }
3213 
3214  // Handle 'null' ConstantArrayZero etc.
3215  if (C->isNullValue())
3216  return Constant::getNullValue(Type::getInt8Ty(Ctx));
3217 
3218  // Constant floating-point values can be handled as integer values if the
3219  // corresponding integer value is "byteable". An important case is 0.0.
3220  if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3221  Type *Ty = nullptr;
3222  if (CFP->getType()->isHalfTy())
3223  Ty = Type::getInt16Ty(Ctx);
3224  else if (CFP->getType()->isFloatTy())
3225  Ty = Type::getInt32Ty(Ctx);
3226  else if (CFP->getType()->isDoubleTy())
3227  Ty = Type::getInt64Ty(Ctx);
3228  // Don't handle long double formats, which have strange constraints.
3229  return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3230  : nullptr;
3231  }
3232 
3233  // We can handle constant integers that are multiple of 8 bits.
3234  if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3235  if (CI->getBitWidth() % 8 == 0) {
3236  assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3237  if (!CI->getValue().isSplat(8))
3238  return nullptr;
3239  return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3240  }
3241  }
3242 
3243  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3244  if (CE->getOpcode() == Instruction::IntToPtr) {
3245  auto PS = DL.getPointerSizeInBits(
3246  cast<PointerType>(CE->getType())->getAddressSpace());
3247  return isBytewiseValue(
3248  ConstantExpr::getIntegerCast(CE->getOperand(0),
3249  Type::getIntNTy(Ctx, PS), false),
3250  DL);
3251  }
3252  }
3253 
3254  auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3255  if (LHS == RHS)
3256  return LHS;
3257  if (!LHS || !RHS)
3258  return nullptr;
3259  if (LHS == UndefInt8)
3260  return RHS;
3261  if (RHS == UndefInt8)
3262  return LHS;
3263  return nullptr;
3264  };
3265 
3266  if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3267  Value *Val = UndefInt8;
3268  for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3269  if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3270  return nullptr;
3271  return Val;
3272  }
3273 
3274  if (isa<ConstantAggregate>(C)) {
3275  Value *Val = UndefInt8;
3276  for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3277  if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3278  return nullptr;
3279  return Val;
3280  }
3281 
3282  // Don't try to handle the handful of other constants.
3283  return nullptr;
3284 }
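// For example, isBytewiseValue returns an i8 0xAA for the i32 constant
// 0xAAAAAAAA, an i8 0 for the double constant 0.0 (via the bitcast path
// above), and nullptr for the i32 constant 0x12345678, whose bytes differ.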
3285 
3286 // This is the recursive version of BuildSubAggregate. It takes a few different
3287 // arguments. Idxs is the index within the nested struct From that we are
3288 // looking at now (which is of type IndexedType). IdxSkip is the number of
3289 // indices from Idxs that should be left out when inserting into the resulting
3290 // struct. To is the result struct built so far, new insertvalue instructions
3291 // build on that.
3292 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
3293  SmallVectorImpl<unsigned> &Idxs,
3294  unsigned IdxSkip,
3295  Instruction *InsertBefore) {
3296  StructType *STy = dyn_cast<StructType>(IndexedType);
3297  if (STy) {
3298  // Save the original To argument so we can modify it
3299  Value *OrigTo = To;
3300  // General case, the type indexed by Idxs is a struct
3301  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3302  // Process each struct element recursively
3303  Idxs.push_back(i);
3304  Value *PrevTo = To;
3305  To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
3306  InsertBefore);
3307  Idxs.pop_back();
3308  if (!To) {
3309  // Couldn't find any inserted value for this index? Cleanup
3310  while (PrevTo != OrigTo) {
3311  InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
3312  PrevTo = Del->getAggregateOperand();
3313  Del->eraseFromParent();
3314  }
3315  // Stop processing elements
3316  break;
3317  }
3318  }
3319  // If we successfully found a value for each of our subaggregates
3320  if (To)
3321  return To;
3322  }
3323  // Base case, the type indexed by SourceIdxs is not a struct, or not all of
3324  // the struct's elements had a value that was inserted directly. In the latter
3325  // case, perhaps we can't determine each of the subelements individually, but
3326  // we might be able to find the complete struct somewhere.
3327 
3328  // Find the value that is at that particular spot
3329  Value *V = FindInsertedValue(From, Idxs);
3330 
3331  if (!V)
3332  return nullptr;
3333 
3334  // Insert the value in the new (sub) aggregate
3335  return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
3336  "tmp", InsertBefore);
3337 }
3338 
3339 // This helper takes a nested struct and extracts a part of it (which is again a
3340 // struct) into a new value. For example, given the struct:
3341 // { a, { b, { c, d }, e } }
3342 // and the indices "1, 1" this returns
3343 // { c, d }.
3344 //
3345 // It does this by inserting an insertvalue for each element in the resulting
3346 // struct, as opposed to just inserting a single struct. This will only work if
3347 // each of the elements of the substruct are known (ie, inserted into From by an
3348 // insertvalue instruction somewhere).
3349 //
3350 // All inserted insertvalue instructions are inserted before InsertBefore
3351 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
3352  Instruction *InsertBefore) {
3353  assert(InsertBefore && "Must have someplace to insert!");
3354  Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
3355  idx_range);
3356  Value *To = UndefValue::get(IndexedType);
3357  SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
3358  unsigned IdxSkip = Idxs.size();
3359 
3360  return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
3361 }
3362 
3363 /// Given an aggregate and a sequence of indices, see if the scalar value
3364 /// indexed is already around as a register, for example if it was inserted
3365 /// directly into the aggregate.
3366 ///
3367 /// If InsertBefore is not null, this function will duplicate (modified)
3368 /// insertvalues when a part of a nested struct is extracted.
3369 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
3370  Instruction *InsertBefore) {
3371  // Nothing to index? Just return V then (this is useful at the end of our
3372  // recursion).
3373  if (idx_range.empty())
3374  return V;
3375  // We have indices, so V should have an indexable type.
3376  assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
3377  "Not looking at a struct or array?");
3378  assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
3379  "Invalid indices for type?");
3380 
3381  if (Constant *C = dyn_cast<Constant>(V)) {
3382  C = C->getAggregateElement(idx_range[0]);
3383  if (!C) return nullptr;
3384  return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
3385  }
3386 
3387  if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
3388  // Loop the indices for the insertvalue instruction in parallel with the
3389  // requested indices
3390  const unsigned *req_idx = idx_range.begin();
3391  for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
3392  i != e; ++i, ++req_idx) {
3393  if (req_idx == idx_range.end()) {
3394  // We can't handle this without inserting insertvalues
3395  if (!InsertBefore)
3396  return nullptr;
3397 
3398  // The requested index identifies a part of a nested aggregate. Handle
3399  // this specially. For example,
3400  // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
3401  // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
3402  // %C = extractvalue {i32, { i32, i32 } } %B, 1
3403  // This can be changed into
3404  // %A = insertvalue {i32, i32 } undef, i32 10, 0
3405  // %C = insertvalue {i32, i32 } %A, i32 11, 1
3406  // which allows the unused 0,0 element from the nested struct to be
3407  // removed.
3408  return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
3409  InsertBefore);
3410  }
3411 
3412  // This insertvalue inserts something other than what we are looking for.
3413  // See if the (aggregate) value inserted into has the value we are
3414  // looking for, then.
3415  if (*req_idx != *i)
3416  return FindInsertedValue(I->getAggregateOperand(), idx_range,
3417  InsertBefore);
3418  }
3419  // If we end up here, the indices of the insertvalue match with those
3420  // requested (though possibly only partially). Now we recursively look at
3421  // the inserted value, passing any remaining indices.
3422  return FindInsertedValue(I->getInsertedValueOperand(),
3423  makeArrayRef(req_idx, idx_range.end()),
3424  InsertBefore);
3425  }
3426 
3427  if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
3428  // If we're extracting a value from an aggregate that was extracted from
3429  // something else, we can extract from that something else directly instead.
3430  // However, we will need to chain I's indices with the requested indices.
3431 
3432  // Calculate the number of indices required
3433  unsigned size = I->getNumIndices() + idx_range.size();
3434  // Allocate some space to put the new indices in
3435  SmallVector<unsigned, 5> Idxs;
3436  Idxs.reserve(size);
3437  // Add indices from the extract value instruction
3438  Idxs.append(I->idx_begin(), I->idx_end());
3439 
3440  // Add requested indices
3441  Idxs.append(idx_range.begin(), idx_range.end());
3442 
3443  assert(Idxs.size() == size
3444  && "Number of indices added not correct?");
3445 
3446  return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
3447  }
3448  // Otherwise, we don't know (e.g., extracting from a function return value
3449  // or a load instruction).
3450  return nullptr;
3451 }
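// A typical use (sketch): given
//   %agg = insertvalue { i32, i32 } undef, i32 %x, 1
// FindInsertedValue(%agg, {1}) returns %x directly, so a pass can replace
// 'extractvalue { i32, i32 } %agg, 1' with %x without creating instructions.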
3452 
3453 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
3454  unsigned CharSize) {
3455  // Make sure the GEP has exactly three arguments.
3456  if (GEP->getNumOperands() != 3)
3457  return false;
3458 
3459  // Make sure the indexee is a pointer to an array of \p CharSize-wide
3460  // integers.
3461  ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
3462  if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
3463  return false;
3464 
3465  // Check to make sure that the first operand of the GEP is an integer and
3466  // has value 0 so that we are sure we're indexing into the initializer.
3467  const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
3468  if (!FirstIdx || !FirstIdx->isZero())
3469  return false;
3470 
3471  return true;
3472 }
3473 
3474 bool llvm::getConstantDataArrayInfo(const Value *V,
3475  ConstantDataArraySlice &Slice,
3476  unsigned ElementSize, uint64_t Offset) {
3477  assert(V);
3478 
3479  // Look through bitcast instructions and geps.
3480  V = V->stripPointerCasts();
3481 
3482  // If the value is a GEP instruction or constant expression, treat it as an
3483  // offset.
3484  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3485  // The GEP operator should be based on a pointer to string constant, and is
3486  // indexing into the string constant.
3487  if (!isGEPBasedOnPointerToString(GEP, ElementSize))
3488  return false;
3489 
3490  // If the second index isn't a ConstantInt, then this is a variable index
3491  // into the array. If this occurs, we can't say anything meaningful about
3492  // the string.
3493  uint64_t StartIdx = 0;
3494  if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
3495  StartIdx = CI->getZExtValue();
3496  else
3497  return false;
3498  return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
3499  StartIdx + Offset);
3500  }
3501 
3502  // The GEP instruction, constant or instruction, must reference a global
3503  // variable that is a constant and is initialized. The referenced constant
3504  // initializer is the array that we'll use for optimization.
3505  const GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
3506  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
3507  return false;
3508 
3509  const ConstantDataArray *Array;
3510  ArrayType *ArrayTy;
3511  if (GV->getInitializer()->isNullValue()) {
3512  Type *GVTy = GV->getValueType();
3513  if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
3514  // A zeroinitializer for the array; there is no ConstantDataArray.
3515  Array = nullptr;
3516  } else {
3517  const DataLayout &DL = GV->getParent()->getDataLayout();
3518  uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy);
3519  uint64_t Length = SizeInBytes / (ElementSize / 8);
3520  if (Length <= Offset)
3521  return false;
3522 
3523  Slice.Array = nullptr;
3524  Slice.Offset = 0;
3525  Slice.Length = Length - Offset;
3526  return true;
3527  }
3528  } else {
3529  // This must be a ConstantDataArray.
3530  Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
3531  if (!Array)
3532  return false;
3533  ArrayTy = Array->getType();
3534  }
3535  if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
3536  return false;
3537 
3538  uint64_t NumElts = ArrayTy->getArrayNumElements();
3539  if (Offset > NumElts)
3540  return false;
3541 
3542  Slice.Array = Array;
3543  Slice.Offset = Offset;
3544  Slice.Length = NumElts - Offset;
3545  return true;
3546 }
3547 
3548 /// This function extracts the constant, nul-terminated C string pointed to
3549 /// by V. If successful, it returns true and returns the string in Str;
3550 /// otherwise it returns false.
3551 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
3552  uint64_t Offset, bool TrimAtNul) {
3553  ConstantDataArraySlice Slice;
3554  if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
3555  return false;
3556 
3557  if (Slice.Array == nullptr) {
3558  if (TrimAtNul) {
3559  Str = StringRef();
3560  return true;
3561  }
3562  if (Slice.Length == 1) {
3563  Str = StringRef("", 1);
3564  return true;
3565  }
3566  // We cannot instantiate a StringRef as we do not have an appropriate string
3567  // of 0s at hand.
3568  return false;
3569  }
3570 
3571  // Start out with the entire array in the StringRef.
3572  Str = Slice.Array->getAsString();
3573  // Skip over 'offset' bytes.
3574  Str = Str.substr(Slice.Offset);
3575 
3576  if (TrimAtNul) {
3577  // Trim off the \0 and anything after it. If the array is not nul
3578  // terminated, we just return the whole end of string. The client may know
3579  // some other way that the string is length-bound.
3580  Str = Str.substr(0, Str.find('\0'));
3581  }
3582  return true;
3583 }
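// For example (illustrative IR), given the global
//   @str = private constant [6 x i8] c"hello\00"
// getConstantStringInfo(@str, Str) succeeds with Str == "hello"; the nul and
// anything after it are dropped when TrimAtNul is true (the default).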
3584 
3585 // These next two are very similar to the above, but also look through PHI
3586 // nodes.
3587 // TODO: See if we can integrate these two together.
3588 
3589 /// If we can compute the length of the string pointed to by
3590 /// the specified pointer, return 'len+1'. If we can't, return 0.
3591 static uint64_t GetStringLengthH(const Value *V,
3592  SmallPtrSetImpl<const PHINode*> &PHIs,
3593  unsigned CharSize) {
3594  // Look through noop bitcast instructions.
3595  V = V->stripPointerCasts();
3596 
3597  // If this is a PHI node, there are two cases: either we have already seen it
3598  // or we haven't.
3599  if (const PHINode *PN = dyn_cast<PHINode>(V)) {
3600  if (!PHIs.insert(PN).second)
3601  return ~0ULL; // already in the set.
3602 
3603  // If it was new, see if all the input strings are the same length.
3604  uint64_t LenSoFar = ~0ULL;
3605  for (Value *IncValue : PN->incoming_values()) {
3606  uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
3607  if (Len == 0) return 0; // Unknown length -> unknown.
3608 
3609  if (Len == ~0ULL) continue;
3610 
3611  if (Len != LenSoFar && LenSoFar != ~0ULL)
3612  return 0; // Disagree -> unknown.
3613  LenSoFar = Len;
3614  }
3615 
3616  // Success, all agree.
3617  return LenSoFar;
3618  }
3619 
3620  // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
3621  if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
3622  uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
3623  if (Len1 == 0) return 0;
3624  uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
3625  if (Len2 == 0) return 0;
3626  if (Len1 == ~0ULL) return Len2;
3627  if (Len2 == ~0ULL) return Len1;
3628  if (Len1 != Len2) return 0;
3629  return Len1;
3630  }
3631 
3632  // Otherwise, see if we can read the string.
3633  ConstantDataArraySlice Slice;
3634  if (!getConstantDataArrayInfo(V, Slice, CharSize))
3635  return 0;
3636 
3637  if (Slice.Array == nullptr)
3638  return 1;
3639 
3640  // Search for nul characters
3641  unsigned NullIndex = 0;
3642  for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
3643  if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
3644  break;
3645  }
3646 
3647  return NullIndex + 1;
3648 }
3649 
3650 /// If we can compute the length of the string pointed to by
3651 /// the specified pointer, return 'len+1'. If we can't, return 0.
3652 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
3653  if (!V->getType()->isPointerTy())
3654  return 0;
3655 
3656  SmallPtrSet<const PHINode*, 32> PHIs;
3657  uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
3658  // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so treat
3659  // it as an empty string and return a length of 1 (just the nul terminator).
3660  return Len == ~0ULL ? 1 : Len;
3661 }
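// Note the 'len+1' convention: for the "hello\00" global above this returns 6,
// so callers get a size that includes the nul terminator, and 0 cleanly means
// "unknown length".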
3662 
3663 const Value *
3664 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
3665  bool MustPreserveNullness) {
3666  assert(Call &&
3667  "getArgumentAliasingToReturnedPointer only works on nonnull calls");
3668  if (const Value *RV = Call->getReturnedArgOperand())
3669  return RV;
3670  // This can be used only as an aliasing property.
3671  if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
3672  Call, MustPreserveNullness))
3673  return Call->getArgOperand(0);
3674  return nullptr;
3675 }
3676 
3677 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
3678  const CallBase *Call, bool MustPreserveNullness) {
3679  return Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
3680  Call->getIntrinsicID() == Intrinsic::strip_invariant_group ||
3681  Call->getIntrinsicID() == Intrinsic::aarch64_irg ||
3682  Call->getIntrinsicID() == Intrinsic::aarch64_tagp ||
3683  (!MustPreserveNullness &&
3684  Call->getIntrinsicID() == Intrinsic::ptrmask);
3685 }
3686 
3687 /// \p PN defines a loop-variant pointer to an object. Check if the
3688 /// previous iteration of the loop was referring to the same object as \p PN.
3689 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
3690  const LoopInfo *LI) {
3691  // Find the loop-defined value.
3692  Loop *L = LI->getLoopFor(PN->getParent());
3693  if (PN->getNumIncomingValues() != 2)
3694  return true;
3695 
3696  // Find the value from previous iteration.
3697  auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
3698  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3699  PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
3700  if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
3701  return true;
3702 
3703  // If a new pointer is loaded in the loop, the pointer references a different
3704  // object in every iteration. E.g.:
3705  // for (i)
3706  // int *p = a[i];
3707  // ...
3708  if (auto *Load = dyn_cast<LoadInst>(PrevValue))
3709  if (!L->isLoopInvariant(Load->getPointerOperand()))
3710  return false;
3711  return true;
3712 }
3713 
3714 Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
3715  unsigned MaxLookup) {
3716  if (!V->getType()->isPointerTy())
3717  return V;
3718  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
3719  if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
3720  V = GEP->getPointerOperand();
3721  } else if (Operator::getOpcode(V) == Instruction::BitCast ||
3722  Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
3723  V = cast<Operator>(V)->getOperand(0);
3724  } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
3725  if (GA->isInterposable())
3726  return V;
3727  V = GA->getAliasee();
3728  } else if (isa<AllocaInst>(V)) {
3729  // An alloca can't be further simplified.
3730  return V;
3731  } else {
3732  // CaptureTracking knows about special capturing properties of some
3733  // intrinsics, such as launder.invariant.group, that cannot be expressed
3734  // with attributes but that still return a pointer aliasing their
3735  // argument. Some analyses assume that a nocapture pointer cannot be
3736  // returned by such a special intrinsic (because the function would
3737  // otherwise have to be marked with the 'returned' attribute), so it is
3738  // crucial to use this helper, which is kept in sync with
3739  // CaptureTracking. Not using it can cause subtle miscompilations where
3740  // two pointers that actually alias are assumed not to alias
3741  // (i.e. wrongly reported as noalias).
3742  if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
3743  V = RP;
3744  continue;
3745  }
3746  }
3747 
3748  // See if InstructionSimplify knows any relevant tricks.
3749  if (Instruction *I = dyn_cast<Instruction>(V))
3750  // TODO: Acquire a DominatorTree and AssumptionCache and use them.
3751  if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
3752  V = Simplified;
3753  continue;
3754  }
3755 
3756  return V;
3757  }
3758  assert(V->getType()->isPointerTy() && "Unexpected operand type!");
3759  }
3760  return V;
3761 }
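// For example (illustrative IR), starting from %p2 the loop above strips the
// bitcast and both GEPs and returns the alloca:
//   %buf = alloca [16 x i32]
//   %p0  = getelementptr [16 x i32], [16 x i32]* %buf, i64 0, i64 4
//   %p1  = bitcast i32* %p0 to i8*
//   %p2  = getelementptr i8, i8* %p1, i64 2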
3762 
3763 void llvm::GetUnderlyingObjects(const Value *V,
3764  SmallVectorImpl<const Value *> &Objects,
3765  const DataLayout &DL, LoopInfo *LI,
3766  unsigned MaxLookup) {
3767  SmallPtrSet<const Value *, 4> Visited;
3768  SmallVector<const Value *, 4> Worklist;
3769  Worklist.push_back(V);
3770  do {
3771  const Value *P = Worklist.pop_back_val();
3772  P = GetUnderlyingObject(P, DL, MaxLookup);
3773 
3774  if (!Visited.insert(P).second)
3775  continue;
3776 
3777  if (auto *SI = dyn_cast<SelectInst>(P)) {
3778  Worklist.push_back(SI->getTrueValue());
3779  Worklist.push_back(SI->getFalseValue());
3780  continue;
3781  }
3782 
3783  if (auto *PN = dyn_cast<PHINode>(P)) {
3784  // If this PHI changes the underlying object in every iteration of the
3785  // loop, don't look through it. Consider:
3786  // int **A;
3787  // for (i) {
3788  // Prev = Curr; // Prev = PHI (Prev_0, Curr)
3789  // Curr = A[i];
3790  // *Prev, *Curr;
3791  //
3792  // Prev is tracking Curr one iteration behind so they refer to different
3793  // underlying objects.
3794  if (!LI || !LI->isLoopHeader(PN->getParent()) ||
3795  isSameUnderlyingObjectInLoop(PN, LI))
3796  for (Value *IncValue : PN->incoming_values())
3797  Worklist.push_back(IncValue);
3798  continue;
3799  }
3800 
3801  Objects.push_back(P);
3802  } while (!Worklist.empty());
3803 }
3804 
3805 /// This is the function that does the work of looking through basic
3806 /// ptrtoint+arithmetic+inttoptr sequences.
3807 static const Value *getUnderlyingObjectFromInt(const Value *V) {
3808  do {
3809  if (const Operator *U = dyn_cast<Operator>(V)) {
3810  // If we find a ptrtoint, we can transfer control back to the
3811  // regular getUnderlyingObjectFromInt.
3812  if (U->getOpcode() == Instruction::PtrToInt)
3813  return U->getOperand(0);
3814  // If we find an add of a constant, a multiplied value, or a phi, it's
3815  // likely that the other operand will lead us to the base
3816  // object. We don't have to worry about the case where the
3817  // object address is somehow being computed by the multiply,
3818  // because our callers only care when the result is an
3819  // identifiable object.
3820  if (U->getOpcode() != Instruction::Add ||
3821  (!isa<ConstantInt>(U->getOperand(1)) &&
3822  Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
3823  !isa<PHINode>(U->getOperand(1))))
3824  return V;
3825  V = U->getOperand(0);
3826  } else {
3827  return V;
3828  }
3829  assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
3830  } while (true);
3831 }
3832 
3833 /// This is a wrapper around GetUnderlyingObjects and adds support for basic
3834 /// ptrtoint+arithmetic+inttoptr sequences.
3835 /// It returns false if an unidentified object is found in GetUnderlyingObjects.
3836 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
3837  SmallVectorImpl<Value *> &Objects,
3838  const DataLayout &DL) {
3839  SmallPtrSet<const Value *, 4> Visited;
3840  SmallVector<const Value *, 4> Working(1, V);
3841  do {
3842  V = Working.pop_back_val();
3843 
3844  SmallVector<const Value *, 4> Objs;
3845  GetUnderlyingObjects(V, Objs, DL);
3846 
3847  for (const Value *V : Objs) {
3848  if (!Visited.insert(V).second)
3849  continue;
3850  if (Operator::getOpcode(V) == Instruction::IntToPtr) {
3851  const Value *O =
3852  getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
3853  if (O->getType()->isPointerTy()) {
3854  Working.push_back(O);
3855  continue;
3856  }
3857  }
3858  // If GetUnderlyingObjects fails to find an identifiable object,
3859  // getUnderlyingObjectsForCodeGen also fails for safety.
3860  if (!isIdentifiedObject(V)) {
3861  Objects.clear();
3862  return false;
3863  }
3864  Objects.push_back(const_cast<Value *>(V));
3865  }
3866  } while (!Working.empty());
3867  return true;
3868 }
3869 
3870 /// Return true if the only users of this pointer are lifetime markers.
3871 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
3872  for (const User *U : V->users()) {
3873  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
3874  if (!II) return false;
3875 
3876  if (!II->isLifetimeStartOrEnd())
3877  return false;
3878  }
3879  return true;
3880 }
3881 
3882 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
3883  if (!LI.isUnordered())
3884  return true;
3885  const Function &F = *LI.getFunction();
3886  // Speculative load may create a race that did not exist in the source.
3887  return F.hasFnAttribute(Attribute::SanitizeThread) ||
3888  // Speculative load may load data from dirty regions.
3889  F.hasFnAttribute(Attribute::SanitizeAddress) ||
3890  F.hasFnAttribute(Attribute::SanitizeHWAddress);
3891 }
3892 
3893 
3894 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
3895  const Instruction *CtxI,
3896  const DominatorTree *DT) {
3897  const Operator *Inst = dyn_cast<Operator>(V);
3898  if (!Inst)
3899  return false;
3900 
3901  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i)
3902  if (Constant *C = dyn_cast<Constant>(Inst->getOperand(i)))
3903  if (C->canTrap())
3904  return false;
3905 
3906  switch (Inst->getOpcode()) {
3907  default:
3908  return true;
3909  case Instruction::UDiv:
3910  case Instruction::URem: {
3911  // x / y is undefined if y == 0.
3912  const APInt *V;
3913  if (match(Inst->getOperand(1), m_APInt(V)))
3914  return *V != 0;
3915  return false;
3916  }
3917  case Instruction::SDiv:
3918  case Instruction::SRem: {
3919  // x / y is undefined if y == 0 or x == INT_MIN and y == -1
3920  const APInt *Numerator, *Denominator;
3921  if (!match(Inst->getOperand(1), m_APInt(Denominator)))
3922  return false;
3923  // We cannot hoist this division if the denominator is 0.
3924  if (*Denominator == 0)
3925  return false;
3926  // It's safe to hoist if the denominator is not 0 or -1.
3927  if (*Denominator != -1)
3928  return true;
3929  // At this point we know that the denominator is -1. It is safe to hoist as
3930  // long we know that the numerator is not INT_MIN.
3931  if (match(Inst->getOperand(0), m_APInt(Numerator)))
3932  return !Numerator->isMinSignedValue();
3933  // The numerator *might* be MinSignedValue.
3934  return false;
3935  }
3936  case Instruction::Load: {
3937  const LoadInst *LI = cast<LoadInst>(Inst);
3938  if (mustSuppressSpeculation(*LI))
3939  return false;
3940  const DataLayout &DL = LI->getModule()->getDataLayout();
3941  return isDereferenceableAndAlignedPointer(
3942  LI->getPointerOperand(), LI->getType(), MaybeAlign(LI->getAlignment()),
3943  DL, CtxI, DT);
3944  }
3945  case Instruction::Call: {
3946  auto *CI = cast<const CallInst>(Inst);
3947  const Function *Callee = CI->getCalledFunction();
3948 
3949  // The called function could have undefined behavior or side-effects, even
3950  // if marked readnone nounwind.
3951  return Callee && Callee->isSpeculatable();
3952  }
3953  case Instruction::VAArg:
3954  case Instruction::Alloca:
3955  case Instruction::Invoke:
3956  case Instruction::CallBr:
3957  case Instruction::PHI:
3958  case Instruction::Store:
3959  case Instruction::Ret:
3960  case Instruction::Br:
3961  case Instruction::IndirectBr:
3962  case Instruction::Switch:
3963  case Instruction::Unreachable:
3964  case Instruction::Fence:
3965  case Instruction::AtomicRMW:
3966  case Instruction::AtomicCmpXchg:
3967  case Instruction::LandingPad:
3968  case Instruction::Resume:
3969  case Instruction::CatchSwitch:
3970  case Instruction::CatchPad:
3971  case Instruction::CatchRet:
3972  case Instruction::CleanupPad:
3973  case Instruction::CleanupRet:
3974  return false; // Misc instructions which have effects
3975  }
3976 }
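// For example, '%q = udiv i32 %a, 7' is safe to speculate (the divisor is a
// non-zero constant), but 'udiv i32 %a, %b' is not: %b might be zero on a
// path where the original division was never executed.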
3977 
3978 bool llvm::mayBeMemoryDependent(const Instruction &I) {
3979  return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
3980 }
3981 
3982 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
3983 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
3984  switch (OR) {
3985  case ConstantRange::OverflowResult::MayOverflow:
3986  return OverflowResult::MayOverflow;
3987  case ConstantRange::OverflowResult::AlwaysOverflowsLow:
3988  return OverflowResult::AlwaysOverflowsLow;
3989  case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
3990  return OverflowResult::AlwaysOverflowsHigh;
3991  case ConstantRange::OverflowResult::NeverOverflows:
3992  return OverflowResult::NeverOverflows;
3993  }
3994  llvm_unreachable("Unknown OverflowResult");
3995 }
3996 
3997 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
3998 static ConstantRange computeConstantRangeIncludingKnownBits(
3999  const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4000  AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4001  OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4002  KnownBits Known = computeKnownBits(
4003  V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4004  ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4005  ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4006  ConstantRange::PreferredRangeType RangeType =
4007  ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4008  return CR1.intersectWith(CR2, RangeType);
4009 }
4010 
4011 OverflowResult llvm::computeOverflowForUnsignedMul(
4012  const Value *LHS, const Value *RHS, const DataLayout &DL,
4013  AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4014  bool UseInstrInfo) {
4015  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4016  nullptr, UseInstrInfo);
4017  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4018  nullptr, UseInstrInfo);
4019  ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4020  ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4021  return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4022 }
4023 
4024 OverflowResult llvm::computeOverflowForSignedMul(const Value *LHS,
4025  const Value *RHS,
4026  const DataLayout &DL, AssumptionCache *AC,
4027  const Instruction *CxtI,
4028  const DominatorTree *DT, bool UseInstrInfo) {
4029  // Multiplying n * m significant bits yields a result of n + m significant
4030  // bits. If the total number of significant bits does not exceed the
4031  // result bit width (minus 1), there is no overflow.
4032  // This means if we have enough leading sign bits in the operands
4033  // we can guarantee that the result does not overflow.
4034  // Ref: "Hacker's Delight" by Henry Warren
4035  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4036 
4037  // Note that underestimating the number of sign bits gives a more
4038  // conservative answer.
4039  unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4040  ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4041 
4042  // First handle the easy case: if we have enough sign bits there's
4043  // definitely no overflow.
4044  if (SignBits > BitWidth + 1)
4045  return OverflowResult::NeverOverflows;
4046 
4047  // There are two ambiguous cases where there can be no overflow:
4048  // SignBits == BitWidth + 1 and
4049  // SignBits == BitWidth
4050  // The second case is difficult to check, therefore we only handle the
4051  // first case.
4052  if (SignBits == BitWidth + 1) {
4053  // It overflows only when both arguments are negative and the true
4054  // product is exactly the minimum negative number.
4055  // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4056  // For simplicity we just check if at least one side is not negative.
4057  KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4058  nullptr, UseInstrInfo);
4059  KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4060  nullptr, UseInstrInfo);
4061  if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4062  return OverflowResult::NeverOverflows;
4063  }
4064  return OverflowResult::MayOverflow;
4065 }
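// Worked example of the sign-bit rule above: for 'mul i32 %a, %b' where both
// operands are sign-extended from i16 (at least 17 sign bits each), SignBits
// is at least 34, which exceeds BitWidth + 1 = 33, so the multiply can never
// overflow i32.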
4066 
4067 OverflowResult llvm::computeOverflowForUnsignedAdd(
4068  const Value *LHS, const Value *RHS, const DataLayout &DL,
4069  AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4070  bool UseInstrInfo) {
4071  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4072  LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4073  nullptr, UseInstrInfo);
4074  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4075  RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4076  nullptr, UseInstrInfo);
4077  return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4078 }
4079 
4080 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4081  const Value *RHS,
4082  const AddOperator *Add,
4083  const DataLayout &DL,
4084  AssumptionCache *AC,
4085  const Instruction *CxtI,
4086  const DominatorTree *DT) {
4087  if (Add && Add->hasNoSignedWrap()) {
4088  return OverflowResult::NeverOverflows;
4089  }
4090 
4091  // If LHS and RHS each have at least two sign bits, the addition will look
4092  // like
4093  //
4094  // XX..... +
4095  // YY.....
4096  //
4097  // If the carry into the most significant position is 0, X and Y can't both
4098  // be 1 and therefore the carry out of the addition is also 0.
4099  //
4100  // If the carry into the most significant position is 1, X and Y can't both
4101  // be 0 and therefore the carry out of the addition is also 1.
4102  //
4103  // Since the carry into the most significant position is always equal to
4104  // the carry out of the addition, there is no signed overflow.
4105  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4106  ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4107  return OverflowResult::NeverOverflows;
4108 
4109  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4110  LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4111  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4112  RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4113  OverflowResult OR =
4114  mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4115  if (OR != OverflowResult::MayOverflow)
4116  return OR;
4117 
4118  // The remaining code needs Add to be available. Return early if it is not.
4119  if (!Add)
4120  return OverflowResult::MayOverflow;
4121 
4122  // If the sign of Add is the same as at least one of the operands, this add
4123  // CANNOT overflow. If this can be determined from the known bits of the
4124  // operands the above signedAddMayOverflow() check will have already done so.
4125  // The only other way to improve on the known bits is from an assumption, so
4126  // call computeKnownBitsFromAssume() directly.
4127  bool LHSOrRHSKnownNonNegative =
4128  (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4129  bool LHSOrRHSKnownNegative =
4130  (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4131  if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4132  KnownBits AddKnown(LHSRange.getBitWidth());
4133  computeKnownBitsFromAssume(
4134  Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4135  if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4136  (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4137  return OverflowResult::NeverOverflows;
4138  }
4139 
4140  return OverflowResult::MayOverflow;
4141 }
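// Worked 4-bit example of the sign-bit argument above: with at least two sign
// bits each, both operands lie in [-4, 3], so their sum lies in [-8, 6],
// which still fits in the i4 range [-8, 7], so no signed overflow is possible.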
4142 
4143 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4144  const Value *RHS,
4145  const DataLayout &DL,
4146  AssumptionCache *AC,
4147  const Instruction *CxtI,
4148  const DominatorTree *DT) {
4149  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4150  LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4151  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4152  RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
4153  return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
4154 }
4155 
4156 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
4157  const Value *RHS,
4158  const DataLayout &DL,
4159  AssumptionCache *AC,
4160  const Instruction *CxtI,
4161  const DominatorTree *DT) {
4162  // If LHS and RHS each have at least two sign bits, the subtraction
4163  // cannot overflow.
4164  if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4165  ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4166  return OverflowResult::NeverOverflows;
4167 
4168  ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4169  LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4170  ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4171  RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4172  return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
4173 }
4174 
4175 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
4176  const DominatorTree &DT) {
4177  SmallVector<const BranchInst *, 2> GuardingBranches;
4178  SmallVector<const ExtractValueInst *, 2> Results;
4179 
4180  for (const User *U : WO->users()) {
4181  if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
4182  assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
4183 
4184  if (EVI->getIndices()[0] == 0)
4185  Results.push_back(EVI);
4186  else {
4187  assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
4188 
4189  for (const auto *U : EVI->users())
4190  if (const auto *B = dyn_cast<BranchInst>(U)) {
4191  assert(B->isConditional() && "How else is it using an i1?");
4192  GuardingBranches.push_back(B);
4193  }
4194  }
4195  } else {
4196  // We are using the aggregate directly in a way we don't want to analyze
4197  // here (storing it to a global, say).
4198  return false;
4199  }
4200  }
4201 
4202  auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
4203  BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
4204  if (!NoWrapEdge.isSingleEdge())
4205  return false;
4206 
4207  // Check if all users of the add are provably no-wrap.
4208  for (const auto *Result : Results) {
4209  // If the extractvalue itself is not executed on overflow, then we don't
4210  // need to check each use separately, since domination is transitive.
4211  if (DT.dominates(NoWrapEdge, Result->getParent()))
4212  continue;
4213 
4214  for (auto &RU : Result->uses())
4215  if (!DT.dominates(NoWrapEdge, RU))
4216  return false;
4217  }
4218 
4219  return true;
4220  };
4221 
4222  return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
4223 }
4224 
4225 
4226 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
4227  const DataLayout &DL,
4228  AssumptionCache *AC,
4229  const Instruction *CxtI,
4230  const DominatorTree *DT) {
4231  return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
4232  Add, DL, AC, CxtI, DT);
4233 }
4234 
4235 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
4236  const Value *RHS,
4237  const DataLayout &DL,
4238  AssumptionCache *AC,
4239  const Instruction *CxtI,
4240  const DominatorTree *DT) {
4241  return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
4242 }
4243 
4244 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
4245  // Note: An atomic operation isn't guaranteed to return in a reasonable amount
4246  // of time because it's possible for another thread to interfere with it for an
4247  // arbitrary length of time, but programs aren't allowed to rely on that.
4248 
4249  // If there is no successor, then execution can't transfer to it.
4250  if (const auto *CRI = dyn_cast<CleanupReturnInst>(I))
4251  return !CRI->unwindsToCaller();
4252  if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I))
4253  return !CatchSwitch->unwindsToCaller();
4254  if (isa<ResumeInst>(I))
4255  return false;
4256  if (isa<ReturnInst>(I))
4257  return false;
4258  if (isa<UnreachableInst>(I))
4259  return false;
4260 
4261  // Calls can throw, or contain an infinite loop, or kill the process.
4262  if (auto CS = ImmutableCallSite(I)) {
4263  // Call sites that throw have implicit non-local control flow.
4264  if (!CS.doesNotThrow())
4265  return false;
4266 
4267  // A function which doesn't throw and has the "willreturn" attribute will
4268  // always return.
4269  if (CS.hasFnAttr(Attribute::WillReturn))
4270  return true;
4271 
4272  // Non-throwing call sites can loop infinitely, call exit/pthread_exit
4273  // etc. and thus not return. However, LLVM already assumes that
4274  //
4275  // - Thread exiting actions are modeled as writes to memory invisible to
4276  // the program.
4277  //
4278  // - Loops that don't have side effects (side effects are volatile/atomic
4279  // stores and IO) always terminate (see http://llvm.org/PR965).
4280  // Furthermore IO itself is also modeled as writes to memory invisible to
4281  // the program.
4282  //
4283  // We rely on those assumptions here, and use the memory effects of the call
4284  // target as a proxy for checking that it always returns.
4285 
4286  // FIXME: This isn't aggressive enough; a call which only writes to a global
4287  // is guaranteed to return.
4288  return CS.onlyReadsMemory() || CS.onlyAccessesArgMemory();
4289  }
4290 
4291  // Other instructions return normally.
4292  return true;
4293 }
4294 
4295 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
4296  // TODO: This is slightly conservative for invoke instructions, since exiting
4297  // via an exception *is* normal control for them.
4298  for (auto I = BB->begin(), E = BB->end(); I != E; ++I)
4299  if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
4300  return false;
4301  return true;
4302 }
4303 
4304 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
4305  const Loop *L) {
4306  // The loop header is guaranteed to be executed for every iteration.
4307  //
4308  // FIXME: Relax this constraint to cover all basic blocks that are
4309  // guaranteed to be executed at every iteration.
4310  if (I->getParent() != L->getHeader()) return false;
4311 
4312  for (const Instruction &LI : *L->getHeader()) {
4313  if (&LI == I) return true;
4314  if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
4315  }
4316  llvm_unreachable("Instruction not contained in its own parent basic block.");
4317 }
4318 
4319 bool llvm::propagatesFullPoison(const Instruction *I) {
4320  // TODO: This should include all instructions apart from phis, selects and
4321  // call-like instructions.
4322  switch (I->getOpcode()) {
4323  case Instruction::Add:
4324  case Instruction::Sub:
4325  case Instruction::Xor:
4326  case Instruction::Trunc:
4327  case Instruction::BitCast:
4328  case Instruction::AddrSpaceCast:
4329  case Instruction::Mul:
4330  case Instruction::Shl:
4331  case Instruction::GetElementPtr:
4332  // These operations all propagate poison unconditionally. Note that poison
4333  // is not any particular value, so xor or subtraction of poison with
4334  // itself still yields poison, not zero.
4335  return true;
4336 
4337  case Instruction::AShr:
4338  case Instruction::SExt:
4339  // For these operations, one bit of the input is replicated across
4340  // multiple output bits. A replicated poison bit is still poison.
4341  return true;
4342 
4343  case Instruction::ICmp:
4344  // Comparing poison with any value yields poison. This is why, for
4345  // instance, x s< (x +nsw 1) can be folded to true.
4346  return true;
4347 
4348  default:
4349  return false;
4350  }
4351 }
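// For example, if %p is poison then '%x = add i32 %p, 1' is poison as well,
// whereas a select is deliberately not listed above: 'select i1 %c, i32 %p,
// i32 0' need not be poison when %c picks the other operand.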
4352 
4353 const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
4354  switch (I->getOpcode()) {
4355  case Instruction::Store:
4356  return cast<StoreInst>(I)->getPointerOperand();
4357 
4358  case Instruction::Load:
4359  return cast<LoadInst>(I)->getPointerOperand();
4360 
4361  case Instruction::AtomicCmpXchg:
4362  return cast<AtomicCmpXchgInst>(I)->getPointerOperand();
4363 
4364  case Instruction::AtomicRMW:
4365  return cast<AtomicRMWInst>(I)->getPointerOperand();
4366 
4367  case Instruction::UDiv:
4368  case Instruction::SDiv:
4369  case Instruction::URem:
4370  case Instruction::SRem:
4371  return I->getOperand(1);
4372 
4373  default:
4374  // Note: It's really tempting to think that a conditional branch or
4375  // switch should be listed here, but that's incorrect. Branching on
4376  // poison is not itself UB; the UB is executing a side-effecting
4377  // instruction which follows the branch.
4378  return nullptr;
4379  }
4380 }
4381 
4382 bool llvm::mustTriggerUB(const Instruction *I,
4383  const SmallSet<const Value *, 16>& KnownPoison) {
4384  auto *NotPoison = getGuaranteedNonFullPoisonOp(I);
4385  return (NotPoison && KnownPoison.count(NotPoison));
4386 }
4387 
4388 
4389 bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
4390  // We currently only look for uses of poison values within the same basic
4391  // block, as that makes it easier to guarantee that the uses will be
4392  // executed given that PoisonI is executed.
4393  //
4394  // FIXME: Expand this to consider uses beyond the same basic block. To do
4395  // this, look out for the distinction between post-dominance and strong
4396  // post-dominance.
4397  const BasicBlock *BB = PoisonI->getParent();
4398 
4399  // Set of instructions that we have proved will yield poison if PoisonI
4400  // does.
4401  SmallSet<const Value *, 16> YieldsPoison;
4402  SmallSet<const BasicBlock *, 4> Visited;
4403  YieldsPoison.insert(PoisonI);
4404  Visited.insert(PoisonI->getParent());
4405 
4406  BasicBlock::const_iterator Begin = PoisonI->getIterator(), End = BB->end();
4407 
4408  unsigned Iter = 0;
4409  while (Iter++ < MaxDepth) {
4410  for (auto &I : make_range(Begin, End)) {
4411  if (&I != PoisonI) {
4412  if (mustTriggerUB(&I, YieldsPoison))
4413  return true;
4414  if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4415  return false;
4416  }
4417 
4418  // Mark poison that propagates from I through uses of I.
4419  if (YieldsPoison.count(&I)) {
4420  for (const User *User : I.users()) {
4421  const Instruction *UserI = cast<Instruction>(User);
4422  if (propagatesFullPoison(UserI))
4423  YieldsPoison.insert(User);
4424  }
4425  }
4426  }
4427 
4428  if (auto *NextBB = BB->getSingleSuccessor()) {
4429  if (Visited.insert(NextBB).second) {
4430  BB = NextBB;
4431  Begin = BB->getFirstNonPHI()->getIterator();
4432  End = BB->end();
4433  continue;
4434  }
4435  }
4436 
4437  break;
4438  }
4439  return false;
4440 }
4441 
4442 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
4443  if (FMF.noNaNs())
4444  return true;
4445 
4446  if (auto *C = dyn_cast<ConstantFP>(V))
4447  return !C->isNaN();
4448 
4449  if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4450  if (!C->getElementType()->isFloatingPointTy())
4451  return false;
4452  for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4453  if (C->getElementAsAPFloat(I).isNaN())
4454  return false;
4455  }
4456  return true;
4457  }
4458 
4459  return false;
4460 }
4461 
4462 static bool isKnownNonZero(const Value *V) {
4463  if (auto *C = dyn_cast<ConstantFP>(V))
4464  return !C->isZero();
4465 
4466  if (auto *C = dyn_cast<ConstantDataVector>(V)) {
4467  if (!C->getElementType()->isFloatingPointTy())
4468  return false;
4469  for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
4470  if (C->getElementAsAPFloat(I).isZero())
4471  return false;
4472  }
4473  return true;
4474  }
4475 
4476  return false;
4477 }
4478 
4479 /// Match the clamp pattern for float types, ignoring NaNs and signed zeros.
4480 /// Given a non-min/max outer cmp/select from the clamp pattern, this
4481 /// function recognizes whether it can be substituted by a "canonical"
4482 /// min/max pattern.
4483 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
4484  Value *CmpLHS, Value *CmpRHS,
4485  Value *TrueVal, Value *FalseVal,
4486  Value *&LHS, Value *&RHS) {
4487  // Try to match
4488  // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
4489  // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
4490  // and return description of the outer Max/Min.
4491 
4492  // First, check if select has inverse order:
4493  if (CmpRHS == FalseVal) {
4494  std::swap(TrueVal, FalseVal);
4495  Pred = CmpInst::getInversePredicate(Pred);
4496  }
4497 
4498  // Assume success now. If there's no match, callers should not use these anyway.
4499  LHS = TrueVal;
4500  RHS = FalseVal;
4501 
4502  const APFloat *FC1;
4503  if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
4504  return {SPF_UNKNOWN, SPNB_NA, false};
4505 
4506  const APFloat *FC2;
4507  switch (Pred) {
4508  case CmpInst::FCMP_OLT:
4509  case CmpInst::FCMP_OLE:
4510  case CmpInst::FCMP_ULT:
4511  case CmpInst::FCMP_ULE:
4512  if (match(FalseVal,
4513  m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
4514  m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4515  FC1->compare(*FC2) == APFloat::cmpResult::cmpLessThan)
4516  return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
4517  break;
4518  case CmpInst::FCMP_OGT:
4519  case CmpInst::FCMP_OGE:
4520  case CmpInst::FCMP_UGT:
4521  case CmpInst::FCMP_UGE:
4522  if (match(FalseVal,
4523  m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
4524  m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
4525  FC1->compare(*FC2) == APFloat::cmpResult::cmpGreaterThan)
4526  return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
4527  break;
4528  default:
4529  break;
4530  }
4531 
4532  return {SPF_UNKNOWN, SPNB_NA, false};
4533 }
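// Concretely (illustrative IR), the following select is recognized as
// FMAXNUM(1.0, FMINNUM(%x, 4.0)), i.e. a clamp of %x to [1.0, 4.0]:
//   %cmp2 = fcmp olt float %x, 4.0
//   %min  = select i1 %cmp2, float %x, float 4.0
//   %cmp1 = fcmp olt float %x, 1.0
//   %sel  = select i1 %cmp1, float 1.0, float %min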
4534 
4535 /// Recognize variations of:
4536 /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
4537 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
4538  Value *CmpLHS, Value *CmpRHS,
4539  Value *TrueVal, Value *FalseVal) {
4540  // Swap the select operands and predicate to match the patterns below.
4541  if (CmpRHS != TrueVal) {
4542  Pred = ICmpInst::getSwappedPredicate(Pred);
4543  std::swap(TrueVal, FalseVal);
4544  }
4545  const APInt *C1;
4546  if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
4547  const APInt *C2;
4548  // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
4549  if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4550  C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
4551  return {SPF_SMAX, SPNB_NA, false};
4552 
4553  // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
4554  if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4555  C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
4556  return {SPF_SMIN, SPNB_NA, false};
4557 
4558  // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
4559  if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
4560  C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
4561  return {SPF_UMAX, SPNB_NA, false};
4562 
4563  // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
4564  if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
4565  C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
4566  return {SPF_UMIN, SPNB_NA, false};
4567  }
4568  return {SPF_UNKNOWN, SPNB_NA, false};
4569 }
4570 
4571 /// Recognize variations of:
4572 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
4573 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
4574  Value *CmpLHS, Value *CmpRHS,
4575  Value *TVal, Value *FVal,
4576  unsigned Depth) {
4577  // TODO: Allow FP min/max with nnan/nsz.
4578  assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
4579 
4580  Value *A = nullptr, *B = nullptr;
4581  SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
4582  if (!SelectPatternResult::isMinOrMax(L.Flavor))
4583  return {SPF_UNKNOWN, SPNB_NA, false};
4584 
4585  Value *C = nullptr, *D = nullptr;
4586  SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
4587  if (L.Flavor != R.Flavor)
4588  return {SPF_UNKNOWN, SPNB_NA, false};
4589 
4590  // We have something like: x Pred y ? min(a, b) : min(c, d).
4591  // Try to match the compare to the min/max operations of the select operands.
4592  // First, make sure we have the right compare predicate.
4593  switch (L.Flavor) {
4594  case SPF_SMIN:
4595  if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
4596  Pred = ICmpInst::getSwappedPredicate(Pred);
4597  std::swap(CmpLHS, CmpRHS);
4598  }
4599  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
4600  break;
4601  return {SPF_UNKNOWN, SPNB_NA, false};
4602  case SPF_SMAX:
4603  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
4604  Pred = ICmpInst::getSwappedPredicate(Pred);
4605  std::swap(CmpLHS, CmpRHS);
4606  }
4607  if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
4608  break;
4609  return {SPF_UNKNOWN, SPNB_NA, false};
4610  case SPF_UMIN:
4611  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
4612  Pred = ICmpInst::getSwappedPredicate(Pred);
4613  std::swap(CmpLHS, CmpRHS);
4614  }
4615  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
4616  break;
4617  return {SPF_UNKNOWN, SPNB_NA, false};
4618  case SPF_UMAX:
4619  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
4620  Pred = ICmpInst::getSwappedPredicate(Pred);
4621  std::swap(CmpLHS, CmpRHS);
4622  }
4623  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
4624  break;
4625  return {SPF_UNKNOWN, SPNB_NA, false};
4626  default:
4627  return {SPF_UNKNOWN, SPNB_NA, false};
4628  }
4629 
4630  // If there is a common operand in the already matched min/max and the other
4631  // min/max operands match the compare operands (either directly or inverted),
4632  // then this is min/max of the same flavor.
4633 
4634  // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4635  // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
4636  if (D == B) {
4637  if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4638  match(A, m_Not(m_Specific(CmpRHS)))))
4639  return {L.Flavor, SPNB_NA, false};
4640  }
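  // E.g. (an illustrative sketch with hypothetical %a, %b, %c), where both
  // arms are SPF_SMIN and the common operand is D == B == %b:
  //   %c1 = icmp slt i32 %a, %b
  //   %m1 = select i1 %c1, i32 %a, i32 %b    ; smin(%a, %b): A = %a, B = %b
  //   %c2 = icmp slt i32 %c, %b
  //   %m2 = select i1 %c2, i32 %c, i32 %b    ; smin(%c, %b): C = %c, D = %b
  //   %c3 = icmp slt i32 %a, %c
  //   %r  = select i1 %c3, i32 %m1, i32 %m2  ; matched as SPF_SMIN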
4641  // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4642  // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
4643  if (C == B) {
4644  if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4645  match(A, m_Not(m_Specific(CmpRHS)))))
4646  return {L.Flavor, SPNB_NA, false};
4647  }
4648  // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4649  // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
4650  if (D == A) {
4651  if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
4652  match(B, m_Not(m_Specific(CmpRHS)))))
4653  return {L.Flavor, SPNB_NA, false};
4654  }
4655  // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4656  // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
4657  if (C == A) {
4658  if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
4659  match(B, m_Not(m_Specific(CmpRHS)))))
4660  return {L.Flavor, SPNB_NA, false};
4661  }
4662 
4663  return {SPF_UNKNOWN, SPNB_NA, false};
4664 }
4665 
4666 /// Match non-obvious integer minimum and maximum sequences.
4667 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
4668  Value *CmpLHS, Value *CmpRHS,
4669  Value *TrueVal, Value *FalseVal,
4670  Value *&LHS, Value *&RHS,
4671  unsigned Depth) {
4672  // Assume success. If there's no match, callers should not use these anyway.
4673  LHS = TrueVal;
4674  RHS = FalseVal;
4675 
4676  SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
4677  if (SPR.Flavor != SPF_UNKNOWN)
4678  return SPR;
4679 
4680  SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
4681  if (SPR.Flavor != SPF_UNKNOWN)
4682  return SPR;
4683 
4684  if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
4685  return {SPF_UNKNOWN, SPNB_NA, false};
4686 
4687  // Z = X -nsw Y
4688  // (X >s Y) ? 0 : Z ==> (Z >s 0) ? 0 : Z ==> SMIN(Z, 0)
4689  // (X <s Y) ? 0 : Z ==> (Z <s 0) ? 0 : Z ==> SMAX(Z, 0)
4690  if (match(TrueVal, m_Zero()) &&
4691  match(FalseVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4692  return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4693 
4694  // Z = X -nsw Y
4695  // (X >s Y) ? Z : 0 ==> (Z >s 0) ? Z : 0 ==> SMAX(Z, 0)
4696  // (X <s Y) ? Z : 0 ==> (Z <s 0) ? Z : 0 ==> SMIN(Z, 0)
4697  if (match(FalseVal, m_Zero()) &&
4698  match(TrueVal, m_NSWSub(m_Specific(CmpLHS), m_Specific(CmpRHS))))
4699  return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
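  // E.g. (an illustrative sketch with hypothetical %x and %y):
  //   %z = sub nsw i32 %x, %y
  //   %c = icmp sgt i32 %x, %y
  //   %r = select i1 %c, i32 %z, i32 0       ; matched as SMAX(%z, 0)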
4700 
4701  const APInt *C1;
4702  if (!match(CmpRHS, m_APInt(C1)))
4703  return {SPF_UNKNOWN, SPNB_NA, false};
4704 
4705  // An unsigned min/max can be written with a signed compare.
4706  const APInt *C2;
4707  if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
4708  (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
4709  // Is the sign bit set?
4710  // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
4711  // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
4712  if (Pred == CmpInst::ICMP_SLT && C1->isNullValue() &&
4713  C2->isMaxSignedValue())
4714  return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4715 
4716  // Is the sign bit clear?
4717  // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
4718  // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
4719  if (Pred == CmpInst::ICMP_SGT && C1->isAllOnesValue() &&
4720  C2->isMinSignedValue())
4721  return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
4722  }
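  // E.g. (an illustrative sketch; 2147483647 is the i32 signed maximum):
  //   %c = icmp slt i32 %x, 0
  //   %r = select i1 %c, i32 %x, i32 2147483647  ; matched as UMAX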
4723 
4724  // Look through 'not' ops to find disguised signed min/max.
4725  // (X >s C) ? ~X : ~C ==> (~X <s ~C) ? ~X : ~C ==> SMIN(~X, ~C)
4726  // (X <s C) ? ~X : ~C ==> (~X >s ~C) ? ~X : ~C ==> SMAX(~X, ~C)
4727  if (match(TrueVal, m_Not(m_Specific(CmpLHS))) &&
4728  match(FalseVal, m_APInt(C2)) && ~(*C1) == *C2)
4729  return {Pred == CmpInst::ICMP_SGT ? SPF_SMIN : SPF_SMAX, SPNB_NA, false};
4730 
4731  // (X >s C) ? ~C : ~X ==> (~X <s ~C) ? ~C : ~X ==> SMAX(~C, ~X)
4732  // (X <s C) ? ~C : ~X ==> (~X >s ~C) ? ~C : ~X ==> SMIN(~C, ~X)
4733  if (match(FalseVal, m_Not(m_Specific(CmpLHS))) &&
4734  match(TrueVal, m_APInt(C2)) && ~(*C1) == *C2)
4735  return {Pred == CmpInst::ICMP_SGT ? SPF_SMAX : SPF_SMIN, SPNB_NA, false};
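  // E.g. (an illustrative sketch; note that ~42 == -43):
  //   %c = icmp sgt i32 %x, 42
  //   %n = xor i32 %x, -1
  //   %r = select i1 %c, i32 %n, i32 -43     ; matched as SMIN(~%x, -43)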
4736 
4737  return {SPF_UNKNOWN, SPNB_NA, false};
4738 }
4739 
4740 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
4741  assert(X && Y && "Invalid operand");
4742 
4743  // X = sub (0, Y) || X = sub nsw (0, Y)
4744  if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
4745  (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
4746  return true;
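  // E.g. (an illustrative sketch): isKnownNegation(%x, %y, /*NeedNSW=*/true)
  // holds for:
  //   %x = sub nsw i32 0, %y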
4747 
4748  // Y = sub (0, X) || Y = sub nsw (0, X)
4749  if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
4750  (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
4751  return true;
4752 
4753  // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
4754  Value *A, *B;
4755  return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
4756  match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
4757  (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
4758  match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
4759 }
4760 
4761 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
4762  FastMathFlags FMF,
4763  Value *CmpLHS, Value *CmpRHS,
4764  Value *TrueVal, Value *FalseVal,
4765  Value *&LHS, Value *&RHS,
4766  unsigned Depth) {
4767  if (CmpInst::isFPPredicate(Pred)) {
4768  // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
4769  // 0.0 operand, set the compare's 0.0 operands to that same value for the
4770  // purpose of identifying min/max. Disregard vector constants with undefined
4771  // elements because those cannot be back-propagated for analysis.
4772  Value *OutputZeroVal = nullptr;
4773  if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
4774  !cast<Constant>(TrueVal)->containsUndefElement())
4775  OutputZeroVal = TrueVal;
4776  else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
4777  !cast<Constant>(FalseVal)->containsUndefElement())
4778  OutputZeroVal = FalseVal;
4779 
4780  if (OutputZeroVal) {
4781  if (match(CmpLHS, m_AnyZeroFP()))
4782  CmpLHS = OutputZeroVal;
4783  if (match(CmpRHS, m_AnyZeroFP()))
4784  CmpRHS = OutputZeroVal;
4785  }
4786  }
4787 
4788  LHS = CmpLHS;
4789  RHS = CmpRHS;
4790 
4791  // Operations on signed zero may return inconsistent results between implementations:
4792  // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
4793  // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
4794  // Therefore, we behave conservatively and only proceed if at least one of the
4795  // operands is known not to be zero or if we don't care about signed zero.
4796  switch (Pred) {
4797  default: break;
4798  // FIXME: Include OGT/OLT/UGT/ULT.
4799  case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
4800  case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
4801  if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4802  !isKnownNonZero(CmpRHS))
4803  return {SPF_UNKNOWN, SPNB_NA, false};
4804  }
4805 
4806  SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
4807  bool Ordered = false;
4808 
4809  // When given one NaN and one non-NaN input:
4810  // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
4811  // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
4812  // ordered comparison fails), which could be NaN or non-NaN.
4813  // So here we discover exactly what NaN behavior is required/accepted.
4814  if (CmpInst::isFPPredicate(Pred)) {
4815  bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
4816  bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
4817 
4818  if (LHSSafe && RHSSafe) {
4819  // Both operands are known non-NaN.
4820  NaNBehavior = SPNB_RETURNS_ANY;
4821  } else if (CmpInst::isOrdered(Pred)) {
4822  // An ordered comparison will return false when given a NaN, so it
4823  // returns the RHS.
4824  Ordered = true;
4825  if (LHSSafe)
4826  // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
4827  NaNBehavior = SPNB_RETURNS_NAN;
4828  else if (RHSSafe)
4829  NaNBehavior = SPNB_RETURNS_OTHER;
4830  else
4831  // Completely unsafe.
4832  return {SPF_UNKNOWN, SPNB_NA, false};
4833  } else {
4834  Ordered = false;
4835  // An unordered comparison will return true when given a NaN, so it
4836  // returns the LHS.
4837  if (LHSSafe)
4838  // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
4839  NaNBehavior = SPNB_RETURNS_OTHER;
4840  else if (RHSSafe)
4841  NaNBehavior = SPNB_RETURNS_NAN;
4842  else
4843  // Completely unsafe.
4844  return {SPF_UNKNOWN, SPNB_NA, false};
4845  }
4846  }
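  // E.g. (an illustrative sketch): if %a is known non-NaN but %b is not, then
  // for
  //   %c = fcmp olt double %a, %b            ; false whenever %b is NaN
  //   %r = select i1 %c, double %a, double %b
  // a NaN %b makes the select return %b, so NaNBehavior here is
  // SPNB_RETURNS_NAN and Ordered is true.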
4847 
4848  if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
4849  std::swap(CmpLHS, CmpRHS);
4850  Pred = CmpInst::getSwappedPredicate(Pred);
4851  if (NaNBehavior == SPNB_RETURNS_NAN)
4852  NaNBehavior = SPNB_RETURNS_OTHER;
4853  else if (NaNBehavior == SPNB_RETURNS_OTHER)
4854  NaNBehavior = SPNB_RETURNS_NAN;
4855  Ordered = !Ordered;
4856  }
4857 
4858  // ([if]cmp X, Y) ? X : Y
4859  if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
4860  switch (Pred) {
4861  default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
4862  case ICmpInst::ICMP_UGT:
4863  case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
4864  case ICmpInst::ICMP_SGT:
4865  case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
4866  case ICmpInst::ICMP_ULT:
4867  case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
4868  case ICmpInst::ICMP_SLT:
4869  case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
4870  case FCmpInst::FCMP_UGT:
4871  case FCmpInst::FCMP_UGE:
4872  case FCmpInst::FCMP_OGT:
4873  case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
4874  case FCmpInst::FCMP_ULT:
4875  case FCmpInst::FCMP_ULE:
4876  case FCmpInst::FCMP_OLT:
4877  case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
4878  }
4879  }
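  // E.g. (an illustrative sketch): with nnan both operands are known non-NaN,
  // so
  //   %c = fcmp nnan olt double %a, %b
  //   %r = select i1 %c, double %a, double %b
  // is matched as {SPF_FMINNUM, SPNB_RETURNS_ANY, false}.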
4880 
4881  if (isKnownNegation(TrueVal, FalseVal)) {
4882  // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
4883  // match against either LHS or sext(LHS).
4884  auto MaybeSExtCmpLHS =
4885  m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
4886  auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
4887  auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
4888  if (match(TrueVal, MaybeSExtCmpLHS)) {
4889  // Set the return values. If the compare uses the negated value (-X >s 0),
4890  // swap the return values because the negated value is always 'RHS'.
4891  LHS = TrueVal;
4892  RHS = FalseVal;
4893  if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
4894  std::swap(LHS, RHS);
4895 
4896  // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
4897  // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
4898  if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
4899  return {SPF_ABS, SPNB_NA, false};
4900 
4901  // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
4902  if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
4903  return {SPF_ABS, SPNB_NA, false};
4904 
4905  // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
4906  // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
4907  if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
4908  return {SPF_NABS, SPNB_NA, false};
4909  }
4910  else if (match(FalseVal, MaybeSExtCmpLHS)) {
4911  // Set the return values. If the compare uses the negated value (-X >s 0),
4912  // swap the return values because the negated value is always 'RHS'.
4913  LHS = FalseVal;
4914  RHS = TrueVal;
4915  if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
4916  std::swap(LHS, RHS);
4917 
4918  // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
4919  // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
4920  if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
4921  return {SPF_NABS, SPNB_NA, false};
4922 
4923  // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
4924  // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
4925  if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
4926  return {SPF_ABS, SPNB_NA, false};
4927  }
4928  }
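  // E.g. (an illustrative sketch of the ABS form above):
  //   %n = sub i32 0, %x
  //   %c = icmp sgt i32 %x, -1
  //   %r = select i1 %c, i32 %x, i32 %n      ; ABS(%x) with LHS = %x, RHS = %n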
4929 
4930  if (CmpInst::isIntPredicate(Pred))
4931  return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
4932 
4933  // According to IEEE 754-2008 (5.3.1), minNum(0.0, -0.0) and similar
4934  // may return either -0.0 or 0.0, so an fcmp/select pair has stricter
4935  // semantics than minNum. Be conservative in such cases.
4936  if (NaNBehavior != SPNB_RETURNS_ANY ||
4937  (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
4938  !isKnownNonZero(CmpRHS)))
4939  return {SPF_UNKNOWN, SPNB_NA, false};
4940 
4941  return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
4942 }
4943 
4944 /// Helps to match a select pattern in case of a type mismatch.
4945 ///
4946 /// The function handles the case when the types of the true and false values
4947 /// of a select instruction differ from the types of the cmp instruction's
4948 /// operands because of a cast instruction. The function checks if it is legal
4949 /// to move the cast operation after the "select". If yes, it returns the new
4950 /// second value of the "select" (with the assumption that the cast is moved):
4951 /// 1. As the operand of the cast instruction when both values of the "select"
4952 /// are the same cast instructions.
4953 /// 2. As the restored constant (by applying the reverse cast operation) when
4954 /// the first value of the "select" is a cast operation and the second value
4955 /// is a constant.
4956 /// NOTE: We return only the new second value because the first value can be
4957 /// accessed as the operand of the cast instruction.
4958 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
4959  Instruction::CastOps *CastOp) {
4960  auto *Cast1 = dyn_cast<CastInst>(V1);
4961  if (!Cast1)
4962  return nullptr;
4963 
4964  *CastOp = Cast1->getOpcode();
4965  Type *SrcTy = Cast1->getSrcTy();
4966  if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
4967  // If V1 and V2 are both the same cast from the same type, look through V1.
4968  if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
4969  return Cast2->getOperand(0);
4970  return nullptr;
4971  }
4972 
4973  auto *C = dyn_cast<Constant>(V2);
4974  if (!C)
4975  return nullptr;
4976 
4977  Constant *CastedTo = nullptr;
4978  switch (*CastOp) {
4979  case Instruction::ZExt:
4980  if (CmpI->isUnsigned())
4981  CastedTo = ConstantExpr::getTrunc(C, SrcTy);
4982  break;
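    // E.g. (an illustrative sketch): for
    //   %c = icmp ult i8 %x, 100
    //   %w = zext i8 %x to i32
    //   %r = select i1 %c, i32 %w, i32 100
    // the i32 constant 100 is truncated back to i8, so the caller can match
    // UMIN(%x, 100) in i8 with the zext moved below the select.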
4983  case Instruction::SExt:
4984  if (CmpI->isSigned())
4985  CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
4986  break;
4987  case Instruction::Trunc:
4988  Constant *CmpConst;
4989  if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
4990  CmpConst->getType() == SrcTy) {
4991  // Here we have the following case:
4992  //
4993  // %cond = cmp iN %x, CmpConst
4994  // %tr = trunc iN %x to iK
4995  // %narrowsel = select i1 %cond, iK %tr, iK C
4996  //
4997  // We can always move trunc after select operation:
4998  //
4999  // %cond = cmp iN %x, CmpConst
5000  // %widesel = select i1 %cond, iN %x, iN CmpConst
5001  // %tr = trunc iN %widesel to iK
5002  //
5003  // Note that C could be extended in any way because we don't care about
5004  // the upper bits after truncation. It can't be an abs pattern, because
5005  // that would look like:
5006  //
5007  // select i1 %cond, x, -x.
5008  //
5009  // So only a min/max pattern can be matched. Such a match requires the
5010  // widened C == CmpConst. That is why we set the widened C = CmpConst;
5011  // the condition trunc(CmpConst) == C is checked below.
5012  CastedTo = CmpConst;
5013  } else {
5014  CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
5015  }
5016  break;
5017  case Instruction::FPTrunc:
5018  CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
5019  break;
5020  case Instruction::FPExt:
5021  CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
5022  break;
5023  case Instruction::FPToUI:
5024  CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
5025  break;
5026  case Instruction::FPToSI:
5027  CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
5028  break;
5029  case Instruction::UIToFP:
5030  CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
5031  break;
5032  case Instruction::SIToFP:
5033  CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
5034  break;
5035  default:
5036  break;
5037  }
5038 
5039  if (!CastedTo)
5040  return nullptr;
5041 
5042  // Make sure the cast doesn't lose any information.
5043  Constant *CastedBack =
5044  ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
5045  if (CastedBack != C)
5046  return nullptr;
5047 
5048  return CastedTo;
5049 }
5050 
5051 SelectPatternResult llvm::matchSelectPattern(const Value *V, Value *&LHS, Value *&RHS,
5052  Instruction::CastOps *CastOp,
5053  unsigned Depth) {
5054  if (Depth >= MaxDepth)
5055  return {SPF_UNKNOWN, SPNB_NA, false};
5056 
5057  SelectInst *SI = dyn_cast<SelectInst>(const_cast<Value *>(V));
5058  if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
5059 
5060  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
5061  if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
5062 
5063  Value *TrueVal = SI->getTrueValue();
5064  Value *FalseVal = SI->getFalseValue();
5065 
5066  return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
5067  CastOp, Depth);
5068 }
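// A typical (hypothetical) client of the matchSelectPattern overload above:
//   Value *LHS, *RHS;
//   SelectPatternResult SPR = matchSelectPattern(Sel, LHS, RHS);
//   if (SelectPatternResult::isMinOrMax(SPR.Flavor))
//     ...; // Sel computes a min/max of LHS and RHS.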
5069 
5070 SelectPatternResult llvm::matchDecomposedSelectPattern(
5071  CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
5072  Instruction::CastOps *CastOp, unsigned Depth) {
5073  CmpInst::Predicate Pred = CmpI->getPredicate();
5074  Value *CmpLHS = CmpI->getOperand(0);
5075  Value *CmpRHS = CmpI->getOperand(1);
5076  FastMathFlags FMF;
5077  if (isa<FPMathOperator>(CmpI))
5078  FMF = CmpI->getFastMathFlags();
5079 
5080  // Bail out early.
5081  if (CmpI->isEquality())
5082  return {SPF_UNKNOWN, SPNB_NA, false};
5083 
5084  // Deal with type mismatches.
5085  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
5086  if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
5087  // If this is a potential fmin/fmax with a cast to integer, then ignore
5088  // -0.0 because there is no corresponding integer value.
5089  if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5090  FMF.setNoSignedZeros();
5091  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5092  cast<CastInst>(TrueVal)->getOperand(0), C,
5093  LHS, RHS, Depth);
5094  }
5095  if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
5096  // If this is a potential fmin/fmax with a cast to integer, then ignore
5097  // -0.0 because there is no corresponding integer value.
5098  if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
5099  FMF.setNoSignedZeros();
5100  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
5101  C, cast<CastInst>(FalseVal)->getOperand(0),
5102  LHS, RHS, Depth);
5103  }
5104  }
5105  return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
5106  LHS, RHS, Depth);
5107 }
5108 
5109 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
5110  if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
5111  if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
5112  if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
5113  if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
5114  if (SPF == SPF_FMINNUM)
5115  return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
5116  if (SPF == SPF_FMAXNUM)
5117  return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
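  // E.g. getMinMaxPred(SPF_FMINNUM, /*Ordered=*/true) yields FCMP_OLT, which
  // reconstructs an ordered fmin as (fcmp olt A, B) ? A : B.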
5118  llvm_unreachable("unhandled!");
5119 }
5120 
5121 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
5122  if (SPF == SPF_SMIN) return SPF_SMAX;
5123  if (SPF == SPF_UMIN) return SPF_UMAX;
5124  if (SPF == SPF_SMAX) return SPF_SMIN;
5125  if (SPF == SPF_UMAX) return SPF_UMIN;
5126  llvm_unreachable("unhandled!");
5127 }
5128 
5129 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
5130  return getMinMaxPred(getInverseMinMaxFlavor(SPF));
5131 }
5132 
5133 /// Return true if "icmp Pred LHS, RHS" is always true.
5134 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
5135  const Value *RHS, const DataLayout &DL,
5136  unsigned Depth) {
5137  assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
5138  if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
5139  return true;
5140 
5141  switch (Pred) {
5142  default:
5143  return false;
5144 
5145  case CmpInst::ICMP_SLE: {
5146  const APInt *C;
5147 
5148  // LHS s<= LHS +_{nsw} C if C >= 0
5149  if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
5150  return !C->isNegative();
5151  return false;
5152  }
5153 
5154  case CmpInst::ICMP_ULE: {
5155  const APInt *C;
5156 
5157  // LHS u<= LHS +_{nuw} C for any C
5158  if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
5159  return true;
5160 
5161  // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
5162  auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
5163  const Value *&X,
5164  const APInt *&CA, const APInt *&CB) {
5165  if (