1//===- ValueTracking.cpp - Walk computations to compute properties --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains routines that help analyze properties that chains of
10// computations have.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/ScopeExit.h"
21#include "llvm/ADT/SmallSet.h"
23#include "llvm/ADT/StringRef.h"
32#include "llvm/Analysis/Loads.h"
38#include "llvm/IR/Argument.h"
39#include "llvm/IR/Attributes.h"
40#include "llvm/IR/BasicBlock.h"
41#include "llvm/IR/Constant.h"
43#include "llvm/IR/Constants.h"
46#include "llvm/IR/Dominators.h"
48#include "llvm/IR/Function.h"
50#include "llvm/IR/GlobalAlias.h"
51#include "llvm/IR/GlobalValue.h"
53#include "llvm/IR/InstrTypes.h"
54#include "llvm/IR/Instruction.h"
57#include "llvm/IR/Intrinsics.h"
58#include "llvm/IR/IntrinsicsAArch64.h"
59#include "llvm/IR/IntrinsicsAMDGPU.h"
60#include "llvm/IR/IntrinsicsRISCV.h"
61#include "llvm/IR/IntrinsicsX86.h"
62#include "llvm/IR/LLVMContext.h"
63#include "llvm/IR/Metadata.h"
64#include "llvm/IR/Module.h"
65#include "llvm/IR/Operator.h"
67#include "llvm/IR/Type.h"
68#include "llvm/IR/User.h"
69#include "llvm/IR/Value.h"
77#include <algorithm>
78#include <cassert>
79#include <cstdint>
80#include <optional>
81#include <utility>
82
83using namespace llvm;
84using namespace llvm::PatternMatch;
85
86// Controls the number of uses of the value searched for possible
87// dominating comparisons.
88static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
89 cl::Hidden, cl::init(20));
90
91
92/// Returns the bitwidth of the given scalar or pointer type. For vector types,
93/// returns the element type's bitwidth.
94static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
95 if (unsigned BitWidth = Ty->getScalarSizeInBits())
96 return BitWidth;
97
98 return DL.getPointerTypeSizeInBits(Ty);
99}
100
101// Given the provided Value and, potentially, a context instruction, return
102// the preferred context instruction (if any).
103static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
104 // If we've been provided with a context instruction, then use that (provided
105 // it has been inserted).
106 if (CxtI && CxtI->getParent())
107 return CxtI;
108
109 // If the value is really an already-inserted instruction, then use that.
110 CxtI = dyn_cast<Instruction>(V);
111 if (CxtI && CxtI->getParent())
112 return CxtI;
113
114 return nullptr;
115}
116
117static const Instruction *safeCxtI(const Value *V1, const Value *V2, const Instruction *CxtI) {
118 // If we've been provided with a context instruction, then use that (provided
119 // it has been inserted).
120 if (CxtI && CxtI->getParent())
121 return CxtI;
122
123 // If the value is really an already-inserted instruction, then use that.
124 CxtI = dyn_cast<Instruction>(V1);
125 if (CxtI && CxtI->getParent())
126 return CxtI;
127
128 CxtI = dyn_cast<Instruction>(V2);
129 if (CxtI && CxtI->getParent())
130 return CxtI;
131
132 return nullptr;
133}
134
135static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
136 const APInt &DemandedElts,
137 APInt &DemandedLHS, APInt &DemandedRHS) {
138 if (isa<ScalableVectorType>(Shuf->getType())) {
139 assert(DemandedElts == APInt(1,1));
140 DemandedLHS = DemandedRHS = DemandedElts;
141 return true;
142 }
143
144 int NumElts =
145 cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
146 return llvm::getShuffleDemandedElts(NumElts, Shuf->getShuffleMask(),
147 DemandedElts, DemandedLHS, DemandedRHS);
148}
149
150static void computeKnownBits(const Value *V, const APInt &DemandedElts,
151 KnownBits &Known, unsigned Depth,
152 const SimplifyQuery &Q);
153
154void llvm::computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
155 const SimplifyQuery &Q) {
156 // Since the number of lanes in a scalable vector is unknown at compile time,
157 // we track one bit which is implicitly broadcast to all lanes. This means
158 // that all lanes in a scalable vector are considered demanded.
159 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
160 APInt DemandedElts =
161 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
162 ::computeKnownBits(V, DemandedElts, Known, Depth, Q);
163}
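// Editorial usage sketch (not part of the upstream source): a typical client
// asks for the known bits of a value through one of the public overloads,
// e.g.
//
//   KnownBits Known = computeKnownBits(V, DL, /*Depth=*/0, AC, CxtI, DT);
//   if (Known.isNonNegative())
//     ...; // the sign bit of V is proven clear at CxtI
//
// Depth bounds the recursion; the analysis gives up at
// MaxAnalysisRecursionDepth.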
164
165void llvm::computeKnownBits(const Value *V, KnownBits &Known,
166 const DataLayout &DL, unsigned Depth,
167 AssumptionCache *AC, const Instruction *CxtI,
168 const DominatorTree *DT, bool UseInstrInfo) {
169 computeKnownBits(
170 V, Known, Depth,
171 SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
172}
173
174KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
175 unsigned Depth, AssumptionCache *AC,
176 const Instruction *CxtI,
177 const DominatorTree *DT, bool UseInstrInfo) {
178 return computeKnownBits(
179 V, Depth, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
180}
181
182KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
183 const DataLayout &DL, unsigned Depth,
184 AssumptionCache *AC, const Instruction *CxtI,
185 const DominatorTree *DT, bool UseInstrInfo) {
186 return computeKnownBits(
187 V, DemandedElts, Depth,
188 SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
189}
190
191static bool haveNoCommonBitsSetSpecialCases(const Value *LHS, const Value *RHS,
192 const SimplifyQuery &SQ) {
193 // Look for an inverted mask: (X & ~M) op (Y & M).
194 {
195 Value *M;
196 if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
197 match(RHS, m_c_And(m_Specific(M), m_Value())) &&
198 isGuaranteedNotToBeUndef(M, SQ.AC, SQ.CxtI, SQ.DT))
199 return true;
200 }
201
202 // X op (Y & ~X)
203 if (match(RHS, m_c_And(m_Not(m_Specific(LHS)), m_Value())) &&
204 isGuaranteedNotToBeUndef(LHS, SQ.AC, SQ.CxtI, SQ.DT))
205 return true;
206
207 // X op ((X & Y) ^ Y) -- this is the canonical form of the previous pattern
208 // for constant Y.
209 Value *Y;
210 if (match(RHS,
211 m_c_Xor(m_c_And(m_Specific(LHS), m_Value(Y)), m_Deferred(Y))) &&
212 isGuaranteedNotToBeUndef(LHS, SQ.AC, SQ.CxtI, SQ.DT) &&
213 isGuaranteedNotToBeUndef(Y, SQ.AC, SQ.CxtI, SQ.DT))
214 return true;
215
216 // Peek through extends to find a 'not' of the other side:
217 // (ext Y) op ext(~Y)
218 if (match(LHS, m_ZExtOrSExt(m_Value(Y))) &&
219 match(RHS, m_ZExtOrSExt(m_Not(m_Specific(Y)))) &&
220 isGuaranteedNotToBeUndef(Y, SQ.AC, SQ.CxtI, SQ.DT))
221 return true;
222
223 // Look for: (A & B) op ~(A | B)
224 {
225 Value *A, *B;
226 if (match(LHS, m_And(m_Value(A), m_Value(B))) &&
227 match(RHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))) &&
228 isGuaranteedNotToBeUndef(A, SQ.AC, SQ.CxtI, SQ.DT) &&
229 isGuaranteedNotToBeUndef(B, SQ.AC, SQ.CxtI, SQ.DT))
230 return true;
231 }
232
233 return false;
234}
235
236bool llvm::haveNoCommonBitsSet(const WithCache<const Value *> &LHSCache,
237 const WithCache<const Value *> &RHSCache,
238 const SimplifyQuery &SQ) {
239 const Value *LHS = LHSCache.getValue();
240 const Value *RHS = RHSCache.getValue();
241
242 assert(LHS->getType() == RHS->getType() &&
243 "LHS and RHS should have the same type");
244 assert(LHS->getType()->isIntOrIntVectorTy() &&
245 "LHS and RHS should be integers");
246
247 if (haveNoCommonBitsSetSpecialCases(LHS, RHS, SQ) ||
248 haveNoCommonBitsSetSpecialCases(RHS, LHS, SQ))
249 return true;
250
251 return KnownBits::haveNoCommonBitsSet(LHSCache.getKnownBits(SQ),
252 RHSCache.getKnownBits(SQ));
253}
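// Editorial example (not part of the upstream source): haveNoCommonBitsSet is
// what lets callers treat an add as a disjoint or. For instance, for
//
//   %a = and i8 %x, 15    ; low nibble only
//   %b = and i8 %y, -16   ; high nibble only
//   %s = add i8 %a, %b
//
// the known-bits fallback proves %a and %b share no set bits, so %s behaves
// like "or disjoint i8 %a, %b"; the special cases above catch the same idea
// for non-constant masks such as (X & ~M) and (Y & M).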
254
255bool llvm::isOnlyUsedInZeroComparison(const Instruction *I) {
256 return !I->user_empty() && all_of(I->users(), [](const User *U) {
257 return match(U, m_ICmp(m_Value(), m_Zero()));
258 });
259}
260
261bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {
262 return !I->user_empty() && all_of(I->users(), [](const User *U) {
263 ICmpInst::Predicate P;
264 return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
265 });
266}
267
268static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
269 const SimplifyQuery &Q);
270
271bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
272 bool OrZero, unsigned Depth,
273 AssumptionCache *AC, const Instruction *CxtI,
274 const DominatorTree *DT, bool UseInstrInfo) {
275 return ::isKnownToBeAPowerOfTwo(
276 V, OrZero, Depth,
277 SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
278}
279
280static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
281 const SimplifyQuery &Q, unsigned Depth);
282
283bool llvm::isKnownNonNegative(const Value *V, const SimplifyQuery &SQ,
284 unsigned Depth) {
285 return computeKnownBits(V, Depth, SQ).isNonNegative();
286}
287
288bool llvm::isKnownPositive(const Value *V, const SimplifyQuery &SQ,
289 unsigned Depth) {
290 if (auto *CI = dyn_cast<ConstantInt>(V))
291 return CI->getValue().isStrictlyPositive();
292
293 // If `isKnownNonNegative` ever becomes more sophisticated, make sure to keep
294 // this updated.
295 KnownBits Known = computeKnownBits(V, Depth, SQ);
296 return Known.isNonNegative() &&
297 (Known.isNonZero() || isKnownNonZero(V, SQ, Depth));
298}
299
300bool llvm::isKnownNegative(const Value *V, const SimplifyQuery &SQ,
301 unsigned Depth) {
302 return computeKnownBits(V, Depth, SQ).isNegative();
303}
304
305static bool isKnownNonEqual(const Value *V1, const Value *V2,
306 const APInt &DemandedElts, unsigned Depth,
307 const SimplifyQuery &Q);
308
309bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
310 const DataLayout &DL, AssumptionCache *AC,
311 const Instruction *CxtI, const DominatorTree *DT,
312 bool UseInstrInfo) {
313 // We don't support looking through casts.
314 if (V1 == V2 || V1->getType() != V2->getType())
315 return false;
316 auto *FVTy = dyn_cast<FixedVectorType>(V1->getType());
317 APInt DemandedElts =
318 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
319 return ::isKnownNonEqual(
320 V1, V2, DemandedElts, 0,
321 SimplifyQuery(DL, DT, AC, safeCxtI(V2, V1, CxtI), UseInstrInfo));
322}
323
324bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
325 const SimplifyQuery &SQ, unsigned Depth) {
326 KnownBits Known(Mask.getBitWidth());
327 computeKnownBits(V, Known, Depth, SQ);
328 return Mask.isSubsetOf(Known.Zero);
329}
330
331static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
332 unsigned Depth, const SimplifyQuery &Q);
333
334static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
335 const SimplifyQuery &Q) {
336 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
337 APInt DemandedElts =
338 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
339 return ComputeNumSignBits(V, DemandedElts, Depth, Q);
340}
341
342unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
343 unsigned Depth, AssumptionCache *AC,
344 const Instruction *CxtI,
345 const DominatorTree *DT, bool UseInstrInfo) {
346 return ::ComputeNumSignBits(
347 V, Depth, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo));
348}
349
350unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
351 unsigned Depth, AssumptionCache *AC,
352 const Instruction *CxtI,
353 const DominatorTree *DT) {
354 unsigned SignBits = ComputeNumSignBits(V, DL, Depth, AC, CxtI, DT);
355 return V->getType()->getScalarSizeInBits() - SignBits + 1;
356}
357
358static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
359 bool NSW, bool NUW,
360 const APInt &DemandedElts,
361 KnownBits &KnownOut, KnownBits &Known2,
362 unsigned Depth, const SimplifyQuery &Q) {
363 computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);
364
365 // If one operand is unknown and we have no nowrap information,
366 // the result will be unknown independently of the second operand.
367 if (KnownOut.isUnknown() && !NSW && !NUW)
368 return;
369
370 computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
371 KnownOut = KnownBits::computeForAddSub(Add, NSW, NUW, Known2, KnownOut);
372}
373
374static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
375 const APInt &DemandedElts, KnownBits &Known,
376 KnownBits &Known2, unsigned Depth,
377 const SimplifyQuery &Q) {
378 computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
379 computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
380
381 bool isKnownNegative = false;
382 bool isKnownNonNegative = false;
383 // If the multiplication is known not to overflow, compute the sign bit.
384 if (NSW) {
385 if (Op0 == Op1) {
386 // The product of a number with itself is non-negative.
387 isKnownNonNegative = true;
388 } else {
389 bool isKnownNonNegativeOp1 = Known.isNonNegative();
390 bool isKnownNonNegativeOp0 = Known2.isNonNegative();
391 bool isKnownNegativeOp1 = Known.isNegative();
392 bool isKnownNegativeOp0 = Known2.isNegative();
393 // The product of two numbers with the same sign is non-negative.
394 isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
395 (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
396 // The product of a negative number and a non-negative number is either
397 // negative or zero.
398 if (!isKnownNonNegative)
399 isKnownNegative =
400 (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
401 Known2.isNonZero()) ||
402 (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
403 }
404 }
405
406 bool SelfMultiply = Op0 == Op1;
407 if (SelfMultiply)
408 SelfMultiply &=
409 isGuaranteedNotToBeUndef(Op0, Q.AC, Q.CxtI, Q.DT, Depth + 1);
410 Known = KnownBits::mul(Known, Known2, SelfMultiply);
411
412 // Only make use of no-wrap flags if we failed to compute the sign bit
413 // directly. This matters if the multiplication always overflows, in
414 // which case we prefer to follow the result of the direct computation,
415 // though as the program is invoking undefined behaviour we can choose
416 // whatever we like here.
417 if (isKnownNonNegative && !Known.isNegative())
418 Known.makeNonNegative();
419 else if (isKnownNegative && !Known.isNonNegative())
420 Known.makeNegative();
421}
422
423void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
424 KnownBits &Known) {
425 unsigned BitWidth = Known.getBitWidth();
426 unsigned NumRanges = Ranges.getNumOperands() / 2;
427 assert(NumRanges >= 1);
428
429 Known.Zero.setAllBits();
430 Known.One.setAllBits();
431
432 for (unsigned i = 0; i < NumRanges; ++i) {
433 ConstantInt *Lower =
434 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
435 ConstantInt *Upper =
436 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
437 ConstantRange Range(Lower->getValue(), Upper->getValue());
438
439 // The first CommonPrefixBits of all values in Range are equal.
440 unsigned CommonPrefixBits =
441 (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countl_zero();
442 APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
443 APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
444 Known.One &= UnsignedMax & Mask;
445 Known.Zero &= ~UnsignedMax & Mask;
446 }
447}
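// Editorial worked example (not part of the upstream source): for a single
// !range of [10, 12) on an i8 load, Min = 10 (0b00001010) and UnsignedMax =
// 11 (0b00001011); Min ^ Max = 1 has seven leading zeros, so the top seven
// bits are common to every value in the range. The loop above therefore
// leaves Known.One = 0b00001010 and Known.Zero = 0b11110100, i.e. only bit 0
// remains unknown.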
448
449 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
450 SmallVector<const Value *, 16> WorkSet(1, I);
451 SmallPtrSet<const Value *, 32> Visited;
452 SmallPtrSet<const Value *, 16> EphValues;
453
454 // The instruction defining an assumption's condition itself is always
455 // considered ephemeral to that assumption (even if it has other
456 // non-ephemeral users). See r246696's test case for an example.
457 if (is_contained(I->operands(), E))
458 return true;
459
460 while (!WorkSet.empty()) {
461 const Value *V = WorkSet.pop_back_val();
462 if (!Visited.insert(V).second)
463 continue;
464
465 // If all uses of this value are ephemeral, then so is this value.
466 if (llvm::all_of(V->users(), [&](const User *U) {
467 return EphValues.count(U);
468 })) {
469 if (V == E)
470 return true;
471
472 if (V == I || (isa<Instruction>(V) &&
473 !cast<Instruction>(V)->mayHaveSideEffects() &&
474 !cast<Instruction>(V)->isTerminator())) {
475 EphValues.insert(V);
476 if (const User *U = dyn_cast<User>(V))
477 append_range(WorkSet, U->operands());
478 }
479 }
480 }
481
482 return false;
483}
484
485// Is this an intrinsic that cannot be speculated but also cannot trap?
486static bool isAssumeLikeIntrinsic(const Instruction *I) {
487 if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
488 return CI->isAssumeLikeIntrinsic();
489
490 return false;
491}
492
493bool llvm::isValidAssumeForContext(const Instruction *Inv,
494 const Instruction *CxtI,
495 const DominatorTree *DT,
496 bool AllowEphemerals) {
497 // There are two restrictions on the use of an assume:
498 // 1. The assume must dominate the context (or the control flow must
499 // reach the assume whenever it reaches the context).
500 // 2. The context must not be in the assume's set of ephemeral values
501 // (otherwise we will use the assume to prove that the condition
502 // feeding the assume is trivially true, thus causing the removal of
503 // the assume).
504
505 if (Inv->getParent() == CxtI->getParent()) {
506 // If Inv and CtxI are in the same block, check if the assume (Inv) is first
507 // in the BB.
508 if (Inv->comesBefore(CxtI))
509 return true;
510
511 // Don't let an assume affect itself - this would cause the problems
512 // `isEphemeralValueOf` is trying to prevent, and it would also make
513 // the loop below go out of bounds.
514 if (!AllowEphemerals && Inv == CxtI)
515 return false;
516
517 // The context comes first, but they're both in the same block.
518 // Make sure there is nothing in between that might interrupt
519 // the control flow, not even CxtI itself.
520 // We limit the scan distance between the assume and its context instruction
521 // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
522 // it can be adjusted if needed (could be turned into a cl::opt).
523 auto Range = make_range(CxtI->getIterator(), Inv->getIterator());
524 if (!isGuaranteedToTransferExecutionToSuccessor(Range, 15))
525 return false;
526
527 return AllowEphemerals || !isEphemeralValueOf(Inv, CxtI);
528 }
529
530 // Inv and CxtI are in different blocks.
531 if (DT) {
532 if (DT->dominates(Inv, CxtI))
533 return true;
534 } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
535 // We don't have a DT, but this trivially dominates.
536 return true;
537 }
538
539 return false;
540}
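// Editorial example (not part of the upstream source): in
//
//   %c = icmp ugt i32 %x, 7
//   call void @llvm.assume(i1 %c)
//   %d = udiv i32 %y, %x      ; context instruction
//
// the assume is valid for the context because it comes before %d in the same
// block, so a query at %d may rely on %x u> 7 (and hence %x != 0).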
541
542// TODO: cmpExcludesZero misses many cases where `RHS` is non-constant but
543// we still have enough information about `RHS` to conclude non-zero. For
544// example Pred=EQ, RHS=isKnownNonZero. cmpExcludesZero is called in loops
545// so the extra compile time may not be worth it, but possibly a second API
546// should be created for use outside of loops.
547static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
548 // v u> y implies v != 0.
549 if (Pred == ICmpInst::ICMP_UGT)
550 return true;
551
552 // Special-case v != 0 to also handle v != null.
553 if (Pred == ICmpInst::ICMP_NE)
554 return match(RHS, m_Zero());
555
556 // All other predicates - rely on generic ConstantRange handling.
557 const APInt *C;
558 APInt Zero = APInt::getZero(RHS->getType()->getScalarSizeInBits());
559 if (match(RHS, m_APInt(C))) {
560 ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
561 return !TrueValues.contains(Zero);
562 }
563
564 auto *VC = dyn_cast<ConstantDataVector>(RHS);
565 if (VC == nullptr)
566 return false;
567
568 for (unsigned ElemIdx = 0, NElem = VC->getNumElements(); ElemIdx < NElem;
569 ++ElemIdx) {
570 ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(
571 Pred, VC->getElementAsAPInt(ElemIdx));
572 if (TrueValues.contains(Zero))
573 return false;
574 }
575 return true;
576}
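// Editorial example (not part of the upstream source): cmpExcludesZero(ult, 8)
// builds the exact region [0, 8), which contains zero, so it returns false;
// cmpExcludesZero(ugt, C) returns true for any C because any v with v u> C is
// at least 1.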
577
578static bool isKnownNonZeroFromAssume(const Value *V, const SimplifyQuery &Q) {
579 // Use of assumptions is context-sensitive. If we don't have a context, we
580 // cannot use them!
581 if (!Q.AC || !Q.CxtI)
582 return false;
583
584 for (AssumptionCache::ResultElem &Elem : Q.AC->assumptionsFor(V)) {
585 if (!Elem.Assume)
586 continue;
587
588 AssumeInst *I = cast<AssumeInst>(Elem.Assume);
589 assert(I->getFunction() == Q.CxtI->getFunction() &&
590 "Got assumption for the wrong function!");
591
592 if (Elem.Index != AssumptionCache::ExprResultIdx) {
593 if (!V->getType()->isPointerTy())
594 continue;
595 if (RetainedKnowledge RK = getKnowledgeFromBundle(
596 *I, I->bundle_op_info_begin()[Elem.Index])) {
597 if (RK.WasOn == V &&
598 (RK.AttrKind == Attribute::NonNull ||
599 (RK.AttrKind == Attribute::Dereferenceable &&
600 !NullPointerIsDefined(Q.CxtI->getFunction(),
601 V->getType()->getPointerAddressSpace()))) &&
602 isValidAssumeForContext(I, Q.CxtI, Q.DT))
603 return true;
604 }
605 continue;
606 }
607
608 // Warning: This loop can end up being somewhat performance sensitive.
609 // We're running this loop once for each value queried, resulting in a
610 // runtime of ~O(#assumes * #values).
611
612 Value *RHS;
613 CmpInst::Predicate Pred;
614 auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
615 if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
616 return false;
617
618 if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
619 return true;
620 }
621
622 return false;
623}
624
625static void computeKnownBitsFromCmp(const Value *V, CmpInst::Predicate Pred,
626 Value *LHS, Value *RHS, KnownBits &Known,
627 const SimplifyQuery &Q) {
628 if (RHS->getType()->isPointerTy()) {
629 // Handle comparison of pointer to null explicitly, as it will not be
630 // covered by the m_APInt() logic below.
631 if (LHS == V && match(RHS, m_Zero())) {
632 switch (Pred) {
633 case ICmpInst::ICMP_EQ:
634 Known.setAllZero();
635 break;
636 case ICmpInst::ICMP_SGE:
637 case ICmpInst::ICMP_SGT:
638 Known.makeNonNegative();
639 break;
640 case ICmpInst::ICMP_SLT:
641 Known.makeNegative();
642 break;
643 default:
644 break;
645 }
646 }
647 return;
648 }
649
650 unsigned BitWidth = Known.getBitWidth();
651 auto m_V =
652 m_CombineOr(m_Specific(V), m_PtrToIntSameSize(Q.DL, m_Specific(V)));
653
654 Value *Y;
655 const APInt *Mask, *C;
656 uint64_t ShAmt;
657 switch (Pred) {
658 case ICmpInst::ICMP_EQ:
659 // assume(V = C)
660 if (match(LHS, m_V) && match(RHS, m_APInt(C))) {
661 Known = Known.unionWith(KnownBits::makeConstant(*C));
662 // assume(V & Mask = C)
663 } else if (match(LHS, m_c_And(m_V, m_Value(Y))) &&
664 match(RHS, m_APInt(C))) {
665 // For one bits in Mask, we can propagate bits from C to V.
666 Known.One |= *C;
667 if (match(Y, m_APInt(Mask)))
668 Known.Zero |= ~*C & *Mask;
669 // assume(V | Mask = C)
670 } else if (match(LHS, m_c_Or(m_V, m_Value(Y))) && match(RHS, m_APInt(C))) {
671 // For zero bits in Mask, we can propagate bits from C to V.
672 Known.Zero |= ~*C;
673 if (match(Y, m_APInt(Mask)))
674 Known.One |= *C & ~*Mask;
675 // assume(V ^ Mask = C)
676 } else if (match(LHS, m_Xor(m_V, m_APInt(Mask))) &&
677 match(RHS, m_APInt(C))) {
678 // Equivalent to assume(V == Mask ^ C)
679 Known = Known.unionWith(KnownBits::makeConstant(*C ^ *Mask));
680 // assume(V << ShAmt = C)
681 } else if (match(LHS, m_Shl(m_V, m_ConstantInt(ShAmt))) &&
682 match(RHS, m_APInt(C)) && ShAmt < BitWidth) {
683 // For those bits in C that are known, we can propagate them to known
684 // bits in V shifted to the right by ShAmt.
685 KnownBits RHSKnown = KnownBits::makeConstant(*C);
686 RHSKnown.Zero.lshrInPlace(ShAmt);
687 RHSKnown.One.lshrInPlace(ShAmt);
688 Known = Known.unionWith(RHSKnown);
689 // assume(V >> ShAmt = C)
690 } else if (match(LHS, m_Shr(m_V, m_ConstantInt(ShAmt))) &&
691 match(RHS, m_APInt(C)) && ShAmt < BitWidth) {
692 KnownBits RHSKnown = KnownBits::makeConstant(*C);
693 // For those bits in RHS that are known, we can propagate them to known
694 // bits in V shifted to the right by C.
695 Known.Zero |= RHSKnown.Zero << ShAmt;
696 Known.One |= RHSKnown.One << ShAmt;
697 }
698 break;
699 case ICmpInst::ICMP_NE: {
700 // assume (V & B != 0) where B is a power of 2
701 const APInt *BPow2;
702 if (match(LHS, m_And(m_V, m_Power2(BPow2))) && match(RHS, m_Zero()))
703 Known.One |= *BPow2;
704 break;
705 }
706 default:
707 if (match(RHS, m_APInt(C))) {
708 const APInt *Offset = nullptr;
709 if (match(LHS, m_CombineOr(m_V, m_AddLike(m_V, m_APInt(Offset))))) {
710 ConstantRange LHSRange = ConstantRange::makeAllowedICmpRegion(Pred, *C);
711 if (Offset)
712 LHSRange = LHSRange.sub(*Offset);
713 Known = Known.unionWith(LHSRange.toKnownBits());
714 }
715 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
716 // X & Y u> C -> X u> C && Y u> C
717 // X nuw- Y u> C -> X u> C
718 if (match(LHS, m_c_And(m_V, m_Value())) ||
719 match(LHS, m_NUWSub(m_V, m_Value())))
720 Known.One.setHighBits(
721 (*C + (Pred == ICmpInst::ICMP_UGT)).countLeadingOnes());
722 }
723 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
724 // X | Y u< C -> X u< C && Y u< C
725 // X nuw+ Y u< C -> X u< C && Y u< C
726 if (match(LHS, m_c_Or(m_V, m_Value())) ||
727 match(LHS, m_c_NUWAdd(m_V, m_Value()))) {
728 Known.Zero.setHighBits(
729 (*C - (Pred == ICmpInst::ICMP_ULT)).countLeadingZeros());
730 }
731 }
732 }
733 break;
734 }
735}
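// Editorial worked example (not part of the upstream source): for
// assume((V & 0xF0) == 0x30) on an i8 value, the ICMP_EQ case above sets
// Known.One |= 0x30 and, since the mask is a constant, Known.Zero |=
// ~0x30 & 0xF0 = 0xC0. The high nibble of V is then known to be 0011 while
// the low nibble stays unknown.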
736
737static void computeKnownBitsFromICmpCond(const Value *V, ICmpInst *Cmp,
738 KnownBits &Known,
739 const SimplifyQuery &SQ, bool Invert) {
740 ICmpInst::Predicate Pred =
741 Invert ? Cmp->getInversePredicate() : Cmp->getPredicate();
742 Value *LHS = Cmp->getOperand(0);
743 Value *RHS = Cmp->getOperand(1);
744
745 // Handle icmp pred (trunc V), C
746 if (match(LHS, m_Trunc(m_Specific(V)))) {
747 KnownBits DstKnown(LHS->getType()->getScalarSizeInBits());
748 computeKnownBitsFromCmp(LHS, Pred, LHS, RHS, DstKnown, SQ);
749 Known = Known.unionWith(DstKnown.anyext(Known.getBitWidth()));
750 return;
751 }
752
753 computeKnownBitsFromCmp(V, Pred, LHS, RHS, Known, SQ);
754}
755
756static void computeKnownBitsFromCond(const Value *V, Value *Cond,
757 KnownBits &Known, unsigned Depth,
758 const SimplifyQuery &SQ, bool Invert) {
759 Value *A, *B;
760 if (Depth < MaxAnalysisRecursionDepth &&
761 match(Cond, m_LogicalOp(m_Value(A), m_Value(B)))) {
762 KnownBits Known2(Known.getBitWidth());
763 KnownBits Known3(Known.getBitWidth());
764 computeKnownBitsFromCond(V, A, Known2, Depth + 1, SQ, Invert);
765 computeKnownBitsFromCond(V, B, Known3, Depth + 1, SQ, Invert);
766 if (Invert ? match(Cond, m_LogicalOr(m_Value(), m_Value()))
767 : match(Cond, m_LogicalAnd(m_Value(), m_Value())))
768 Known2 = Known2.unionWith(Known3);
769 else
770 Known2 = Known2.intersectWith(Known3);
771 Known = Known.unionWith(Known2);
772 }
773
774 if (auto *Cmp = dyn_cast<ICmpInst>(Cond))
775 computeKnownBitsFromICmpCond(V, Cmp, Known, SQ, Invert);
776}
777
778void llvm::computeKnownBitsFromContext(const Value *V, KnownBits &Known,
779 unsigned Depth, const SimplifyQuery &Q) {
780 // Handle injected condition.
781 if (Q.CC && Q.CC->AffectedValues.contains(V))
782 computeKnownBitsFromCond(V, Q.CC->Cond, Known, Depth, Q, Q.CC->Invert);
783
784 if (!Q.CxtI)
785 return;
786
787 if (Q.DC && Q.DT) {
788 // Handle dominating conditions.
789 for (BranchInst *BI : Q.DC->conditionsFor(V)) {
790 BasicBlockEdge Edge0(BI->getParent(), BI->getSuccessor(0));
791 if (Q.DT->dominates(Edge0, Q.CxtI->getParent()))
792 computeKnownBitsFromCond(V, BI->getCondition(), Known, Depth, Q,
793 /*Invert*/ false);
794
795 BasicBlockEdge Edge1(BI->getParent(), BI->getSuccessor(1));
796 if (Q.DT->dominates(Edge1, Q.CxtI->getParent()))
797 computeKnownBitsFromCond(V, BI->getCondition(), Known, Depth, Q,
798 /*Invert*/ true);
799 }
800
801 if (Known.hasConflict())
802 Known.resetAll();
803 }
804
805 if (!Q.AC)
806 return;
807
808 unsigned BitWidth = Known.getBitWidth();
809
810 // Note that the patterns below need to be kept in sync with the code
811 // in AssumptionCache::updateAffectedValues.
812
813 for (AssumptionCache::ResultElem &Elem : Q.AC->assumptionsFor(V)) {
814 if (!Elem.Assume)
815 continue;
816
817 AssumeInst *I = cast<AssumeInst>(Elem.Assume);
818 assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
819 "Got assumption for the wrong function!");
820
821 if (Elem.Index != AssumptionCache::ExprResultIdx) {
822 if (!V->getType()->isPointerTy())
823 continue;
824 if (RetainedKnowledge RK = getKnowledgeFromBundle(
825 *I, I->bundle_op_info_begin()[Elem.Index])) {
826 if (RK.WasOn == V && RK.AttrKind == Attribute::Alignment &&
827 isPowerOf2_64(RK.ArgValue) &&
828 isValidAssumeForContext(I, Q.CxtI, Q.DT))
829 Known.Zero.setLowBits(Log2_64(RK.ArgValue));
830 }
831 continue;
832 }
833
834 // Warning: This loop can end up being somewhat performance sensitive.
835 // We're running this loop once for each value queried, resulting in a
836 // runtime of ~O(#assumes * #values).
837
838 Value *Arg = I->getArgOperand(0);
839
840 if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
841 assert(BitWidth == 1 && "assume operand is not i1?");
842 (void)BitWidth;
843 Known.setAllOnes();
844 return;
845 }
846 if (match(Arg, m_Not(m_Specific(V))) &&
847 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
848 assert(BitWidth == 1 && "assume operand is not i1?");
849 (void)BitWidth;
850 Known.setAllZero();
851 return;
852 }
853
854 // The remaining tests are all recursive, so bail out if we hit the limit.
855 if (Depth == MaxAnalysisRecursionDepth)
856 continue;
857
858 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
859 if (!Cmp)
860 continue;
861
862 if (!isValidAssumeForContext(I, Q.CxtI, Q.DT))
863 continue;
864
865 computeKnownBitsFromICmpCond(V, Cmp, Known, Q, /*Invert=*/false);
866 }
867
868 // Conflicting assumption: Undefined behavior will occur on this execution
869 // path.
870 if (Known.hasConflict())
871 Known.resetAll();
872}
873
874/// Compute known bits from a shift operator, including those with a
875/// non-constant shift amount. Known is the output of this function. Known2 is a
876/// pre-allocated temporary with the same bit width as Known and on return
877/// contains the known bits of the shift value source. KF is an
878/// operator-specific function that, given the known-bits and a shift amount,
879/// computes the implied known-bits of the shift operator's result
880/// for that shift amount. The results from calling KF are conservatively
881/// combined for all permitted shift amounts.
882static void computeKnownBitsFromShiftOperator(
883 const Operator *I, const APInt &DemandedElts, KnownBits &Known,
884 KnownBits &Known2, unsigned Depth, const SimplifyQuery &Q,
885 function_ref<KnownBits(const KnownBits &, const KnownBits &, bool)> KF) {
886 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
887 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
888 // To limit compile-time impact, only query isKnownNonZero() if we know at
889 // least something about the shift amount.
890 bool ShAmtNonZero =
891 Known.isNonZero() ||
892 (Known.getMaxValue().ult(Known.getBitWidth()) &&
893 isKnownNonZero(I->getOperand(1), DemandedElts, Q, Depth + 1));
894 Known = KF(Known2, Known, ShAmtNonZero);
895}
896
897static KnownBits
898getKnownBitsFromAndXorOr(const Operator *I, const APInt &DemandedElts,
899 const KnownBits &KnownLHS, const KnownBits &KnownRHS,
900 unsigned Depth, const SimplifyQuery &Q) {
901 unsigned BitWidth = KnownLHS.getBitWidth();
902 KnownBits KnownOut(BitWidth);
903 bool IsAnd = false;
904 bool HasKnownOne = !KnownLHS.One.isZero() || !KnownRHS.One.isZero();
905 Value *X = nullptr, *Y = nullptr;
906
907 switch (I->getOpcode()) {
908 case Instruction::And:
909 KnownOut = KnownLHS & KnownRHS;
910 IsAnd = true;
911 // and(x, -x) is a common idiom that will clear all but the lowest set
912 // bit. If we have a single known bit in x, we can clear all bits
913 // above it.
914 // TODO: instcombine often reassociates independent `and` which can hide
915 // this pattern. Try to match and(x, and(-x, y)) / and(and(x, y), -x).
916 if (HasKnownOne && match(I, m_c_And(m_Value(X), m_Neg(m_Deferred(X))))) {
917 // -(-x) == x so using whichever (LHS/RHS) gets us a better result.
918 if (KnownLHS.countMaxTrailingZeros() <= KnownRHS.countMaxTrailingZeros())
919 KnownOut = KnownLHS.blsi();
920 else
921 KnownOut = KnownRHS.blsi();
922 }
923 break;
924 case Instruction::Or:
925 KnownOut = KnownLHS | KnownRHS;
926 break;
927 case Instruction::Xor:
928 KnownOut = KnownLHS ^ KnownRHS;
929 // xor(x, x-1) is a common idiom that will clear all but the lowest set
930 // bit. If we have a single known bit in x, we can clear all bits
931 // above it.
932 // TODO: xor(x, x-1) is often rewritten as xor(x, x-C) where C !=
933 // -1 but for the purpose of demanded bits (xor(x, x-C) &
934 // Demanded) == (xor(x, x-1) & Demanded). Extend the xor pattern
935 // to use arbitrary C if xor(x, x-C) is the same as xor(x, x-1).
936 if (HasKnownOne &&
937 match(I, m_c_Xor(m_Value(X), m_Add(m_Deferred(X), m_AllOnes())))) {
938 const KnownBits &XBits = I->getOperand(0) == X ? KnownLHS : KnownRHS;
939 KnownOut = XBits.blsmsk();
940 }
941 break;
942 default:
943 llvm_unreachable("Invalid Op used in 'analyzeKnownBitsFromAndXorOr'");
944 }
945
946 // and(x, add (x, -1)) is a common idiom that always clears the low bit;
947 // xor/or(x, add (x, -1)) is an idiom that will always set the low bit.
948 // Here we handle the more general case of adding any odd number by
949 // matching the form and/xor/or(x, add(x, y)) where y is odd.
950 // TODO: This could be generalized to clearing any bit set in y where the
951 // following bit is known to be unset in y.
952 if (!KnownOut.Zero[0] && !KnownOut.One[0] &&
953 (match(I, m_c_BinOp(m_Value(X), m_c_Add(m_Deferred(X), m_Value(Y)))) ||
954 match(I, m_c_BinOp(m_Value(X), m_Sub(m_Deferred(X), m_Value(Y)))) ||
955 match(I, m_c_BinOp(m_Value(X), m_Sub(m_Value(Y), m_Deferred(X)))))) {
956 KnownBits KnownY(BitWidth);
957 computeKnownBits(Y, DemandedElts, KnownY, Depth + 1, Q);
958 if (KnownY.countMinTrailingOnes() > 0) {
959 if (IsAnd)
960 KnownOut.Zero.setBit(0);
961 else
962 KnownOut.One.setBit(0);
963 }
964 }
965 return KnownOut;
966}
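// Editorial example (not part of the upstream source): for and(x, -x), if x
// is known to look like ????1000 (bit 3 one, bits 2..0 zero), the lowest set
// bit of x is exactly bit 3, so blsi() reports the constant 0b1000.
// Likewise xor(x, x - 1) yields a mask of everything up to and including the
// lowest set bit, which blsmsk() models.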
967
968static KnownBits computeKnownBitsForHorizontalOperation(
969 const Operator *I, const APInt &DemandedElts, unsigned Depth,
970 const SimplifyQuery &Q,
971 const function_ref<KnownBits(const KnownBits &, const KnownBits &)>
972 KnownBitsFunc) {
973 APInt DemandedEltsLHS, DemandedEltsRHS;
974 getHorizDemandedEltsForFirstOperand(Q.DL.getTypeSizeInBits(I->getType()),
975 DemandedElts, DemandedEltsLHS,
976 DemandedEltsRHS);
977
978 const auto ComputeForSingleOpFunc =
979 [Depth, &Q, KnownBitsFunc](const Value *Op, APInt &DemandedEltsOp) {
980 return KnownBitsFunc(
981 computeKnownBits(Op, DemandedEltsOp, Depth + 1, Q),
982 computeKnownBits(Op, DemandedEltsOp << 1, Depth + 1, Q));
983 };
984
985 if (DemandedEltsRHS.isZero())
986 return ComputeForSingleOpFunc(I->getOperand(0), DemandedEltsLHS);
987 if (DemandedEltsLHS.isZero())
988 return ComputeForSingleOpFunc(I->getOperand(1), DemandedEltsRHS);
989
990 return ComputeForSingleOpFunc(I->getOperand(0), DemandedEltsLHS)
991 .intersectWith(ComputeForSingleOpFunc(I->getOperand(1), DemandedEltsRHS));
992}
993
994// Public so this can be used in `SimplifyDemandedUseBits`.
995KnownBits llvm::analyzeKnownBitsFromAndXorOr(const Operator *I,
996 const KnownBits &KnownLHS,
997 const KnownBits &KnownRHS,
998 unsigned Depth,
999 const SimplifyQuery &SQ) {
1000 auto *FVTy = dyn_cast<FixedVectorType>(I->getType());
1001 APInt DemandedElts =
1002 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
1003
1004 return getKnownBitsFromAndXorOr(I, DemandedElts, KnownLHS, KnownRHS, Depth,
1005 SQ);
1006}
1007
1008ConstantRange llvm::getVScaleRange(const Function *F, unsigned BitWidth) {
1009 Attribute Attr = F->getFnAttribute(Attribute::VScaleRange);
1010 // Without vscale_range, we only know that vscale is non-zero.
1011 if (!Attr.isValid())
1012 return ConstantRange(APInt(BitWidth, 1), APInt::getZero(BitWidth));
1013
1014 unsigned AttrMin = Attr.getVScaleRangeMin();
1015 // Minimum is larger than vscale width, result is always poison.
1016 if ((unsigned)llvm::bit_width(AttrMin) > BitWidth)
1017 return ConstantRange::getEmpty(BitWidth);
1018
1019 APInt Min(BitWidth, AttrMin);
1020 std::optional<unsigned> AttrMax = Attr.getVScaleRangeMax();
1021 if (!AttrMax || (unsigned)llvm::bit_width(*AttrMax) > BitWidth)
1022 return ConstantRange(Min, APInt::getZero(BitWidth));
1023
1024 return ConstantRange(Min, APInt(BitWidth, *AttrMax) + 1);
1025}
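// Editorial example (not part of the upstream source): a function attributed
// vscale_range(2,4) yields the ConstantRange [2, 5) here, while an attribute
// that specifies only a minimum of 1 yields [1, 0), i.e. "any non-zero
// value".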
1026
1027static void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond,
1028 Value *Arm, bool Invert, unsigned Depth,
1029 const SimplifyQuery &Q) {
1030 // If we have a constant arm, we are done.
1031 if (Known.isConstant())
1032 return;
1033
1034 // See what condition implies about the bits of the select arm.
1035 KnownBits CondRes(Known.getBitWidth());
1036 computeKnownBitsFromCond(Arm, Cond, CondRes, Depth + 1, Q, Invert);
1037 // If we don't get any information from the condition, no reason to
1038 // proceed.
1039 if (CondRes.isUnknown())
1040 return;
1041
1042 // We can have a conflict if the condition is dead. I.e., if we have
1043 // (x | 64) < 32 ? (x | 64) : y
1044 // we will have a conflict at bit 6 from the condition/the `or`.
1045 // In that case just return. It's not particularly important
1046 // what we do, as this select is going to be simplified soon.
1047 CondRes = CondRes.unionWith(Known);
1048 if (CondRes.hasConflict())
1049 return;
1050
1051 // Finally make sure the information we found is valid. This is relatively
1052 // expensive so it's left for the very end.
1053 if (!isGuaranteedNotToBeUndef(Arm, Q.AC, Q.CxtI, Q.DT, Depth + 1))
1054 return;
1055
1056 // Finally, we know we get information from the condition and its valid,
1057 // so return it.
1058 Known = CondRes;
1059}
1060
1061static void computeKnownBitsFromOperator(const Operator *I,
1062 const APInt &DemandedElts,
1063 KnownBits &Known, unsigned Depth,
1064 const SimplifyQuery &Q) {
1065 unsigned BitWidth = Known.getBitWidth();
1066
1067 KnownBits Known2(BitWidth);
1068 switch (I->getOpcode()) {
1069 default: break;
1070 case Instruction::Load:
1071 if (MDNode *MD =
1072 Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
1073 computeKnownBitsFromRangeMetadata(*MD, Known);
1074 break;
1075 case Instruction::And:
1076 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1077 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1078
1079 Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
1080 break;
1081 case Instruction::Or:
1082 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1083 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1084
1085 Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
1086 break;
1087 case Instruction::Xor:
1088 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1089 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1090
1091 Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q);
1092 break;
1093 case Instruction::Mul: {
1094 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1095 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
1096 Known, Known2, Depth, Q);
1097 break;
1098 }
1099 case Instruction::UDiv: {
1100 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1101 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
1102 Known =
1103 KnownBits::udiv(Known, Known2, Q.IIQ.isExact(cast<BinaryOperator>(I)));
1104 break;
1105 }
1106 case Instruction::SDiv: {
1107 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1108 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
1109 Known =
1110 KnownBits::sdiv(Known, Known2, Q.IIQ.isExact(cast<BinaryOperator>(I)));
1111 break;
1112 }
1113 case Instruction::Select: {
1114 auto ComputeForArm = [&](Value *Arm, bool Invert) {
1115 KnownBits Res(Known.getBitWidth());
1116 computeKnownBits(Arm, DemandedElts, Res, Depth + 1, Q);
1117 adjustKnownBitsForSelectArm(Res, I->getOperand(0), Arm, Invert, Depth, Q);
1118 return Res;
1119 };
1120 // Only known if known in both the LHS and RHS.
1121 Known =
1122 ComputeForArm(I->getOperand(1), /*Invert=*/false)
1123 .intersectWith(ComputeForArm(I->getOperand(2), /*Invert=*/true));
1124 break;
1125 }
1126 case Instruction::FPTrunc:
1127 case Instruction::FPExt:
1128 case Instruction::FPToUI:
1129 case Instruction::FPToSI:
1130 case Instruction::SIToFP:
1131 case Instruction::UIToFP:
1132 break; // Can't work with floating point.
1133 case Instruction::PtrToInt:
1134 case Instruction::IntToPtr:
1135 // Fall through and handle them the same as zext/trunc.
1136 [[fallthrough]];
1137 case Instruction::ZExt:
1138 case Instruction::Trunc: {
1139 Type *SrcTy = I->getOperand(0)->getType();
1140
1141 unsigned SrcBitWidth;
1142 // Note that we handle pointer operands here because of inttoptr/ptrtoint
1143 // which fall through here.
1144 Type *ScalarTy = SrcTy->getScalarType();
1145 SrcBitWidth = ScalarTy->isPointerTy() ?
1146 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
1147 Q.DL.getTypeSizeInBits(ScalarTy);
1148
1149 assert(SrcBitWidth && "SrcBitWidth can't be zero");
1150 Known = Known.anyextOrTrunc(SrcBitWidth);
1151 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1152 if (auto *Inst = dyn_cast<PossiblyNonNegInst>(I);
1153 Inst && Inst->hasNonNeg() && !Known.isNegative())
1154 Known.makeNonNegative();
1155 Known = Known.zextOrTrunc(BitWidth);
1156 break;
1157 }
1158 case Instruction::BitCast: {
1159 Type *SrcTy = I->getOperand(0)->getType();
1160 if (SrcTy->isIntOrPtrTy() &&
1161 // TODO: For now, not handling conversions like:
1162 // (bitcast i64 %x to <2 x i32>)
1163 !I->getType()->isVectorTy()) {
1164 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1165 break;
1166 }
1167
1168 const Value *V;
1169 // Handle bitcast from floating point to integer.
1170 if (match(I, m_ElementWiseBitCast(m_Value(V))) &&
1171 V->getType()->isFPOrFPVectorTy()) {
1172 Type *FPType = V->getType()->getScalarType();
1173 KnownFPClass Result =
1174 computeKnownFPClass(V, DemandedElts, fcAllFlags, Depth + 1, Q);
1175 FPClassTest FPClasses = Result.KnownFPClasses;
1176
1177 // TODO: Treat it as zero/poison if the use of I is unreachable.
1178 if (FPClasses == fcNone)
1179 break;
1180
1181 if (Result.isKnownNever(fcNormal | fcSubnormal | fcNan)) {
1182 Known.Zero.setAllBits();
1183 Known.One.setAllBits();
1184
1185 if (FPClasses & fcInf)
1186 Known = Known.intersectWith(KnownBits::makeConstant(
1187 APFloat::getInf(FPType->getFltSemantics()).bitcastToAPInt()));
1188
1189 if (FPClasses & fcZero)
1190 Known = Known.intersectWith(KnownBits::makeConstant(
1191 APInt::getZero(FPType->getScalarSizeInBits())));
1192
1193 Known.Zero.clearSignBit();
1194 Known.One.clearSignBit();
1195 }
1196
1197 if (Result.SignBit) {
1198 if (*Result.SignBit)
1199 Known.makeNegative();
1200 else
1201 Known.makeNonNegative();
1202 }
1203
1204 break;
1205 }
1206
1207 // Handle cast from vector integer type to scalar or vector integer.
1208 auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
1209 if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
1210 !I->getType()->isIntOrIntVectorTy() ||
1211 isa<ScalableVectorType>(I->getType()))
1212 break;
1213
1214 // Look through a cast from narrow vector elements to wider type.
1215 // Examples: v4i32 -> v2i64, v3i8 -> v24
1216 unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
1217 if (BitWidth % SubBitWidth == 0) {
1218 // Known bits are automatically intersected across demanded elements of a
1219 // vector. So for example, if a bit is computed as known zero, it must be
1220 // zero across all demanded elements of the vector.
1221 //
1222 // For this bitcast, each demanded element of the output is sub-divided
1223 // across a set of smaller vector elements in the source vector. To get
1224 // the known bits for an entire element of the output, compute the known
1225 // bits for each sub-element sequentially. This is done by shifting the
1226 // one-set-bit demanded elements parameter across the sub-elements for
1227 // consecutive calls to computeKnownBits. We are using the demanded
1228 // elements parameter as a mask operator.
1229 //
1230 // The known bits of each sub-element are then inserted into place
1231 // (dependent on endian) to form the full result of known bits.
1232 unsigned NumElts = DemandedElts.getBitWidth();
1233 unsigned SubScale = BitWidth / SubBitWidth;
1234 APInt SubDemandedElts = APInt::getZero(NumElts * SubScale);
1235 for (unsigned i = 0; i != NumElts; ++i) {
1236 if (DemandedElts[i])
1237 SubDemandedElts.setBit(i * SubScale);
1238 }
1239
1240 KnownBits KnownSrc(SubBitWidth);
1241 for (unsigned i = 0; i != SubScale; ++i) {
1242 computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc,
1243 Depth + 1, Q);
1244 unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i;
1245 Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
1246 }
1247 }
1248 break;
1249 }
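// Editorial example for the narrow-to-wide bitcast handling above (not part
// of the upstream source): for %w = bitcast <4 x i8> %v to <2 x i16> with
// DemandedElts = 0b01, SubScale is 2 and SubDemandedElts starts as 0b0001;
// the loop queries the source with 0b0001 and then 0b0010, and on a
// little-endian target source element 0 fills bits [0,8) and element 1 fills
// bits [8,16) of the demanded result element.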
1250 case Instruction::SExt: {
1251 // Compute the bits in the result that are not present in the input.
1252 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1253
1254 Known = Known.trunc(SrcBitWidth);
1255 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1256 // If the sign bit of the input is known set or clear, then we know the
1257 // top bits of the result.
1258 Known = Known.sext(BitWidth);
1259 break;
1260 }
1261 case Instruction::Shl: {
1262 bool NUW = Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(I));
1263 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1264 auto KF = [NUW, NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt,
1265 bool ShAmtNonZero) {
1266 return KnownBits::shl(KnownVal, KnownAmt, NUW, NSW, ShAmtNonZero);
1267 };
1268 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1269 KF);
1270 // Trailing zeros of a left-shifted constant never decrease.
1271 const APInt *C;
1272 if (match(I->getOperand(0), m_APInt(C)))
1273 Known.Zero.setLowBits(C->countr_zero());
1274 break;
1275 }
1276 case Instruction::LShr: {
1277 bool Exact = Q.IIQ.isExact(cast<BinaryOperator>(I));
1278 auto KF = [Exact](const KnownBits &KnownVal, const KnownBits &KnownAmt,
1279 bool ShAmtNonZero) {
1280 return KnownBits::lshr(KnownVal, KnownAmt, ShAmtNonZero, Exact);
1281 };
1282 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1283 KF);
1284 // Leading zeros of a right-shifted constant never decrease.
1285 const APInt *C;
1286 if (match(I->getOperand(0), m_APInt(C)))
1287 Known.Zero.setHighBits(C->countl_zero());
1288 break;
1289 }
1290 case Instruction::AShr: {
1291 bool Exact = Q.IIQ.isExact(cast<BinaryOperator>(I));
1292 auto KF = [Exact](const KnownBits &KnownVal, const KnownBits &KnownAmt,
1293 bool ShAmtNonZero) {
1294 return KnownBits::ashr(KnownVal, KnownAmt, ShAmtNonZero, Exact);
1295 };
1296 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1297 KF);
1298 break;
1299 }
1300 case Instruction::Sub: {
1301 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1302 bool NUW = Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(I));
1303 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW, NUW,
1304 DemandedElts, Known, Known2, Depth, Q);
1305 break;
1306 }
1307 case Instruction::Add: {
1308 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1309 bool NUW = Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(I));
1310 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW, NUW,
1311 DemandedElts, Known, Known2, Depth, Q);
1312 break;
1313 }
1314 case Instruction::SRem:
1315 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1316 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
1317 Known = KnownBits::srem(Known, Known2);
1318 break;
1319
1320 case Instruction::URem:
1321 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1322 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
1323 Known = KnownBits::urem(Known, Known2);
1324 break;
1325 case Instruction::Alloca:
1326 Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
1327 break;
1328 case Instruction::GetElementPtr: {
1329 // Analyze all of the subscripts of this getelementptr instruction
1330 // to determine if we can prove known low zero bits.
1331 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1332 // Accumulate the constant indices in a separate variable
1333 // to minimize the number of calls to computeForAddSub.
1334 APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);
1335
1336 gep_type_iterator GTI = gep_type_begin(I);
1337 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1338 // TrailZ can only become smaller, short-circuit if we hit zero.
1339 if (Known.isUnknown())
1340 break;
1341
1342 Value *Index = I->getOperand(i);
1343
1344 // Handle case when index is zero.
1345 Constant *CIndex = dyn_cast<Constant>(Index);
1346 if (CIndex && CIndex->isZeroValue())
1347 continue;
1348
1349 if (StructType *STy = GTI.getStructTypeOrNull()) {
1350 // Handle struct member offset arithmetic.
1351
1352 assert(CIndex &&
1353 "Access to structure field must be known at compile time");
1354
1355 if (CIndex->getType()->isVectorTy())
1356 Index = CIndex->getSplatValue();
1357
1358 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1359 const StructLayout *SL = Q.DL.getStructLayout(STy);
1360 uint64_t Offset = SL->getElementOffset(Idx);
1361 AccConstIndices += Offset;
1362 continue;
1363 }
1364
1365 // Handle array index arithmetic.
1366 Type *IndexedTy = GTI.getIndexedType();
1367 if (!IndexedTy->isSized()) {
1368 Known.resetAll();
1369 break;
1370 }
1371
1372 unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
1373 KnownBits IndexBits(IndexBitWidth);
1374 computeKnownBits(Index, IndexBits, Depth + 1, Q);
1375 TypeSize IndexTypeSize = GTI.getSequentialElementStride(Q.DL);
1376 uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinValue();
1377 KnownBits ScalingFactor(IndexBitWidth);
1378 // Multiply by current sizeof type.
1379 // &A[i] == A + i * sizeof(*A[i]).
1380 if (IndexTypeSize.isScalable()) {
1381 // For scalable types the only thing we know about sizeof is
1382 // that this is a multiple of the minimum size.
1383 ScalingFactor.Zero.setLowBits(llvm::countr_zero(TypeSizeInBytes));
1384 } else if (IndexBits.isConstant()) {
1385 APInt IndexConst = IndexBits.getConstant();
1386 APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
1387 IndexConst *= ScalingFactor;
1388 AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
1389 continue;
1390 } else {
1391 ScalingFactor =
1392 KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
1393 }
1394 IndexBits = KnownBits::mul(IndexBits, ScalingFactor);
1395
1396 // If the offsets have a different width from the pointer, according
1397 // to the language reference we need to sign-extend or truncate them
1398 // to the width of the pointer.
1399 IndexBits = IndexBits.sextOrTrunc(BitWidth);
1400
1401 // Note that inbounds does *not* guarantee nsw for the addition, as only
1402 // the offset is signed, while the base address is unsigned.
1403 Known = KnownBits::add(Known, IndexBits);
1404 }
1405 if (!Known.isUnknown() && !AccConstIndices.isZero()) {
1406 KnownBits Index = KnownBits::makeConstant(AccConstIndices);
1407 Known = KnownBits::add(Known, Index);
1408 }
1409 break;
1410 }
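// Editorial example for the GEP handling above (not part of the upstream
// source): in "getelementptr i32, ptr %p, i64 %i" the index is scaled by
// sizeof(i32) = 4, giving IndexBits at least two known trailing zeros; if %p
// is known 8-byte aligned, the add keeps min(3, 2) = 2 low zero bits, so the
// result is provably 4-byte aligned.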
1411 case Instruction::PHI: {
1412 const PHINode *P = cast<PHINode>(I);
1413 BinaryOperator *BO = nullptr;
1414 Value *R = nullptr, *L = nullptr;
1415 if (matchSimpleRecurrence(P, BO, R, L)) {
1416 // Handle the case of a simple two-predecessor recurrence PHI.
1417 // There's a lot more that could theoretically be done here, but
1418 // this is sufficient to catch some interesting cases.
1419 unsigned Opcode = BO->getOpcode();
1420
1421 // If this is a shift recurrence, we know the bits being shifted in.
1422 // We can combine that with information about the start value of the
1423 // recurrence to conclude facts about the result.
1424 if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
1425 Opcode == Instruction::Shl) &&
1426 BO->getOperand(0) == I) {
1427
1428 // We have matched a recurrence of the form:
1429 // %iv = [R, %entry], [%iv.next, %backedge]
1430 // %iv.next = shift_op %iv, L
1431
1432 // Recurse with the phi context to avoid concern about whether facts
1433 // inferred hold at original context instruction. TODO: It may be
1434 // correct to use the original context. IF warranted, explore and
1435 // add sufficient tests to cover.
1436 SimplifyQuery RecQ = Q.getWithoutCondContext();
1437 RecQ.CxtI = P;
1438 computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
1439 switch (Opcode) {
1440 case Instruction::Shl:
1441 // A shl recurrence will only increase the trailing zeros
1442 Known.Zero.setLowBits(Known2.countMinTrailingZeros());
1443 break;
1444 case Instruction::LShr:
1445 // A lshr recurrence will preserve the leading zeros of the
1446 // start value
1447 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1448 break;
1449 case Instruction::AShr:
1450 // An ashr recurrence will extend the initial sign bit
1451 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1452 Known.One.setHighBits(Known2.countMinLeadingOnes());
1453 break;
1454 };
1455 }
1456
1457 // Check for operations that have the property that if
1458 // both their operands have low zero bits, the result
1459 // will have low zero bits.
1460 if (Opcode == Instruction::Add ||
1461 Opcode == Instruction::Sub ||
1462 Opcode == Instruction::And ||
1463 Opcode == Instruction::Or ||
1464 Opcode == Instruction::Mul) {
1465 // Change the context instruction to the "edge" that flows into the
1466 // phi. This is important because that is where the value is actually
1467 // "evaluated" even though it is used later somewhere else. (see also
1468 // D69571).
1469 SimplifyQuery RecQ = Q.getWithoutCondContext();
1470
1471 unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
1472 Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
1473 Instruction *LInst = P->getIncomingBlock(1 - OpNum)->getTerminator();
1474
1475 // Ok, we have a PHI of the form L op= R. Check for low
1476 // zero bits.
1477 RecQ.CxtI = RInst;
1478 computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
1479
1480 // We need to take the minimum number of known bits
1481 KnownBits Known3(BitWidth);
1482 RecQ.CxtI = LInst;
1483 computeKnownBits(L, DemandedElts, Known3, Depth + 1, RecQ);
1484
1485 Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1486 Known3.countMinTrailingZeros()));
1487
1488 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
1489 if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1490 // If initial value of recurrence is nonnegative, and we are adding
1491 // a nonnegative number with nsw, the result can only be nonnegative
1492 // or poison value regardless of the number of times we execute the
1493 // add in phi recurrence. If initial value is negative and we are
1494 // adding a negative number with nsw, the result can only be
1495 // negative or poison value. Similar arguments apply to sub and mul.
1496 //
1497 // (add non-negative, non-negative) --> non-negative
1498 // (add negative, negative) --> negative
1499 if (Opcode == Instruction::Add) {
1500 if (Known2.isNonNegative() && Known3.isNonNegative())
1501 Known.makeNonNegative();
1502 else if (Known2.isNegative() && Known3.isNegative())
1503 Known.makeNegative();
1504 }
1505
1506 // (sub nsw non-negative, negative) --> non-negative
1507 // (sub nsw negative, non-negative) --> negative
1508 else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
1509 if (Known2.isNonNegative() && Known3.isNegative())
1510 Known.makeNonNegative();
1511 else if (Known2.isNegative() && Known3.isNonNegative())
1512 Known.makeNegative();
1513 }
1514
1515 // (mul nsw non-negative, non-negative) --> non-negative
1516 else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1517 Known3.isNonNegative())
1518 Known.makeNonNegative();
1519 }
1520
1521 break;
1522 }
1523 }
1524
1525 // Unreachable blocks may have zero-operand PHI nodes.
1526 if (P->getNumIncomingValues() == 0)
1527 break;
1528
1529 // Otherwise take the unions of the known bit sets of the operands,
1530 // taking conservative care to avoid excessive recursion.
1531 if (Depth < MaxAnalysisRecursionDepth - 1 && Known.isUnknown()) {
1532 // Skip if every incoming value references the phi itself.
1533 if (isa_and_nonnull<UndefValue>(P->hasConstantValue()))
1534 break;
1535
1536 Known.Zero.setAllBits();
1537 Known.One.setAllBits();
1538 for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1539 Value *IncValue = P->getIncomingValue(u);
1540 // Skip direct self references.
1541 if (IncValue == P) continue;
1542
1543 // Change the context instruction to the "edge" that flows into the
1544 // phi. This is important because that is where the value is actually
1545 // "evaluated" even though it is used later somewhere else. (see also
1546 // D69571).
1547 SimplifyQuery RecQ = Q.getWithoutCondContext();
1548 RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1549
1550 Known2 = KnownBits(BitWidth);
1551
1552 // Recurse, but cap the recursion to one level, because we don't
1553 // want to waste time spinning around in loops.
1554 // TODO: See if we can base recursion limiter on number of incoming phi
1555 // edges so we don't overly clamp analysis.
1556 computeKnownBits(IncValue, DemandedElts, Known2,
1557 MaxAnalysisRecursionDepth - 1, RecQ);
1558
1559 // See if we can further use a conditional branch into the phi
1560 // to help us determine the range of the value.
1561 if (!Known2.isConstant()) {
1562 ICmpInst::Predicate Pred;
1563 const APInt *RHSC;
1564 BasicBlock *TrueSucc, *FalseSucc;
1565 // TODO: Use RHS Value and compute range from its known bits.
1566 if (match(RecQ.CxtI,
1567 m_Br(m_c_ICmp(Pred, m_Specific(IncValue), m_APInt(RHSC)),
1568 m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) {
1569 // Check for cases of duplicate successors.
1570 if ((TrueSucc == P->getParent()) != (FalseSucc == P->getParent())) {
1571 // If we're using the false successor, invert the predicate.
1572 if (FalseSucc == P->getParent())
1573 Pred = CmpInst::getInversePredicate(Pred);
1574 // Get the knownbits implied by the incoming phi condition.
1575 auto CR = ConstantRange::makeExactICmpRegion(Pred, *RHSC);
1576 KnownBits KnownUnion = Known2.unionWith(CR.toKnownBits());
1577 // We can have conflicts here if we are analyzing dead code (it's
1578 // impossible for us to reach this BB based on the icmp).
1579 if (KnownUnion.hasConflict()) {
1580 // No reason to continue analyzing in a known dead region, so
1581 // just resetAll and break. This will cause us to also exit the
1582 // outer loop.
1583 Known.resetAll();
1584 break;
1585 }
1586 Known2 = KnownUnion;
1587 }
1588 }
1589 }
1590
1591 Known = Known.intersectWith(Known2);
1592 // If all bits have been ruled out, there's no need to check
1593 // more operands.
1594 if (Known.isUnknown())
1595 break;
1596 }
1597 }
1598 break;
1599 }
1600 case Instruction::Call:
1601 case Instruction::Invoke: {
1602 // If range metadata is attached to this call, set known bits from that,
1603 // and then intersect with known bits based on other properties of the
1604 // function.
1605 if (MDNode *MD =
1606 Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1607 computeKnownBitsFromRangeMetadata(*MD, Known);
1608
1609 const auto *CB = cast<CallBase>(I);
1610
1611 if (std::optional<ConstantRange> Range = CB->getRange())
1612 Known = Known.unionWith(Range->toKnownBits());
1613
1614 if (const Value *RV = CB->getReturnedArgOperand()) {
1615 if (RV->getType() == I->getType()) {
1616 computeKnownBits(RV, Known2, Depth + 1, Q);
1617 Known = Known.unionWith(Known2);
1618 // If the function doesn't return properly for all input values
1619 // (e.g. unreachable exits) then there might be conflicts between the
1620 // argument value and the range metadata. Simply discard the known bits
1621 // in case of conflicts.
1622 if (Known.hasConflict())
1623 Known.resetAll();
1624 }
1625 }
1626 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1627 switch (II->getIntrinsicID()) {
1628 default:
1629 break;
1630 case Intrinsic::abs: {
1631 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1632 bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
1633 Known = Known2.abs(IntMinIsPoison);
1634 break;
1635 }
1636 case Intrinsic::bitreverse:
1637 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1638 Known.Zero |= Known2.Zero.reverseBits();
1639 Known.One |= Known2.One.reverseBits();
1640 break;
1641 case Intrinsic::bswap:
1642 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1643 Known.Zero |= Known2.Zero.byteSwap();
1644 Known.One |= Known2.One.byteSwap();
1645 break;
1646 case Intrinsic::ctlz: {
1647 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1648 // If we have a known 1, its position is our upper bound.
1649 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1650 // If this call is poison for a zero input, the result is at most BitWidth - 1.
1651 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1652 PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1653 unsigned LowBits = llvm::bit_width(PossibleLZ);
1654 Known.Zero.setBitsFrom(LowBits);
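// Illustration: for a 32-bit operand whose known bits allow at most 7
// leading zeros, the result is at most 7, which fits in bit_width(7) = 3
// bits, so result bits 3 and above are known zero.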
1655 break;
1656 }
1657 case Intrinsic::cttz: {
1658 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1659 // If we have a known 1, its position is our upper bound.
1660 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1661 // If this call is poison for a zero input, the result is at most BitWidth - 1.
1662 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1663 PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1664 unsigned LowBits = llvm::bit_width(PossibleTZ);
1665 Known.Zero.setBitsFrom(LowBits);
1666 break;
1667 }
1668 case Intrinsic::ctpop: {
1669 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1670 // We can bound the space the count needs. Also, bits known to be zero
1671 // can't contribute to the population.
1672 unsigned BitsPossiblySet = Known2.countMaxPopulation();
1673 unsigned LowBits = llvm::bit_width(BitsPossiblySet);
1674 Known.Zero.setBitsFrom(LowBits);
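// Illustration: if at most 5 bits of the operand can possibly be set, the
// population count is at most 5, which fits in bit_width(5) = 3 bits, so
// the higher result bits are known zero.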
1675 // TODO: we could bound KnownOne using the lower bound on the number
1676 // of bits which might be set provided by popcnt KnownOne2.
1677 break;
1678 }
1679 case Intrinsic::fshr:
1680 case Intrinsic::fshl: {
1681 const APInt *SA;
1682 if (!match(I->getOperand(2), m_APInt(SA)))
1683 break;
1684
1685 // Normalize to funnel shift left.
1686 uint64_t ShiftAmt = SA->urem(BitWidth);
1687 if (II->getIntrinsicID() == Intrinsic::fshr)
1688 ShiftAmt = BitWidth - ShiftAmt;
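// Illustration: on i8, fshr(x, y, 3) equals fshl(x, y, 5); both produce
// (x << 5) | (y >> 3), so only the left form needs to be modeled below.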
1689
1690 KnownBits Known3(BitWidth);
1691 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1692 computeKnownBits(I->getOperand(1), DemandedElts, Known3, Depth + 1, Q);
1693
1694 Known.Zero =
1695 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1696 Known.One =
1697 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1698 break;
1699 }
1700 case Intrinsic::uadd_sat:
1701 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1702 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
1703 Known = KnownBits::uadd_sat(Known, Known2);
1704 break;
1705 case Intrinsic::usub_sat:
1706 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1707 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
1708 Known = KnownBits::usub_sat(Known, Known2);
1709 break;
1710 case Intrinsic::sadd_sat:
1711 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1712 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
1713 Known = KnownBits::sadd_sat(Known, Known2);
1714 break;
1715 case Intrinsic::ssub_sat:
1716 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1717 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
1718 Known = KnownBits::ssub_sat(Known, Known2);
1719 break;
1720 // Vec reverse preserves bits from input vec.
1721 case Intrinsic::vector_reverse:
1722 computeKnownBits(I->getOperand(0), DemandedElts.reverseBits(), Known,
1723 Depth + 1, Q);
1724 break;
1725 // for min/max/and/or reduce, any bit common to each element in the
1726 // input vec is set in the output.
1727 case Intrinsic::vector_reduce_and:
1728 case Intrinsic::vector_reduce_or:
1729 case Intrinsic::vector_reduce_umax:
1730 case Intrinsic::vector_reduce_umin:
1731 case Intrinsic::vector_reduce_smax:
1732 case Intrinsic::vector_reduce_smin:
1733 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1734 break;
1735 case Intrinsic::vector_reduce_xor: {
1736 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1737 // The zeros common to all elements remain zero in the output.
1738 // If the number of elements is odd, then the common ones remain. If the
1739 // number of elements is even, then the common ones become zeros.
1740 auto *VecTy = cast<VectorType>(I->getOperand(0)->getType());
1741 // Even, so the ones become zeros.
1742 bool EvenCnt = VecTy->getElementCount().isKnownEven();
1743 if (EvenCnt)
1744 Known.Zero |= Known.One;
1745 // The element count may be even (it is unknown for scalable vectors), so clear the ones.
1746 if (VecTy->isScalableTy() || EvenCnt)
1747 Known.One.clearAllBits();
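// Illustration: if every element has bit 0 known one, xor-reducing an even
// number of elements gives 0 in that bit, while an odd count preserves it;
// for scalable vectors the parity is unknown, so only common zeros are kept.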
1748 break;
1749 }
1750 case Intrinsic::umin:
1751 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1752 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
1753 Known = KnownBits::umin(Known, Known2);
1754 break;
1755 case Intrinsic::umax:
1756 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1757 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
1758 Known = KnownBits::umax(Known, Known2);
1759 break;
1760 case Intrinsic::smin:
1761 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1762 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
1763 Known = KnownBits::smin(Known, Known2);
1764 break;
1765 case Intrinsic::smax:
1766 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1767 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
1768 Known = KnownBits::smax(Known, Known2);
1769 break;
1770 case Intrinsic::ptrmask: {
1771 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1772
1773 const Value *Mask = I->getOperand(1);
1774 Known2 = KnownBits(Mask->getType()->getScalarSizeInBits());
1775 computeKnownBits(Mask, DemandedElts, Known2, Depth + 1, Q);
1776 // TODO: 1-extend would be more precise.
1777 Known &= Known2.anyextOrTrunc(BitWidth);
1778 break;
1779 }
1780 case Intrinsic::x86_sse2_pmulh_w:
1781 case Intrinsic::x86_avx2_pmulh_w:
1782 case Intrinsic::x86_avx512_pmulh_w_512:
1783 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1784 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
1785 Known = KnownBits::mulhs(Known, Known2);
1786 break;
1787 case Intrinsic::x86_sse2_pmulhu_w:
1788 case Intrinsic::x86_avx2_pmulhu_w:
1789 case Intrinsic::x86_avx512_pmulhu_w_512:
1790 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q);
1791 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q);
1792 Known = KnownBits::mulhu(Known, Known2);
1793 break;
1794 case Intrinsic::x86_sse42_crc32_64_64:
1795 Known.Zero.setBitsFrom(32);
1796 break;
1797 case Intrinsic::x86_ssse3_phadd_d_128:
1798 case Intrinsic::x86_ssse3_phadd_w_128:
1799 case Intrinsic::x86_avx2_phadd_d:
1800 case Intrinsic::x86_avx2_phadd_w: {
1801 Known = computeKnownBitsForHorizontalOperation(
1802 I, DemandedElts, Depth, Q,
1803 [](const KnownBits &KnownLHS, const KnownBits &KnownRHS) {
1804 return KnownBits::add(KnownLHS, KnownRHS);
1805 });
1806 break;
1807 }
1808 case Intrinsic::x86_ssse3_phadd_sw_128:
1809 case Intrinsic::x86_avx2_phadd_sw: {
1810 Known = computeKnownBitsForHorizontalOperation(I, DemandedElts, Depth,
1811 Q, KnownBits::sadd_sat);
1812 break;
1813 }
1814 case Intrinsic::x86_ssse3_phsub_d_128:
1815 case Intrinsic::x86_ssse3_phsub_w_128:
1816 case Intrinsic::x86_avx2_phsub_d:
1817 case Intrinsic::x86_avx2_phsub_w: {
1818 Known = computeKnownBitsForHorizontalOperation(
1819 I, DemandedElts, Depth, Q,
1820 [](const KnownBits &KnownLHS, const KnownBits &KnownRHS) {
1821 return KnownBits::sub(KnownLHS, KnownRHS);
1822 });
1823 break;
1824 }
1825 case Intrinsic::x86_ssse3_phsub_sw_128:
1826 case Intrinsic::x86_avx2_phsub_sw: {
1827 Known = computeKnownBitsForHorizontalOperation(I, DemandedElts, Depth,
1828 Q, KnownBits::ssub_sat);
1829 break;
1830 }
1831 case Intrinsic::riscv_vsetvli:
1832 case Intrinsic::riscv_vsetvlimax: {
1833 bool HasAVL = II->getIntrinsicID() == Intrinsic::riscv_vsetvli;
1834 const ConstantRange Range = getVScaleRange(II->getFunction(), BitWidth);
1835 uint64_t SEW = RISCVVType::decodeVSEW(
1836 cast<ConstantInt>(II->getArgOperand(HasAVL))->getZExtValue());
1837 RISCVII::VLMUL VLMUL = static_cast<RISCVII::VLMUL>(
1838 cast<ConstantInt>(II->getArgOperand(1 + HasAVL))->getZExtValue());
1839 uint64_t MaxVLEN =
1840 Range.getUnsignedMax().getZExtValue() * RISCV::RVVBitsPerBlock;
1841 uint64_t MaxVL = MaxVLEN / RISCVVType::getSEWLMULRatio(SEW, VLMUL);
1842
1843 // The result of vsetvli must not be larger than AVL.
1844 if (HasAVL)
1845 if (auto *CI = dyn_cast<ConstantInt>(II->getArgOperand(0)))
1846 MaxVL = std::min(MaxVL, CI->getZExtValue());
1847
1848 unsigned KnownZeroFirstBit = Log2_32(MaxVL) + 1;
1849 if (BitWidth > KnownZeroFirstBit)
1850 Known.Zero.setBitsFrom(KnownZeroFirstBit);
1851 break;
1852 }
1853 case Intrinsic::vscale: {
1854 if (!II->getParent() || !II->getFunction())
1855 break;
1856
1857 Known = getVScaleRange(II->getFunction(), BitWidth).toKnownBits();
1858 break;
1859 }
1860 }
1861 }
1862 break;
1863 }
1864 case Instruction::ShuffleVector: {
1865 auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1866 // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1867 if (!Shuf) {
1868 Known.resetAll();
1869 return;
1870 }
1871 // For undef elements, we don't know anything about the common state of
1872 // the shuffle result.
1873 APInt DemandedLHS, DemandedRHS;
1874 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1875 Known.resetAll();
1876 return;
1877 }
1878 Known.One.setAllBits();
1879 Known.Zero.setAllBits();
1880 if (!!DemandedLHS) {
1881 const Value *LHS = Shuf->getOperand(0);
1882 computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1883 // If we don't know any bits, early out.
1884 if (Known.isUnknown())
1885 break;
1886 }
1887 if (!!DemandedRHS) {
1888 const Value *RHS = Shuf->getOperand(1);
1889 computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1890 Known = Known.intersectWith(Known2);
1891 }
1892 break;
1893 }
1894 case Instruction::InsertElement: {
1895 if (isa<ScalableVectorType>(I->getType())) {
1896 Known.resetAll();
1897 return;
1898 }
1899 const Value *Vec = I->getOperand(0);
1900 const Value *Elt = I->getOperand(1);
1901 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1902 unsigned NumElts = DemandedElts.getBitWidth();
1903 APInt DemandedVecElts = DemandedElts;
1904 bool NeedsElt = true;
1905 // If we know the index we are inserting to, clear it from the Vec check.
1906 if (CIdx && CIdx->getValue().ult(NumElts)) {
1907 DemandedVecElts.clearBit(CIdx->getZExtValue());
1908 NeedsElt = DemandedElts[CIdx->getZExtValue()];
1909 }
1910
1911 Known.One.setAllBits();
1912 Known.Zero.setAllBits();
1913 if (NeedsElt) {
1914 computeKnownBits(Elt, Known, Depth + 1, Q);
1915 // If we don't know any bits, early out.
1916 if (Known.isUnknown())
1917 break;
1918 }
1919
1920 if (!DemandedVecElts.isZero()) {
1921 computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1922 Known = Known.intersectWith(Known2);
1923 }
1924 break;
1925 }
1926 case Instruction::ExtractElement: {
1927 // Look through extractelement. If the index is non-constant or
1928 // out-of-range, demand all elements; otherwise demand just the extracted element.
1929 const Value *Vec = I->getOperand(0);
1930 const Value *Idx = I->getOperand(1);
1931 auto *CIdx = dyn_cast<ConstantInt>(Idx);
1932 if (isa<ScalableVectorType>(Vec->getType())) {
1933 // FIXME: there's probably *something* we can do with scalable vectors
1934 Known.resetAll();
1935 break;
1936 }
1937 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1938 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
1939 if (CIdx && CIdx->getValue().ult(NumElts))
1940 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1941 computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1942 break;
1943 }
1944 case Instruction::ExtractValue:
1945 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1946 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1947 if (EVI->getNumIndices() != 1) break;
1948 if (EVI->getIndices()[0] == 0) {
1949 switch (II->getIntrinsicID()) {
1950 default: break;
1951 case Intrinsic::uadd_with_overflow:
1952 case Intrinsic::sadd_with_overflow:
1953 computeKnownBitsAddSub(
1954 true, II->getArgOperand(0), II->getArgOperand(1), /*NSW=*/false,
1955 /* NUW=*/false, DemandedElts, Known, Known2, Depth, Q);
1956 break;
1957 case Intrinsic::usub_with_overflow:
1958 case Intrinsic::ssub_with_overflow:
1959 computeKnownBitsAddSub(
1960 false, II->getArgOperand(0), II->getArgOperand(1), /*NSW=*/false,
1961 /* NUW=*/false, DemandedElts, Known, Known2, Depth, Q);
1962 break;
1963 case Intrinsic::umul_with_overflow:
1964 case Intrinsic::smul_with_overflow:
1965 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1966 DemandedElts, Known, Known2, Depth, Q);
1967 break;
1968 }
1969 }
1970 }
1971 break;
1972 case Instruction::Freeze:
1973 if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1974 Depth + 1))
1975 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1976 break;
1977 }
1978}
1979
1980/// Determine which bits of V are known to be either zero or one and return
1981/// them.
1982KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
1983 unsigned Depth, const SimplifyQuery &Q) {
1984 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1985 ::computeKnownBits(V, DemandedElts, Known, Depth, Q);
1986 return Known;
1987}
1988
1989/// Determine which bits of V are known to be either zero or one and return
1990/// them.
1991 KnownBits llvm::computeKnownBits(const Value *V, unsigned Depth,
1992 const SimplifyQuery &Q) {
1993 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1994 computeKnownBits(V, Known, Depth, Q);
1995 return Known;
1996}
1997
1998/// Determine which bits of V are known to be either zero or one and return
1999/// them in the Known bit set.
2000///
2001/// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
2002/// we cannot optimize based on the assumption that it is zero without changing
2003/// it to be an explicit zero. If we don't change it to zero, other code could
2004 /// be optimized based on the contradictory assumption that it is non-zero.
2005/// Because instcombine aggressively folds operations with undef args anyway,
2006/// this won't lose us code quality.
2007///
2008/// This function is defined on values with integer type, values with pointer
2009/// type, and vectors of integers. In the case
2010 /// where V is a vector, the known zero and known one values are the
2011 /// same width as the vector element, and a bit is set only if it is true
2012/// for all of the demanded elements in the vector specified by DemandedElts.
2013void computeKnownBits(const Value *V, const APInt &DemandedElts,
2014 KnownBits &Known, unsigned Depth,
2015 const SimplifyQuery &Q) {
2016 if (!DemandedElts) {
2017 // No demanded elts, better to assume we don't know anything.
2018 Known.resetAll();
2019 return;
2020 }
2021
2022 assert(V && "No Value?");
2023 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2024
2025#ifndef NDEBUG
2026 Type *Ty = V->getType();
2027 unsigned BitWidth = Known.getBitWidth();
2028
2030 "Not integer or pointer type!");
2031
2032 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2033 assert(
2034 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2035 "DemandedElt width should equal the fixed vector number of elements");
2036 } else {
2037 assert(DemandedElts == APInt(1, 1) &&
2038 "DemandedElt width should be 1 for scalars or scalable vectors");
2039 }
2040
2041 Type *ScalarTy = Ty->getScalarType();
2042 if (ScalarTy->isPointerTy()) {
2043 assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
2044 "V and Known should have same BitWidth");
2045 } else {
2046 assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
2047 "V and Known should have same BitWidth");
2048 }
2049#endif
2050
2051 const APInt *C;
2052 if (match(V, m_APInt(C))) {
2053 // We know all of the bits for a scalar constant or a splat vector constant!
2054 Known = KnownBits::makeConstant(*C);
2055 return;
2056 }
2057 // Null and aggregate-zero are all-zeros.
2058 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
2059 Known.setAllZero();
2060 return;
2061 }
2062 // Handle a constant vector by taking the intersection of the known bits of
2063 // each element.
2064 if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
2065 assert(!isa<ScalableVectorType>(V->getType()));
2066 // We know that CDV must be a vector of integers. Take the intersection of
2067 // each element.
2068 Known.Zero.setAllBits(); Known.One.setAllBits();
2069 for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
2070 if (!DemandedElts[i])
2071 continue;
2072 APInt Elt = CDV->getElementAsAPInt(i);
2073 Known.Zero &= ~Elt;
2074 Known.One &= Elt;
2075 }
2076 if (Known.hasConflict())
2077 Known.resetAll();
2078 return;
2079 }
2080
2081 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
2082 assert(!isa<ScalableVectorType>(V->getType()));
2083 // We know that CV must be a vector of integers. Take the intersection of
2084 // each element.
2085 Known.Zero.setAllBits(); Known.One.setAllBits();
2086 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
2087 if (!DemandedElts[i])
2088 continue;
2089 Constant *Element = CV->getAggregateElement(i);
2090 if (isa<PoisonValue>(Element))
2091 continue;
2092 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
2093 if (!ElementCI) {
2094 Known.resetAll();
2095 return;
2096 }
2097 const APInt &Elt = ElementCI->getValue();
2098 Known.Zero &= ~Elt;
2099 Known.One &= Elt;
2100 }
2101 if (Known.hasConflict())
2102 Known.resetAll();
2103 return;
2104 }
2105
2106 // Start out not knowing anything.
2107 Known.resetAll();
2108
2109 // We can't imply anything about undefs.
2110 if (isa<UndefValue>(V))
2111 return;
2112
2113 // There's no point in looking through other users of ConstantData for
2114 // assumptions. Confirm that we've handled them all.
2115 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
2116
2117 if (const auto *A = dyn_cast<Argument>(V))
2118 if (std::optional<ConstantRange> Range = A->getRange())
2119 Known = Range->toKnownBits();
2120
2121 // All recursive calls that increase depth must come after this.
2122 if (Depth == MaxAnalysisRecursionDepth)
2123 return;
2124
2125 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
2126 // the bits of its aliasee.
2127 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2128 if (!GA->isInterposable())
2129 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
2130 return;
2131 }
2132
2133 if (const Operator *I = dyn_cast<Operator>(V))
2134 computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
2135 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2136 if (std::optional<ConstantRange> CR = GV->getAbsoluteSymbolRange())
2137 Known = CR->toKnownBits();
2138 }
2139
2140 // Aligned pointers have trailing zeros - refine Known.Zero set
2141 if (isa<PointerType>(V->getType())) {
2142 Align Alignment = V->getPointerAlignment(Q.DL);
2143 Known.Zero.setLowBits(Log2(Alignment));
2144 }
2145
2146 // computeKnownBitsFromContext strictly refines Known.
2147 // Therefore, we run them after computeKnownBitsFromOperator.
2148
2149 // Check whether we can determine known bits from context such as assumes.
2150 computeKnownBitsFromContext(V, Known, Depth, Q);
2151}
2152
2153/// Try to detect a recurrence that the value of the induction variable is
2154/// always a power of two (or zero).
2155static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
2156 unsigned Depth, SimplifyQuery &Q) {
2157 BinaryOperator *BO = nullptr;
2158 Value *Start = nullptr, *Step = nullptr;
2159 if (!matchSimpleRecurrence(PN, BO, Start, Step))
2160 return false;
2161
2162 // Initial value must be a power of two.
2163 for (const Use &U : PN->operands()) {
2164 if (U.get() == Start) {
2165 // Initial value comes from a different BB, need to adjust context
2166 // instruction for analysis.
2167 Q.CxtI = PN->getIncomingBlock(U)->getTerminator();
2168 if (!isKnownToBeAPowerOfTwo(Start, OrZero, Depth, Q))
2169 return false;
2170 }
2171 }
2172
2173 // Except for Mul, the induction variable must be on the left side of the
2174 // increment expression, otherwise its value can be arbitrary.
2175 if (BO->getOpcode() != Instruction::Mul && BO->getOperand(1) != Step)
2176 return false;
2177
2178 Q.CxtI = BO->getParent()->getTerminator();
2179 switch (BO->getOpcode()) {
2180 case Instruction::Mul:
2181 // Power of two is closed under multiplication.
2182 return (OrZero || Q.IIQ.hasNoUnsignedWrap(BO) ||
2183 Q.IIQ.hasNoSignedWrap(BO)) &&
2184 isKnownToBeAPowerOfTwo(Step, OrZero, Depth, Q);
2185 case Instruction::SDiv:
2186 // Start value must not be signmask for signed division, so simply being a
2187 // power of two is not sufficient, and it has to be a constant.
2188 if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
2189 return false;
2190 [[fallthrough]];
2191 case Instruction::UDiv:
2192 // Divisor must be a power of two.
2193 // If OrZero is false, we cannot guarantee the induction variable is non-zero
2194 // after division (same for Shr), unless the division is exact.
2195 return (OrZero || Q.IIQ.isExact(BO)) &&
2196 isKnownToBeAPowerOfTwo(Step, false, Depth, Q);
2197 case Instruction::Shl:
2198 return OrZero || Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO);
2199 case Instruction::AShr:
2200 if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
2201 return false;
2202 [[fallthrough]];
2203 case Instruction::LShr:
2204 return OrZero || Q.IIQ.isExact(BO);
2205 default:
2206 return false;
2207 }
2208}
2209
2210/// Return true if the given value is known to have exactly one
2211/// bit set when defined. For vectors return true if every element is known to
2212/// be a power of two when defined. Supports values with integer or pointer
2213/// types and vectors of integers.
2214bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
2215 const SimplifyQuery &Q) {
2216 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2217
2218 if (isa<Constant>(V))
2219 return OrZero ? match(V, m_Power2OrZero()) : match(V, m_Power2());
2220
2221 // i1 is by definition a power of 2 or zero.
2222 if (OrZero && V->getType()->getScalarSizeInBits() == 1)
2223 return true;
2224
2225 auto *I = dyn_cast<Instruction>(V);
2226 if (!I)
2227 return false;
2228
2229 if (Q.CxtI && match(V, m_VScale())) {
2230 const Function *F = Q.CxtI->getFunction();
2231 // The vscale_range indicates vscale is a power-of-two.
2232 return F->hasFnAttribute(Attribute::VScaleRange);
2233 }
2234
2235 // 1 << X is clearly a power of two if the one is not shifted off the end. If
2236 // it is shifted off the end then the result is undefined.
2237 if (match(I, m_Shl(m_One(), m_Value())))
2238 return true;
2239
2240 // (signmask) >>l X is clearly a power of two if the one is not shifted off
2241 // the bottom. If it is shifted off the bottom then the result is undefined.
2242 if (match(I, m_LShr(m_SignMask(), m_Value())))
2243 return true;
2244
2245 // The remaining tests are all recursive, so bail out if we hit the limit.
2246 if (Depth++ == MaxAnalysisRecursionDepth)
2247 return false;
2248
2249 switch (I->getOpcode()) {
2250 case Instruction::ZExt:
2251 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q);
2252 case Instruction::Trunc:
2253 return OrZero && isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q);
2254 case Instruction::Shl:
2255 if (OrZero || Q.IIQ.hasNoUnsignedWrap(I) || Q.IIQ.hasNoSignedWrap(I))
2256 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q);
2257 return false;
2258 case Instruction::LShr:
2259 if (OrZero || Q.IIQ.isExact(cast<BinaryOperator>(I)))
2260 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q);
2261 return false;
2262 case Instruction::UDiv:
2263 if (Q.IIQ.isExact(cast<BinaryOperator>(I)))
2264 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q);
2265 return false;
2266 case Instruction::Mul:
2267 return isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Depth, Q) &&
2268 isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q) &&
2269 (OrZero || isKnownNonZero(I, Q, Depth));
2270 case Instruction::And:
2271 // A power of two and'd with anything is a power of two or zero.
2272 if (OrZero &&
2273 (isKnownToBeAPowerOfTwo(I->getOperand(1), /*OrZero*/ true, Depth, Q) ||
2274 isKnownToBeAPowerOfTwo(I->getOperand(0), /*OrZero*/ true, Depth, Q)))
2275 return true;
2276 // X & (-X) is always a power of two or zero.
2277 if (match(I->getOperand(0), m_Neg(m_Specific(I->getOperand(1)))) ||
2278 match(I->getOperand(1), m_Neg(m_Specific(I->getOperand(0)))))
2279 return OrZero || isKnownNonZero(I->getOperand(0), Q, Depth);
2280 return false;
2281 case Instruction::Add: {
2282 // Adding a power-of-two or zero to the same power-of-two or zero yields
2283 // either the original power-of-two, a larger power-of-two or zero.
2284 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
2285 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
2286 Q.IIQ.hasNoSignedWrap(VOBO)) {
2287 if (match(I->getOperand(0),
2288 m_c_And(m_Specific(I->getOperand(1)), m_Value())) &&
2289 isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Depth, Q))
2290 return true;
2291 if (match(I->getOperand(1),
2292 m_c_And(m_Specific(I->getOperand(0)), m_Value())) &&
2293 isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q))
2294 return true;
2295
2296 unsigned BitWidth = V->getType()->getScalarSizeInBits();
2297 KnownBits LHSBits(BitWidth);
2298 computeKnownBits(I->getOperand(0), LHSBits, Depth, Q);
2299
2300 KnownBits RHSBits(BitWidth);
2301 computeKnownBits(I->getOperand(1), RHSBits, Depth, Q);
2302 // If i8 V is a power of two or zero:
2303 // ZeroBits: 1 1 1 0 1 1 1 1
2304 // ~ZeroBits: 0 0 0 1 0 0 0 0
2305 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2306 // If OrZero isn't set, we cannot give back a zero result.
2307 // Make sure either the LHS or RHS has a bit set.
2308 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2309 return true;
2310 }
2311
2312 // LShr(UINT_MAX, Y) + 1 is a power of two (if add is nuw) or zero.
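// Illustration: lshr of all-ones by Y leaves the low BitWidth - Y bits set;
// adding one then yields 2^(BitWidth - Y), or wraps to zero when Y is 0,
// which is why nuw (or OrZero) is required.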
2313 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO))
2314 if (match(I, m_Add(m_LShr(m_AllOnes(), m_Value()), m_One())))
2315 return true;
2316 return false;
2317 }
2318 case Instruction::Select:
2319 return isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Depth, Q) &&
2320 isKnownToBeAPowerOfTwo(I->getOperand(2), OrZero, Depth, Q);
2321 case Instruction::PHI: {
2322 // A PHI node is a power of two if all incoming values are powers of two, or
2323 // if it is an induction variable where in each step its value is a power of
2324 // two.
2325 auto *PN = cast<PHINode>(I);
2326 SimplifyQuery RecQ = Q.getWithoutCondContext();
2327
2328 // Check if it is an induction variable and always power of two.
2329 if (isPowerOfTwoRecurrence(PN, OrZero, Depth, RecQ))
2330 return true;
2331
2332 // Recursively check all incoming values. Limit recursion to 2 levels, so
2333 // that search complexity is limited to number of operands^2.
2334 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2335 return llvm::all_of(PN->operands(), [&](const Use &U) {
2336 // Value is power of 2 if it is coming from PHI node itself by induction.
2337 if (U.get() == PN)
2338 return true;
2339
2340 // Change the context instruction to the incoming block where it is
2341 // evaluated.
2342 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2343 return isKnownToBeAPowerOfTwo(U.get(), OrZero, NewDepth, RecQ);
2344 });
2345 }
2346 case Instruction::Invoke:
2347 case Instruction::Call: {
2348 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
2349 switch (II->getIntrinsicID()) {
2350 case Intrinsic::umax:
2351 case Intrinsic::smax:
2352 case Intrinsic::umin:
2353 case Intrinsic::smin:
2354 return isKnownToBeAPowerOfTwo(II->getArgOperand(1), OrZero, Depth, Q) &&
2355 isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Depth, Q);
2356 // bswap/bitreverse just move bits around and don't change the number of
2357 // 1s/0s, and thus don't change pow2/non-pow2 status.
2358 case Intrinsic::bitreverse:
2359 case Intrinsic::bswap:
2360 return isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Depth, Q);
2361 case Intrinsic::fshr:
2362 case Intrinsic::fshl:
2363 // If Op0 == Op1, this is a rotate. is_pow2(rotate(x, y)) == is_pow2(x)
2364 if (II->getArgOperand(0) == II->getArgOperand(1))
2365 return isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Depth, Q);
2366 break;
2367 default:
2368 break;
2369 }
2370 }
2371 return false;
2372 }
2373 default:
2374 return false;
2375 }
2376}
2377
2378/// Test whether a GEP's result is known to be non-null.
2379///
2380/// Uses properties inherent in a GEP to try to determine whether it is known
2381/// to be non-null.
2382///
2383/// Currently this routine does not support vector GEPs.
2384static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2385 const SimplifyQuery &Q) {
2386 const Function *F = nullptr;
2387 if (const Instruction *I = dyn_cast<Instruction>(GEP))
2388 F = I->getFunction();
2389
2390 // If the GEP is nuw, or is inbounds with null being an invalid address, then
2391 // it can only be null if the base pointer is null and the offset is zero.
2392 if (!GEP->hasNoUnsignedWrap() &&
2393 !(GEP->isInBounds() &&
2394 !NullPointerIsDefined(F, GEP->getPointerAddressSpace())))
2395 return false;
2396
2397 // FIXME: Support vector-GEPs.
2398 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2399
2400 // If the base pointer is non-null, we cannot walk to a null address with an
2401 // inbounds GEP in address space zero.
2402 if (isKnownNonZero(GEP->getPointerOperand(), Q, Depth))
2403 return true;
2404
2405 // Walk the GEP operands and see if any operand introduces a non-zero offset.
2406 // If so, then the GEP cannot produce a null pointer, as doing so would
2407 // inherently violate the inbounds contract within address space zero.
2408 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2409 GTI != GTE; ++GTI) {
2410 // Struct types are easy -- they must always be indexed by a constant.
2411 if (StructType *STy = GTI.getStructTypeOrNull()) {
2412 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2413 unsigned ElementIdx = OpC->getZExtValue();
2414 const StructLayout *SL = Q.DL.getStructLayout(STy);
2415 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2416 if (ElementOffset > 0)
2417 return true;
2418 continue;
2419 }
2420
2421 // If we have a zero-sized type, the index doesn't matter. Keep looping.
2422 if (GTI.getSequentialElementStride(Q.DL).isZero())
2423 continue;
2424
2425 // Fast path the constant operand case both for efficiency and so we don't
2426 // increment Depth when just zipping down an all-constant GEP.
2427 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2428 if (!OpC->isZero())
2429 return true;
2430 continue;
2431 }
2432
2433 // We post-increment Depth here because while isKnownNonZero increments it
2434 // as well, when we pop back up that increment won't persist. We don't want
2435 // to recurse 10k times just because we have 10k GEP operands. We don't
2436 // bail completely out because we want to handle constant GEPs regardless
2437 // of depth.
2438 if (Depth++ >= MaxAnalysisRecursionDepth)
2439 continue;
2440
2441 if (isKnownNonZero(GTI.getOperand(), Q, Depth))
2442 return true;
2443 }
2444
2445 return false;
2446}
2447
2447
2448 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2449 const Instruction *CtxI,
2450 const DominatorTree *DT) {
2451 assert(!isa<Constant>(V) && "Called for constant?");
2452
2453 if (!CtxI || !DT)
2454 return false;
2455
2456 unsigned NumUsesExplored = 0;
2457 for (const auto *U : V->users()) {
2458 // Avoid massive lists
2459 if (NumUsesExplored >= DomConditionsMaxUses)
2460 break;
2461 NumUsesExplored++;
2462
2463 // If the value is used as an argument to a call or invoke, then argument
2464 // attributes may provide an answer about null-ness.
2465 if (const auto *CB = dyn_cast<CallBase>(U))
2466 if (auto *CalledFunc = CB->getCalledFunction())
2467 for (const Argument &Arg : CalledFunc->args())
2468 if (CB->getArgOperand(Arg.getArgNo()) == V &&
2469 Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
2470 DT->dominates(CB, CtxI))
2471 return true;
2472
2473 // If the value is used as a load/store pointer, then it must be non-null.
2474 if (V == getLoadStorePointerOperand(U)) {
2475 const Instruction *I = cast<Instruction>(U);
2476 if (!NullPointerIsDefined(I->getFunction(),
2477 V->getType()->getPointerAddressSpace()) &&
2478 DT->dominates(I, CtxI))
2479 return true;
2480 }
2481
2482 if ((match(U, m_IDiv(m_Value(), m_Specific(V))) ||
2483 match(U, m_IRem(m_Value(), m_Specific(V)))) &&
2484 isValidAssumeForContext(cast<Instruction>(U), CtxI, DT))
2485 return true;
2486
2487 // Consider only compare instructions uniquely controlling a branch
2488 Value *RHS;
2489 CmpInst::Predicate Pred;
2490 if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
2491 continue;
2492
2493 bool NonNullIfTrue;
2494 if (cmpExcludesZero(Pred, RHS))
2495 NonNullIfTrue = true;
2496 else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
2497 NonNullIfTrue = false;
2498 else
2499 continue;
2500
2501 SmallVector<const User *, 4> WorkList;
2502 SmallPtrSet<const User *, 4> Visited;
2503 for (const auto *CmpU : U->users()) {
2504 assert(WorkList.empty() && "Should be!");
2505 if (Visited.insert(CmpU).second)
2506 WorkList.push_back(CmpU);
2507
2508 while (!WorkList.empty()) {
2509 auto *Curr = WorkList.pop_back_val();
2510
2511 // If a user is an AND, add all its users to the work list. We only
2512 // propagate "pred != null" condition through AND because it is only
2513 // correct to assume that all conditions of AND are met in true branch.
2514 // TODO: Support similar logic of OR and EQ predicate?
2515 if (NonNullIfTrue)
2516 if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
2517 for (const auto *CurrU : Curr->users())
2518 if (Visited.insert(CurrU).second)
2519 WorkList.push_back(CurrU);
2520 continue;
2521 }
2522
2523 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2524 assert(BI->isConditional() && "uses a comparison!");
2525
2526 BasicBlock *NonNullSuccessor =
2527 BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2528 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2529 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2530 return true;
2531 } else if (NonNullIfTrue && isGuard(Curr) &&
2532 DT->dominates(cast<Instruction>(Curr), CtxI)) {
2533 return true;
2534 }
2535 }
2536 }
2537 }
2538
2539 return false;
2540}
2541
2542/// Does the 'Range' metadata (which must be a valid MD_range operand list)
2543/// ensure that the value it's attached to is never Value? 'RangeType' is
2544 /// the type of the value described by the range.
2545static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2546 const unsigned NumRanges = Ranges->getNumOperands() / 2;
2547 assert(NumRanges >= 1);
2548 for (unsigned i = 0; i < NumRanges; ++i) {
2549 ConstantInt *Lower =
2550 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2551 ConstantInt *Upper =
2552 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2553 ConstantRange Range(Lower->getValue(), Upper->getValue());
2554 if (Range.contains(Value))
2555 return false;
2556 }
2557 return true;
2558}
2559
2560/// Try to detect a recurrence that monotonically increases/decreases from a
2561/// non-zero starting value. These are common as induction variables.
2562static bool isNonZeroRecurrence(const PHINode *PN) {
2563 BinaryOperator *BO = nullptr;
2564 Value *Start = nullptr, *Step = nullptr;
2565 const APInt *StartC, *StepC;
2566 if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
2567 !match(Start, m_APInt(StartC)) || StartC->isZero())
2568 return false;
2569
2570 switch (BO->getOpcode()) {
2571 case Instruction::Add:
2572 // Starting from non-zero and stepping away from zero can never wrap back
2573 // to zero.
2574 return BO->hasNoUnsignedWrap() ||
2575 (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) &&
2576 StartC->isNegative() == StepC->isNegative());
2577 case Instruction::Mul:
2578 return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
2579 match(Step, m_APInt(StepC)) && !StepC->isZero();
2580 case Instruction::Shl:
2581 return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap();
2582 case Instruction::AShr:
2583 case Instruction::LShr:
2584 return BO->isExact();
2585 default:
2586 return false;
2587 }
2588}
2589
2590static bool matchOpWithOpEqZero(Value *Op0, Value *Op1) {
2591 return match(Op0, m_ZExtOrSExt(m_SpecificICmp(ICmpInst::ICMP_EQ,
2592 m_Specific(Op1), m_Zero()))) ||
2593 match(Op1, m_ZExtOrSExt(m_SpecificICmp(ICmpInst::ICMP_EQ,
2594 m_Specific(Op0), m_Zero())));
2595}
2596
2597static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth,
2598 const SimplifyQuery &Q, unsigned BitWidth, Value *X,
2599 Value *Y, bool NSW, bool NUW) {
2600 // (X + (X != 0)) is non zero
2601 if (matchOpWithOpEqZero(X, Y))
2602 return true;
2603
2604 if (NUW)
2605 return isKnownNonZero(Y, DemandedElts, Q, Depth) ||
2606 isKnownNonZero(X, DemandedElts, Q, Depth);
2607
2608 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2609 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2610
2611 // If X and Y are both non-negative (as signed values) then their sum is not
2612 // zero unless both X and Y are zero.
2613 if (XKnown.isNonNegative() && YKnown.isNonNegative())
2614 if (isKnownNonZero(Y, DemandedElts, Q, Depth) ||
2615 isKnownNonZero(X, DemandedElts, Q, Depth))
2616 return true;
2617
2618 // If X and Y are both negative (as signed values) then their sum is not
2619 // zero unless both X and Y equal INT_MIN.
2620 if (XKnown.isNegative() && YKnown.isNegative()) {
2621 APInt Mask = APInt::getSignedMaxValue(BitWidth);
2622 // The sign bit of X is set. If some other bit is set then X is not equal
2623 // to INT_MIN.
2624 if (XKnown.One.intersects(Mask))
2625 return true;
2626 // The sign bit of Y is set. If some other bit is set then Y is not equal
2627 // to INT_MIN.
2628 if (YKnown.One.intersects(Mask))
2629 return true;
2630 }
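// Illustration: on i8, two negative values sum to zero (mod 256) only if
// both are exactly INT_MIN (0x80); a known-one bit outside the sign bit in
// either operand rules that out.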
2631
2632 // The sum of a non-negative number and a power of two is not zero.
2633 if (XKnown.isNonNegative() &&
2634 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2635 return true;
2636 if (YKnown.isNonNegative() &&
2637 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2638 return true;
2639
2640 return KnownBits::add(XKnown, YKnown, NSW, NUW).isNonZero();
2641}
2642
2643static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth,
2644 const SimplifyQuery &Q, unsigned BitWidth, Value *X,
2645 Value *Y) {
2646 // (X - (X != 0)) is non zero
2647 // ((X != 0) - X) is non zero
2648 if (matchOpWithOpEqZero(X, Y))
2649 return true;
2650
2651 // TODO: Move this case into isKnownNonEqual().
2652 if (auto *C = dyn_cast<Constant>(X))
2653 if (C->isNullValue() && isKnownNonZero(Y, DemandedElts, Q, Depth))
2654 return true;
2655
2656 return ::isKnownNonEqual(X, Y, DemandedElts, Depth, Q);
2657}
2658
2659static bool isNonZeroMul(const APInt &DemandedElts, unsigned Depth,
2660 const SimplifyQuery &Q, unsigned BitWidth, Value *X,
2661 Value *Y, bool NSW, bool NUW) {
2662 // If X and Y are non-zero then so is X * Y as long as the multiplication
2663 // does not overflow.
2664 if (NSW || NUW)
2665 return isKnownNonZero(X, DemandedElts, Q, Depth) &&
2666 isKnownNonZero(Y, DemandedElts, Q, Depth);
2667
2668 // If either X or Y is odd, then if the other is non-zero the result can't
2669 // be zero.
2670 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2671 if (XKnown.One[0])
2672 return isKnownNonZero(Y, DemandedElts, Q, Depth);
2673
2674 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2675 if (YKnown.One[0])
2676 return XKnown.isNonZero() || isKnownNonZero(X, DemandedElts, Q, Depth);
2677
2678 // If there exists any subset of X (sX) and subset of Y (sY) s.t sX * sY is
2679 // non-zero, then X * Y is non-zero. We can find sX and sY by just taking
2680 // the lowest known One of X and Y. If they are non-zero, the result
2681 // must be non-zero. We can check that LSB(X) * LSB(Y) != 0 by checking that
2682 // X.countMaxTrailingZeros() + Y.countMaxTrailingZeros() < BitWidth.
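// Illustration: if X's lowest known-one bit is at position 2 and Y's is at
// position 4, then X = ox * 2^k and Y = oy * 2^m with ox, oy odd, k <= 2 and
// m <= 4; the product's lowest set bit is at k + m <= 6, which still fits in
// an i8, so the truncated product cannot be zero.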
2683 return (XKnown.countMaxTrailingZeros() + YKnown.countMaxTrailingZeros()) <
2684 BitWidth;
2685}
2686
2687static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts,
2688 unsigned Depth, const SimplifyQuery &Q,
2689 const KnownBits &KnownVal) {
2690 auto ShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
2691 switch (I->getOpcode()) {
2692 case Instruction::Shl:
2693 return Lhs.shl(Rhs);
2694 case Instruction::LShr:
2695 return Lhs.lshr(Rhs);
2696 case Instruction::AShr:
2697 return Lhs.ashr(Rhs);
2698 default:
2699 llvm_unreachable("Unknown Shift Opcode");
2700 }
2701 };
2702
2703 auto InvShiftOp = [&](const APInt &Lhs, const APInt &Rhs) {
2704 switch (I->getOpcode()) {
2705 case Instruction::Shl:
2706 return Lhs.lshr(Rhs);
2707 case Instruction::LShr:
2708 case Instruction::AShr:
2709 return Lhs.shl(Rhs);
2710 default:
2711 llvm_unreachable("Unknown Shift Opcode");
2712 }
2713 };
2714
2715 if (KnownVal.isUnknown())
2716 return false;
2717
2718 KnownBits KnownCnt =
2719 computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q);
2720 APInt MaxShift = KnownCnt.getMaxValue();
2721 unsigned NumBits = KnownVal.getBitWidth();
2722 if (MaxShift.uge(NumBits))
2723 return false;
2724
2725 if (!ShiftOp(KnownVal.One, MaxShift).isZero())
2726 return true;
2727
2728 // If all of the bits shifted out are known to be zero, and Val is known
2729 // non-zero then at least one non-zero bit must remain.
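// Illustration: for shl on i8 with a shift amount known to be at most 3, if
// the top 3 bits of the operand are known zero then no set bit can be
// shifted out, so a non-zero operand yields a non-zero result.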
2730 if (InvShiftOp(KnownVal.Zero, NumBits - MaxShift)
2731 .eq(InvShiftOp(APInt::getAllOnes(NumBits), NumBits - MaxShift)) &&
2732 isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth))
2733 return true;
2734
2735 return false;
2736}
2737
2738 static bool isKnownNonZeroFromOperator(const Operator *I,
2739 const APInt &DemandedElts,
2740 unsigned Depth, const SimplifyQuery &Q) {
2741 unsigned BitWidth = getBitWidth(I->getType()->getScalarType(), Q.DL);
2742 switch (I->getOpcode()) {
2743 case Instruction::Alloca:
2744 // Alloca never returns null, malloc might.
2745 return I->getType()->getPointerAddressSpace() == 0;
2746 case Instruction::GetElementPtr:
2747 if (I->getType()->isPointerTy())
2748 return isGEPKnownNonNull(cast<GEPOperator>(I), Depth, Q);
2749 break;
2750 case Instruction::BitCast: {
2751 // We need to be a bit careful here. We can only peek through the bitcast
2752 // if the scalar size of the operand's elements is smaller than, and evenly
2753 // divides, the scalar size of the elements being cast to. Take three cases:
2754 //
2755 // 1) Unsafe:
2756 // bitcast <2 x i16> %NonZero to <4 x i8>
2757 //
2758 // %NonZero can have 2 non-zero i16 elements, but isKnownNonZero on a
2759 // <4 x i8> requires that all 4 i8 elements be non-zero which isn't
2760 // guaranteed (imagine just the sign bit set in the 2 i16 elements).
2761 //
2762 // 2) Unsafe:
2763 // bitcast <4 x i3> %NonZero to <3 x i4>
2764 //
2765 // Even though the scalar size of the src (`i3`) is smaller than the
2766 // scalar size of the dst `i4`, because `i4` is not a multiple of `i3`
2767 // it's possible for the `3 x i4` elements to be zero because there are
2768 // some elements in the destination that don't contain any full src
2769 // element.
2770 //
2771 // 3) Safe:
2772 // bitcast <4 x i8> %NonZero to <2 x i16>
2773 //
2774 // This is always safe as non-zero in the 4 i8 elements implies
2775 // non-zero in the combination of any two adjacent ones. Since i16 is a
2776 // multiple of i8, each i16 is guaranteed to contain 2 full i8 elements.
2777 // This all implies the 2 i16 elements are non-zero.
2778 Type *FromTy = I->getOperand(0)->getType();
2779 if ((FromTy->isIntOrIntVectorTy() || FromTy->isPtrOrPtrVectorTy()) &&
2780 (BitWidth % getBitWidth(FromTy->getScalarType(), Q.DL)) == 0)
2781 return isKnownNonZero(I->getOperand(0), Q, Depth);
2782 } break;
2783 case Instruction::IntToPtr:
2784 // Note that we have to take special care to avoid looking through
2785 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2786 // as casts that can alter the value, e.g., AddrSpaceCasts.
2787 if (!isa<ScalableVectorType>(I->getType()) &&
2788 Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <=
2789 Q.DL.getTypeSizeInBits(I->getType()).getFixedValue())
2790 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth);
2791 break;
2792 case Instruction::PtrToInt:
2793 // Similar to int2ptr above, we can look through ptr2int here if the cast
2794 // is a no-op or an extend and not a truncate.
2795 if (!isa<ScalableVectorType>(I->getType()) &&
2796 Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <=
2797 Q.DL.getTypeSizeInBits(I->getType()).getFixedValue())
2798 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth);
2799 break;
2800 case Instruction::Trunc:
2801 // nuw/nsw trunc preserves zero/non-zero status of input.
2802 if (auto *TI = dyn_cast<TruncInst>(I))
2803 if (TI->hasNoSignedWrap() || TI->hasNoUnsignedWrap())
2804 return isKnownNonZero(TI->getOperand(0), DemandedElts, Q, Depth);
2805 break;
2806
2807 case Instruction::Sub:
2808 return isNonZeroSub(DemandedElts, Depth, Q, BitWidth, I->getOperand(0),
2809 I->getOperand(1));
2810 case Instruction::Xor:
2811 // (X ^ (X != 0)) is non zero
2812 if (matchOpWithOpEqZero(I->getOperand(0), I->getOperand(1)))
2813 return true;
2814 break;
2815 case Instruction::Or:
2816 // (X | (X != 0)) is non zero
2817 if (matchOpWithOpEqZero(I->getOperand(0), I->getOperand(1)))
2818 return true;
2819 // X | Y != 0 if X != 0 or Y != 0.
2820 return isKnownNonZero(I->getOperand(1), DemandedElts, Q, Depth) ||
2821 isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth);
2822 case Instruction::SExt:
2823 case Instruction::ZExt:
2824 // ext X != 0 if X != 0.
2825 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth);
2826
2827 case Instruction::Shl: {
2828 // shl nsw/nuw can't remove any non-zero bits.
2829 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(I);
2830 if (Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO))
2831 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth);
2832
2833 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
2834 // if the lowest bit is shifted off the end.
2835 KnownBits Known(BitWidth);
2836 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth, Q);
2837 if (Known.One[0])
2838 return true;
2839
2840 return isNonZeroShift(I, DemandedElts, Depth, Q, Known);
2841 }
2842 case Instruction::LShr:
2843 case Instruction::AShr: {
2844 // shr exact can only shift out zero bits.
2845 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(I);
2846 if (BO->isExact())
2847 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth);
2848
2849 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
2850 // defined if the sign bit is shifted off the end.
2851 KnownBits Known =
2852 computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q);
2853 if (Known.isNegative())
2854 return true;
2855
2856 return isNonZeroShift(I, DemandedElts, Depth, Q, Known);
2857 }
2858 case Instruction::UDiv:
2859 case Instruction::SDiv: {
2860 // X / Y
2861 // div exact can only produce a zero if the dividend is zero.
2862 if (cast<PossiblyExactOperator>(I)->isExact())
2863 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth);
2864
2865 KnownBits XKnown =
2866 computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q);
2867 // If X is fully unknown we won't be able to figure anything out, so don't
2868 // bother computing known bits for Y.
2869 if (XKnown.isUnknown())
2870 return false;
2871
2872 KnownBits YKnown =
2873 computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q);
2874 if (I->getOpcode() == Instruction::SDiv) {
2875 // For signed division we need to compare the absolute values of the operands.
2876 XKnown = XKnown.abs(/*IntMinIsPoison*/ false);
2877 YKnown = YKnown.abs(/*IntMinIsPoison*/ false);
2878 }
2879 // If X u>= Y then div is non zero (0/0 is UB).
2880 std::optional<bool> XUgeY = KnownBits::uge(XKnown, YKnown);
2881 // If X is totally unknown or X u< Y, we won't be able to prove non-zero
2882 // with computeKnownBits, so just return early.
2883 return XUgeY && *XUgeY;
2884 }
2885 case Instruction::Add: {
2886 // X + Y.
2887
2888 // If the add has the nuw flag, then if either X or Y is non-zero the result
2889 // is non-zero.
2890 auto *BO = cast<OverflowingBinaryOperator>(I);
2891 return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth, I->getOperand(0),
2892 I->getOperand(1), Q.IIQ.hasNoSignedWrap(BO),
2893 Q.IIQ.hasNoUnsignedWrap(BO));
2894 }
2895 case Instruction::Mul: {
2896 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(I);
2897 return isNonZeroMul(DemandedElts, Depth, Q, BitWidth, I->getOperand(0),
2898 I->getOperand(1), Q.IIQ.hasNoSignedWrap(BO),
2899 Q.IIQ.hasNoUnsignedWrap(BO));
2900 }
2901 case Instruction::Select: {
2902 // (C ? X : Y) != 0 if X != 0 and Y != 0.
2903
2904 // First check if the arm is non-zero using `isKnownNonZero`. If that fails,
2905 // then see if the select condition implies the arm is non-zero. For example
2906 // (X != 0 ? X : Y), we know the true arm is non-zero as the `X` "return" is
2907 // dominated by `X != 0`.
2908 auto SelectArmIsNonZero = [&](bool IsTrueArm) {
2909 Value *Op;
2910 Op = IsTrueArm ? I->getOperand(1) : I->getOperand(2);
2911 // Op is trivially non-zero.
2912 if (isKnownNonZero(Op, DemandedElts, Q, Depth))
2913 return true;
2914
2915 // The condition of the select dominates the true/false arm. Check if the
2916 // condition implies that a given arm is non-zero.
2917 Value *X;
2918 CmpInst::Predicate Pred;
2919 if (!match(I->getOperand(0), m_c_ICmp(Pred, m_Specific(Op), m_Value(X))))
2920 return false;
2921
2922 if (!IsTrueArm)
2923 Pred = ICmpInst::getInversePredicate(Pred);
2924
2925 return cmpExcludesZero(Pred, X);
2926 };
2927
2928 if (SelectArmIsNonZero(/* IsTrueArm */ true) &&
2929 SelectArmIsNonZero(/* IsTrueArm */ false))
2930 return true;
2931 break;
2932 }
2933 case Instruction::PHI: {
2934 auto *PN = cast<PHINode>(I);
2935 if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN))
2936 return true;
2937
2938 // Check if all incoming values are non-zero using recursion.
2939 SimplifyQuery RecQ = Q.getWithoutCondContext();
2940 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2941 return llvm::all_of(PN->operands(), [&](const Use &U) {
2942 if (U.get() == PN)
2943 return true;
2944 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2945 // Check if the branch on the phi excludes zero.
2946 ICmpInst::Predicate Pred;
2947 Value *X;
2948 BasicBlock *TrueSucc, *FalseSucc;
2949 if (match(RecQ.CxtI,
2950 m_Br(m_c_ICmp(Pred, m_Specific(U.get()), m_Value(X)),
2951 m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) {
2952 // Check for cases of duplicate successors.
2953 if ((TrueSucc == PN->getParent()) != (FalseSucc == PN->getParent())) {
2954 // If we're using the false successor, invert the predicate.
2955 if (FalseSucc == PN->getParent())
2956 Pred = CmpInst::getInversePredicate(Pred);
2957 if (cmpExcludesZero(Pred, X))
2958 return true;
2959 }
2960 }
2961 // Finally recurse on the edge and check it directly.
2962 return isKnownNonZero(U.get(), DemandedElts, RecQ, NewDepth);
2963 });
2964 }
2965 case Instruction::InsertElement: {
2966 if (isa<ScalableVectorType>(I->getType()))
2967 break;
2968
2969 const Value *Vec = I->getOperand(0);
2970 const Value *Elt = I->getOperand(1);
2971 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
2972
2973 unsigned NumElts = DemandedElts.getBitWidth();
2974 APInt DemandedVecElts = DemandedElts;
2975 bool SkipElt = false;
2976 // If we know the index we are inserting to, clear it from the Vec check.
2977 if (CIdx && CIdx->getValue().ult(NumElts)) {
2978 DemandedVecElts.clearBit(CIdx->getZExtValue());
2979 SkipElt = !DemandedElts[CIdx->getZExtValue()];
2980 }
2981
2982 // The result is non-zero if Elt is non-zero and the rest of the demanded
2983 // elts in Vec are non-zero.
2984 return (SkipElt || isKnownNonZero(Elt, Q, Depth)) &&
2985 (DemandedVecElts.isZero() ||
2986 isKnownNonZero(Vec, DemandedVecElts, Q, Depth));
2987 }
2988 case Instruction::ExtractElement:
2989 if (const auto *EEI = dyn_cast<ExtractElementInst>(I)) {
2990 const Value *Vec = EEI->getVectorOperand();
2991 const Value *Idx = EEI->getIndexOperand();
2992 auto *CIdx = dyn_cast<ConstantInt>(Idx);
2993 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2994 unsigned NumElts = VecTy->getNumElements();
2995 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
2996 if (CIdx && CIdx->getValue().ult(NumElts))
2997 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2998 return isKnownNonZero(Vec, DemandedVecElts, Q, Depth);
2999 }
3000 }
3001 break;
3002 case Instruction::ShuffleVector: {
3003 auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
3004 if (!Shuf)
3005 break;
3006 APInt DemandedLHS, DemandedRHS;
3007 // For undef elements, we don't know anything about the common state of
3008 // the shuffle result.
3009 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
3010 break;
3011 // If demanded elements for both vecs are non-zero, the shuffle is non-zero.
3012 return (DemandedRHS.isZero() ||
3013 isKnownNonZero(Shuf->getOperand(1), DemandedRHS, Q, Depth)) &&
3014 (DemandedLHS.isZero() ||
3015 isKnownNonZero(Shuf->getOperand(0), DemandedLHS, Q, Depth));
3016 }
3017 case Instruction::Freeze:
3018 return isKnownNonZero(I->getOperand(0), Q, Depth) &&
3019 isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
3020 Depth);
3021 case Instruction::Load: {
3022 auto *LI = cast<LoadInst>(I);
3023 // A load tagged with nonnull, or with dereferenceable when the null pointer
3024 // is not a defined address in its address space, is never null.
3025 if (auto *PtrT = dyn_cast<PointerType>(I->getType())) {
3026 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull) ||
3027 (Q.IIQ.getMetadata(LI, LLVMContext::MD_dereferenceable) &&
3028 !NullPointerIsDefined(LI->getFunction(), PtrT->getAddressSpace())))
3029 return true;
3030 } else if (MDNode *Ranges = Q.IIQ.getMetadata(LI, LLVMContext::MD_range)) {
3031 return rangeMetadataExcludesValue(Ranges, APInt::getZero(BitWidth));
3032 }
3033
3034 // No need to fall through to computeKnownBits as range metadata is already
3035 // handled in isKnownNonZero.
3036 return false;
3037 }
3038 case Instruction::ExtractValue: {
3039 const WithOverflowInst *WO;
3040 if (match(I, m_ExtractValue<0>(m_WithOverflowInst(WO)))) {
3041 switch (WO->getBinaryOp()) {
3042 default:
3043 break;
3044 case Instruction::Add:
3045 return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth,
3046 WO->getArgOperand(0), WO->getArgOperand(1),
3047 /*NSW=*/false,
3048 /*NUW=*/false);
3049 case Instruction::Sub:
3050 return isNonZeroSub(DemandedElts, Depth, Q, BitWidth,
3051 WO->getArgOperand(0), WO->getArgOperand(1));
3052 case Instruction::Mul:
3053 return isNonZeroMul(DemandedElts, Depth, Q, BitWidth,
3054 WO->getArgOperand(0), WO->getArgOperand(1),
3055 /*NSW=*/false, /*NUW=*/false);
3056 break;
3057 }
3058 }
3059 break;
3060 }
3061 case Instruction::Call:
3062 case Instruction::Invoke: {
3063 const auto *Call = cast<CallBase>(I);
3064 if (I->getType()->isPointerTy()) {
3065 if (Call->isReturnNonNull())
3066 return true;
3067 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
3068 return isKnownNonZero(RP, Q, Depth);
3069 } else {
3070 if (MDNode *Ranges = Q.IIQ.getMetadata(Call, LLVMContext::MD_range))
3071 return rangeMetadataExcludesValue(Ranges, APInt::getZero(BitWidth));
3072 if (std::optional<ConstantRange> Range = Call->getRange()) {
3073 const APInt ZeroValue(Range->getBitWidth(), 0);
3074 if (!Range->contains(ZeroValue))
3075 return true;
3076 }
3077 if (const Value *RV = Call->getReturnedArgOperand())
3078 if (RV->getType() == I->getType() && isKnownNonZero(RV, Q, Depth))
3079 return true;
3080 }
3081
3082 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
3083 switch (II->getIntrinsicID()) {
3084 case Intrinsic::sshl_sat:
3085 case Intrinsic::ushl_sat:
3086 case Intrinsic::abs:
3087 case Intrinsic::bitreverse:
3088 case Intrinsic::bswap:
3089 case Intrinsic::ctpop:
3090 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth);
3091 // NB: We don't handle usub_sat here because in any case where we could prove
3092 // it non-zero, we will have folded it to `sub nuw` in InstCombine.
3093 case Intrinsic::ssub_sat:
3094 return isNonZeroSub(DemandedElts, Depth, Q, BitWidth,
3095 II->getArgOperand(0), II->getArgOperand(1));
3096 case Intrinsic::sadd_sat:
3097 return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth,
3098 II->getArgOperand(0), II->getArgOperand(1),
3099 /*NSW=*/true, /* NUW=*/false);
3100 // Vec reverse preserves zero/non-zero status from input vec.
3101 case Intrinsic::vector_reverse:
3102 return isKnownNonZero(II->getArgOperand(0), DemandedElts.reverseBits(),
3103 Q, Depth);
3104 // or/umax/umin/smax/smin of all non-zero elements is always non-zero.
3105 case Intrinsic::vector_reduce_or:
3106 case Intrinsic::vector_reduce_umax:
3107 case Intrinsic::vector_reduce_umin:
3108 case Intrinsic::vector_reduce_smax:
3109 case Intrinsic::vector_reduce_smin:
3110 return isKnownNonZero(II->getArgOperand(0), Q, Depth);
3111 case Intrinsic::umax:
3112 case Intrinsic::uadd_sat:
3113 // umax(X, (X != 0)) is non zero
3114 // X +usat (X != 0) is non zero
3115 if (matchOpWithOpEqZero(II->getArgOperand(0), II->getArgOperand(1)))
3116 return true;
3117
3118 return isKnownNonZero(II->getArgOperand(1), DemandedElts, Q, Depth) ||
3119 isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth);
3120 case Intrinsic::smax: {
3121 // If either arg is strictly positive the result is non-zero. Otherwise
3122 // the result is non-zero if both ops are non-zero.
3123 auto IsNonZero = [&](Value *Op, std::optional<bool> &OpNonZero,
3124 const KnownBits &OpKnown) {
3125 if (!OpNonZero.has_value())
3126 OpNonZero = OpKnown.isNonZero() ||
3127 isKnownNonZero(Op, DemandedElts, Q, Depth);
3128 return *OpNonZero;
3129 };
3130 // Avoid re-computing isKnownNonZero.
3131 std::optional<bool> Op0NonZero, Op1NonZero;
3132 KnownBits Op1Known =
3133 computeKnownBits(II->getArgOperand(1), DemandedElts, Depth, Q);
3134 if (Op1Known.isNonNegative() &&
3135 IsNonZero(II->getArgOperand(1), Op1NonZero, Op1Known))
3136 return true;
3137 KnownBits Op0Known =
3138 computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q);
3139 if (Op0Known.isNonNegative() &&
3140 IsNonZero(II->getArgOperand(0), Op0NonZero, Op0Known))
3141 return true;
3142 return IsNonZero(II->getArgOperand(1), Op1NonZero, Op1Known) &&
3143 IsNonZero(II->getArgOperand(0), Op0NonZero, Op0Known);
3144 }
3145 case Intrinsic::smin: {
3146 // If either arg is negative the result is non-zero. Otherwise
3147 // the result is non-zero if both ops are non-zero.
3148 KnownBits Op1Known =
3149 computeKnownBits(II->getArgOperand(1), DemandedElts, Depth, Q);
3150 if (Op1Known.isNegative())
3151 return true;
3152 KnownBits Op0Known =
3153 computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q);
3154 if (Op0Known.isNegative())
3155 return true;
3156
3157 if (Op1Known.isNonZero() && Op0Known.isNonZero())
3158 return true;
3159 }
3160 [[fallthrough]];
3161 case Intrinsic::umin:
3162 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth) &&
3163 isKnownNonZero(II->getArgOperand(1), DemandedElts, Q, Depth);
3164 case Intrinsic::cttz:
3165 return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q)
3166 .Zero[0];
3167 case Intrinsic::ctlz:
3168 return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q)
3169 .isNonNegative();
3170 case Intrinsic::fshr:
3171 case Intrinsic::fshl:
3172 // If Op0 == Op1, this is a rotate. rotate(x, y) != 0 iff x != 0.
3173 if (II->getArgOperand(0) == II->getArgOperand(1))
3174 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth);
3175 break;
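// Illustrative example (editor's addition, not from the original source): a
// funnel shift whose data operands are equal is a rotate, e.g.
//   %r = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 3)
// which is non-zero exactly when %x is non-zero.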
3176 case Intrinsic::vscale:
3177 return true;
3178 case Intrinsic::experimental_get_vector_length:
3179 return isKnownNonZero(I->getOperand(0), Q, Depth);
3180 default:
3181 break;
3182 }
3183 break;
3184 }
3185
3186 return false;
3187 }
3188 }
3189
3190 KnownBits Known(BitWidth);
3191 computeKnownBits(I, DemandedElts, Known, Depth, Q);
3192 return Known.One != 0;
3193}
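// Illustrative example (editor's addition, not from the original source): the
// known-bits fallback above catches cases such as
//   %o = or i32 %x, 16
// where computeKnownBits proves bit 4 is set, so Known.One != 0.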
3194
3195/// Return true if the given value is known to be non-zero when defined. For
3196/// vectors, return true if every demanded element is known to be non-zero when
3197/// defined. For pointers, if the context instruction and dominator tree are
3198/// specified, perform context-sensitive analysis and return true if the
3199/// pointer couldn't possibly be null at the specified instruction.
3200/// Supports values with integer or pointer type and vectors of integers.
3201bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
3202 const SimplifyQuery &Q, unsigned Depth) {
3203 Type *Ty = V->getType();
3204
3205#ifndef NDEBUG
3206 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3207
3208 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
3209 assert(
3210 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
3211 "DemandedElt width should equal the fixed vector number of elements");
3212 } else {
3213 assert(DemandedElts == APInt(1, 1) &&
3214 "DemandedElt width should be 1 for scalars");
3215 }
3216#endif
3217
3218 if (auto *C = dyn_cast<Constant>(V)) {
3219 if (C->isNullValue())
3220 return false;
3221 if (isa<ConstantInt>(C))
3222 // Must be non-zero due to null test above.
3223 return true;
3224
3225 // For constant vectors, check that all elements are poison or known
3226 // non-zero to determine that the whole vector is known non-zero.
3227 if (auto *VecTy = dyn_cast<FixedVectorType>(Ty)) {
3228 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
3229 if (!DemandedElts[i])
3230 continue;
3231 Constant *Elt = C->getAggregateElement(i);
3232 if (!Elt || Elt->isNullValue())
3233 return false;
3234 if (!isa<PoisonValue>(Elt) && !isa<ConstantInt>(Elt))
3235 return false;
3236 }
3237 return true;
3238 }
3239
3240 // Constant ptrauth can be null iff the base pointer can be.
3241 if (auto *CPA = dyn_cast<ConstantPtrAuth>(V))
3242 return isKnownNonZero(CPA->getPointer(), DemandedElts, Q, Depth);
3243
3244 // A global variable in address space 0 is non-null unless it is extern weak
3245 // or an absolute symbol reference. Other address spaces may have null as a
3246 // valid address for a global, so we can't assume anything.
3247 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
3248 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
3249 GV->getType()->getAddressSpace() == 0)
3250 return true;
3251 }
3252
3253 // For constant expressions, fall through to the Operator code below.
3254 if (!isa<ConstantExpr>(V))
3255 return false;
3256 }
3257
3258 if (const auto *A = dyn_cast<Argument>(V))
3259 if (std::optional<ConstantRange> Range = A->getRange()) {
3260 const APInt ZeroValue(Range->getBitWidth(), 0);
3261 if (!Range->contains(ZeroValue))
3262 return true;
3263 }
3264
3265 if (!isa<Constant>(V) && isKnownNonZeroFromAssume(V, Q))
3266 return true;
3267
3268 // Some of the tests below are recursive, so bail out if we hit the limit.
3269 if (Depth++ >= MaxAnalysisRecursionDepth)
3270 return false;
3271
3272 // Check for pointer simplifications.
3273
3274 if (PointerType *PtrTy = dyn_cast<PointerType>(Ty)) {
3275 // A byval or inalloca argument may only be assumed non-null when null is
3276 // undefined in its address space. A nonnull argument is assumed never 0.
3277 if (const Argument *A = dyn_cast<Argument>(V)) {
3278 if (((A->hasPassPointeeByValueCopyAttr() &&
3279 !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
3280 A->hasNonNullAttr()))
3281 return true;
3282 }
3283 }
3284
3285 if (const auto *I = dyn_cast<Operator>(V))
3286 if (isKnownNonZeroFromOperator(I, DemandedElts, Depth, Q))
3287 return true;
3288
3289 if (!isa<Constant>(V) &&
3290 isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
3291 return true;
3292
3293 return false;
3294}
3295
3296bool llvm::isKnownNonZero(const Value *V, const SimplifyQuery &Q,
3297 unsigned Depth) {
3298 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
3299 APInt DemandedElts =
3300 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
3301 return ::isKnownNonZero(V, DemandedElts, Q, Depth);
3302}
3303
3304/// If the pair of operators are the same invertible function, return the
3305/// operands of the function corresponding to each input. Otherwise,
3306/// return std::nullopt. An invertible function is one that is 1-to-1 and maps
3307/// every input value to exactly one output value. This is equivalent to
3308/// saying that Op1 and Op2 are equal exactly when the specified pair of
3309/// operands are equal, (except that Op1 and Op2 may be poison more often.)
3310static std::optional<std::pair<Value*, Value*>>
3311getInvertibleOperands(const Operator *Op1,
3312 const Operator *Op2) {
3313 if (Op1->getOpcode() != Op2->getOpcode())
3314 return std::nullopt;
3315
3316 auto getOperands = [&](unsigned OpNum) -> auto {
3317 return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum));
3318 };
3319
3320 switch (Op1->getOpcode()) {
3321 default:
3322 break;
3323 case Instruction::Or:
3324 if (!cast<PossiblyDisjointInst>(Op1)->isDisjoint() ||
3325 !cast<PossiblyDisjointInst>(Op2)->isDisjoint())
3326 break;
3327 [[fallthrough]];
3328 case Instruction::Xor:
3329 case Instruction::Add: {
3330 Value *Other;
3331 if (match(Op2, m_c_BinOp(m_Specific(Op1->getOperand(0)), m_Value(Other))))
3332 return std::make_pair(Op1->getOperand(1), Other);
3333 if (match(Op2, m_c_BinOp(m_Specific(Op1->getOperand(1)), m_Value(Other))))
3334 return std::make_pair(Op1->getOperand(0), Other);
3335 break;
3336 }
3337 case Instruction::Sub:
3338 if (Op1->getOperand(0) == Op2->getOperand(0))
3339 return getOperands(1);
3340 if (Op1->getOperand(1) == Op2->getOperand(1))
3341 return getOperands(0);
3342 break;
3343 case Instruction::Mul: {
3344 // invertible if A * B == (A * B) mod 2^N where A and B are integers and
3345 // N is the bitwidth. The nsw case is non-obvious, but proven by
3346 // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
3347 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
3348 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
3349 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
3350 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
3351 break;
3352
3353 // Assume operand order has been canonicalized
3354 if (Op1->getOperand(1) == Op2->getOperand(1) &&
3355 isa<ConstantInt>(Op1->getOperand(1)) &&
3356 !cast<ConstantInt>(Op1->getOperand(1))->isZero())
3357 return getOperands(0);
3358 break;
3359 }
3360 case Instruction::Shl: {
3361 // Same as multiplies, with the difference that we don't need to check
3362 // for a non-zero multiply. Shifts always multiply by non-zero.
3363 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
3364 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
3365 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
3366 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
3367 break;
3368
3369 if (Op1->getOperand(1) == Op2->getOperand(1))
3370 return getOperands(0);
3371 break;
3372 }
3373 case Instruction::AShr:
3374 case Instruction::LShr: {
3375 auto *PEO1 = cast<PossiblyExactOperator>(Op1);
3376 auto *PEO2 = cast<PossiblyExactOperator>(Op2);
3377 if (!PEO1->isExact() || !PEO2->isExact())
3378 break;
3379
3380 if (Op1->getOperand(1) == Op2->getOperand(1))
3381 return getOperands(0);
3382 break;
3383 }
3384 case Instruction::SExt:
3385 case Instruction::ZExt:
3386 if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType())
3387 return getOperands(0);
3388 break;
3389 case Instruction::PHI: {
3390 const PHINode *PN1 = cast<PHINode>(Op1);
3391 const PHINode *PN2 = cast<PHINode>(Op2);
3392
3393 // If PN1 and PN2 are both recurrences, can we prove the entire recurrences
3394 // are a single invertible function of the start values? Note that repeated
3395 // application of an invertible function is also invertible
3396 BinaryOperator *BO1 = nullptr;
3397 Value *Start1 = nullptr, *Step1 = nullptr;
3398 BinaryOperator *BO2 = nullptr;
3399 Value *Start2 = nullptr, *Step2 = nullptr;
3400 if (PN1->getParent() != PN2->getParent() ||
3401 !matchSimpleRecurrence(PN1, BO1, Start1, Step1) ||
3402 !matchSimpleRecurrence(PN2, BO2, Start2, Step2))
3403 break;
3404
3405 auto Values = getInvertibleOperands(cast<Operator>(BO1),
3406 cast<Operator>(BO2));
3407 if (!Values)
3408 break;
3409
3410 // We have to be careful of mutually defined recurrences here. Ex:
3411 // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V
3412 // * X_i = Y_i = X_(i-1) OP Y_(i-1)
3413 // The invertibility of these is complicated, and not worth reasoning
3414 // about (yet?).
3415 if (Values->first != PN1 || Values->second != PN2)
3416 break;
3417
3418 return std::make_pair(Start1, Start2);
3419 }
3420 }
3421 return std::nullopt;
3422}
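// Illustrative example (editor's addition, not from the original source):
// given
//   %a = add i32 %x, 7
//   %b = add i32 %y, 7
// the pair (%x, %y) is returned, so proving %x != %y also proves %a != %b.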
3423
3424/// Return true if V1 == (binop V2, X), where X is known non-zero.
3425/// Only handle a small subset of binops where (binop V2, X) with non-zero X
3426/// implies V2 != V1.
3427static bool isModifyingBinopOfNonZero(const Value *V1, const Value *V2,
3428 const APInt &DemandedElts, unsigned Depth,
3429 const SimplifyQuery &Q) {
3430 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
3431 if (!BO)
3432 return false;
3433 switch (BO->getOpcode()) {
3434 default:
3435 break;
3436 case Instruction::Or:
3437 if (!cast<PossiblyDisjointInst>(V1)->isDisjoint())
3438 break;
3439 [[fallthrough]];
3440 case Instruction::Xor:
3441 case Instruction::Add:
3442 Value *Op = nullptr;
3443 if (V2 == BO->getOperand(0))
3444 Op = BO->getOperand(1);
3445 else if (V2 == BO->getOperand(1))
3446 Op = BO->getOperand(0);
3447 else
3448 return false;
3449 return isKnownNonZero(Op, DemandedElts, Q, Depth + 1);
3450 }
3451 return false;
3452}
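// Illustrative example (editor's addition, not from the original source): if
// %d is known non-zero, then
//   %v1 = add i32 %v2, %d
// implies %v1 != %v2, which is exactly the pattern matched above.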
3453
3454/// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
3455/// the multiplication is nuw or nsw.
3456static bool isNonEqualMul(const Value *V1, const Value *V2,
3457 const APInt &DemandedElts, unsigned Depth,
3458 const SimplifyQuery &Q) {
3459 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
3460 const APInt *C;
3461 return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
3462 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3463 !C->isZero() && !C->isOne() &&
3464 isKnownNonZero(V1, DemandedElts, Q, Depth + 1);
3465 }
3466 return false;
3467}
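// Illustrative example (editor's addition, not from the original source):
//   %v2 = mul nuw i32 %v1, 3
// gives %v2 != %v1 whenever %v1 is known non-zero, since the nuw flag rules
// out the wrapping cases in which 3 * %v1 could equal %v1.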
3468
3469/// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
3470/// the shift is nuw or nsw.
3471static bool isNonEqualShl(const Value *V1, const Value *V2,
3472 const APInt &DemandedElts, unsigned Depth,
3473 const SimplifyQuery &Q) {
3474 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
3475 const APInt *C;
3476 return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
3477 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
3478 !C->isZero() && isKnownNonZero(V1, DemandedElts, Q, Depth + 1);
3479 }
3480 return false;
3481}
3482
3483static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
3484 const APInt &DemandedElts, unsigned Depth,
3485 const SimplifyQuery &Q) {
3486 // Check two PHIs are in same block.
3487 if (PN1->getParent() != PN2->getParent())
3488 return false;
3489
3490 SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
3491 bool UsedFullRecursion = false;
3492 for (const BasicBlock *IncomBB : PN1->blocks()) {
3493 if (!VisitedBBs.insert(IncomBB).second)
3494 continue; // Don't reprocess blocks that we have dealt with already.
3495 const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
3496 const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
3497 const APInt *C1, *C2;
3498 if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
3499 continue;
3500
3501 // Only one pair of phi operands is allowed for full recursion.
3502 if (UsedFullRecursion)
3503 return false;
3504
3505 SimplifyQuery RecQ = Q.getWithoutCondContext();
3506 RecQ.CxtI = IncomBB->getTerminator();
3507 if (!isKnownNonEqual(IV1, IV2, DemandedElts, Depth + 1, RecQ))
3508 return false;
3509 UsedFullRecursion = true;
3510 }
3511 return true;
3512}
3513
3514static bool isNonEqualSelect(const Value *V1, const Value *V2,
3515 const APInt &DemandedElts, unsigned Depth,
3516 const SimplifyQuery &Q) {
3517 const SelectInst *SI1 = dyn_cast<SelectInst>(V1);
3518 if (!SI1)
3519 return false;
3520
3521 if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2)) {
3522 const Value *Cond1 = SI1->getCondition();
3523 const Value *Cond2 = SI2->getCondition();
3524 if (Cond1 == Cond2)
3525 return isKnownNonEqual(SI1->getTrueValue(), SI2->getTrueValue(),
3526 DemandedElts, Depth + 1, Q) &&
3527 isKnownNonEqual(SI1->getFalseValue(), SI2->getFalseValue(),
3528 DemandedElts, Depth + 1, Q);
3529 }
3530 return isKnownNonEqual(SI1->getTrueValue(), V2, DemandedElts, Depth + 1, Q) &&
3531 isKnownNonEqual(SI1->getFalseValue(), V2, DemandedElts, Depth + 1, Q);
3532}
3533
3534// Check to see if A is both a GEP and the incoming value of a PHI in a loop,
3535// and B is either a ptr or another GEP. If the PHI has 2 incoming values, one
3536// of them being the recursive GEP A and the other a ptr with the same base as
3537// B at the same or a higher offset, then A != B, because the loop only ever
3538// moves the pointer further away when the recursive GEP's offset is positive.
3539static bool isNonEqualPointersWithRecursiveGEP(const Value *A, const Value *B,
3540 const SimplifyQuery &Q) {
3541 if (!A->getType()->isPointerTy() || !B->getType()->isPointerTy())
3542 return false;
3543
3544 auto *GEPA = dyn_cast<GEPOperator>(A);
3545 if (!GEPA || GEPA->getNumIndices() != 1 || !isa<Constant>(GEPA->idx_begin()))
3546 return false;
3547
3548 // Handle 2 incoming PHI values with one being a recursive GEP.
3549 auto *PN = dyn_cast<PHINode>(GEPA->getPointerOperand());
3550 if (!PN || PN->getNumIncomingValues() != 2)
3551 return false;
3552
3553 // Search for the recursive GEP as an incoming operand, and record that as
3554 // Step.
3555 Value *Start = nullptr;
3556 Value *Step = const_cast<Value *>(A);
3557 if (PN->getIncomingValue(0) == Step)
3558 Start = PN->getIncomingValue(1);
3559 else if (PN->getIncomingValue(1) == Step)
3560 Start = PN->getIncomingValue(0);
3561 else
3562 return false;
3563
3564 // Other incoming node base should match the B base.
3565 // StartOffset >= OffsetB && StepOffset > 0?
3566 // StartOffset <= OffsetB && StepOffset < 0?
3567 // Is non-equal if above are true.
3568 // We use stripAndAccumulateInBoundsConstantOffsets to restrict the
3569 // optimisation to inbounds GEPs only.
3570 unsigned IndexWidth = Q.DL.getIndexTypeSizeInBits(Start->getType());
3571 APInt StartOffset(IndexWidth, 0);
3572 Start = Start->stripAndAccumulateInBoundsConstantOffsets(Q.DL, StartOffset);
3573 APInt StepOffset(IndexWidth, 0);
3574 Step = Step->stripAndAccumulateInBoundsConstantOffsets(Q.DL, StepOffset);
3575
3576 // Check if Base Pointer of Step matches the PHI.
3577 if (Step != PN)
3578 return false;
3579 APInt OffsetB(IndexWidth, 0);
3580 B = B->stripAndAccumulateInBoundsConstantOffsets(Q.DL, OffsetB);
3581 return Start == B &&
3582 ((StartOffset.sge(OffsetB) && StepOffset.isStrictlyPositive()) ||
3583 (StartOffset.sle(OffsetB) && StepOffset.isNegative()));
3584}
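// Illustrative example (editor's addition; hypothetical IR, not from the
// original source):
//   loop:
//     %p = phi ptr [ %base, %entry ], [ %p.next, %loop ]
//     %p.next = getelementptr inbounds i8, ptr %p, i64 4
// Here A = %p.next steps away from %base by a strictly positive offset each
// iteration, so it can be proven unequal to B = %base.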
3585
3586/// Return true if it is known that V1 != V2.
3587static bool isKnownNonEqual(const Value *V1, const Value *V2,
3588 const APInt &DemandedElts, unsigned Depth,
3589 const SimplifyQuery &Q) {
3590 if (V1 == V2)
3591 return false;
3592 if (V1->getType() != V2->getType())
3593 // We can't look through casts yet.
3594 return false;
3595
3596 if (Depth >= MaxAnalysisRecursionDepth)
3597 return false;
3598
3599 // See if we can recurse through (exactly one of) our operands. This
3600 // requires our operation be 1-to-1 and map every input value to exactly
3601 // one output value. Such an operation is invertible.
3602 auto *O1 = dyn_cast<Operator>(V1);
3603 auto *O2 = dyn_cast<Operator>(V2);
3604 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
3605 if (auto Values = getInvertibleOperands(O1, O2))
3606 return isKnownNonEqual(Values->first, Values->second, DemandedElts,
3607 Depth + 1, Q);
3608
3609 if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
3610 const PHINode *PN2 = cast<PHINode>(V2);
3611 // FIXME: This is missing a generalization to handle the case where one is
3612 // a PHI and another one isn't.
3613 if (isNonEqualPHIs(PN1, PN2, DemandedElts, Depth, Q))
3614 return true;
3615 };
3616 }
3617
3618 if (isModifyingBinopOfNonZero(V1, V2, DemandedElts, Depth, Q) ||
3619 isModifyingBinopOfNonZero(V2, V1, DemandedElts, Depth, Q))
3620 return true;
3621
3622 if (isNonEqualMul(V1, V2, DemandedElts, Depth, Q) ||
3623 isNonEqualMul(V2, V1, DemandedElts, Depth, Q))
3624 return true;
3625
3626 if (isNonEqualShl(V1, V2, DemandedElts, Depth, Q) ||
3627 isNonEqualShl(V2, V1, DemandedElts, Depth, Q))
3628 return true;
3629
3630 if (V1->getType()->isIntOrIntVectorTy()) {
3631 // Are any known bits in V1 contradictory to known bits in V2? If V1
3632 // has a known zero where V2 has a known one, they must not be equal.
3633 KnownBits Known1 = computeKnownBits(V1, DemandedElts, Depth, Q);
3634 if (!Known1.isUnknown()) {
3635 KnownBits Known2 = computeKnownBits(V2, DemandedElts, Depth, Q);
3636 if (Known1.Zero.intersects(Known2.One) ||
3637 Known2.Zero.intersects(Known1.One))
3638 return true;
3639 }
3640 }
3641
3642 if (isNonEqualSelect(V1, V2, DemandedElts, Depth, Q) ||
3643 isNonEqualSelect(V2, V1, DemandedElts, Depth, Q))
3644 return true;
3645
3646 if (isNonEqualPointersWithRecursiveGEP(V1, V2, Q) ||
3647 isNonEqualPointersWithRecursiveGEP(V2, V1, Q))
3648 return true;
3649
3650 Value *A, *B;
3651 // PtrToInts are NonEqual if their Ptrs are NonEqual.
3652 // Check PtrToInt type matches the pointer size.
3653 if (match(V1, m_PtrToIntSameSize(Q.DL, m_Value(A))) &&
3654 match(V2, m_PtrToIntSameSize(Q.DL, m_Value(B))))
3655 return isKnownNonEqual(A, B, DemandedElts, Depth + 1, Q);
3656
3657 return false;
3658}
3659
3660// Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
3661// Returns the input and lower/upper bounds.
3662static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
3663 const APInt *&CLow, const APInt *&CHigh) {
3664 assert(isa<Operator>(Select) &&
3665 cast<Operator>(Select)->getOpcode() == Instruction::Select &&
3666 "Input should be a Select!");
3667
3668 const Value *LHS = nullptr, *RHS = nullptr;
3669 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
3670 if (SPF != SPF_SMAX && SPF != SPF_SMIN)
3671 return false;
3672
3673 if (!match(RHS, m_APInt(CLow)))
3674 return false;
3675
3676 const Value *LHS2 = nullptr, *RHS2 = nullptr;
3677 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
3678 if (getInverseMinMaxFlavor(SPF) != SPF2)
3679 return false;
3680
3681 if (!match(RHS2, m_APInt(CHigh)))
3682 return false;
3683
3684 if (SPF == SPF_SMIN)
3685 std::swap(CLow, CHigh);
3686
3687 In = LHS2;
3688 return CLow->sle(*CHigh);
3689}
3690
3691static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II,
3692 const APInt *&CLow,
3693 const APInt *&CHigh) {
3694 assert((II->getIntrinsicID() == Intrinsic::smin ||
3695 II->getIntrinsicID() == Intrinsic::smax) && "Must be smin/smax");
3696
3697 Intrinsic::ID InverseID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
3698 auto *InnerII = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
3699 if (!InnerII || InnerII->getIntrinsicID() != InverseID ||
3700 !match(II->getArgOperand(1), m_APInt(CLow)) ||
3701 !match(InnerII->getArgOperand(1), m_APInt(CHigh)))
3702 return false;
3703
3704 if (II->getIntrinsicID() == Intrinsic::smin)
3705 std::swap(CLow, CHigh);
3706 return CLow->sle(*CHigh);
3707}
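// Illustrative example (editor's addition, not from the original source): the
// clamp
//   %t = call i32 @llvm.smin.i32(i32 %x, i32 127)
//   %r = call i32 @llvm.smax.i32(i32 %t, i32 -128)
// constrains %r to [-128, 127], so ComputeNumSignBits can report
// min(signbits(-128), signbits(127)) = 25 sign bits for the i32 result.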
3708
3709/// For vector constants, loop over the elements and find the constant with the
3710/// minimum number of sign bits. Return 0 if the value is not a vector constant
3711/// or if any element was not analyzed; otherwise, return the count for the
3712/// element with the minimum number of sign bits.
3713static unsigned computeNumSignBitsVectorConstant(const Value *V,
3714 const APInt &DemandedElts,
3715 unsigned TyBits) {
3716 const auto *CV = dyn_cast<Constant>(V);
3717 if (!CV || !isa<FixedVectorType>(CV->getType()))
3718 return 0;
3719
3720 unsigned MinSignBits = TyBits;
3721 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
3722 for (unsigned i = 0; i != NumElts; ++i) {
3723 if (!DemandedElts[i])
3724 continue;
3725 // If we find a non-ConstantInt, bail out.
3726 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
3727 if (!Elt)
3728 return 0;
3729
3730 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
3731 }
3732
3733 return MinSignBits;
3734}
3735
3736static unsigned ComputeNumSignBitsImpl(const Value *V,
3737 const APInt &DemandedElts,
3738 unsigned Depth, const SimplifyQuery &Q);
3739
3740static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
3741 unsigned Depth, const SimplifyQuery &Q) {
3742 unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
3743 assert(Result > 0 && "At least one sign bit needs to be present!");
3744 return Result;
3745}
3746
3747/// Return the number of times the sign bit of the register is replicated into
3748/// the other bits. We know that at least 1 bit is always equal to the sign bit
3749/// (itself), but other cases can give us information. For example, immediately
3750/// after an "ashr X, 2", we know that the top 3 bits are all equal to each
3751/// other, so we return 3. For vectors, return the number of sign bits for the
3752/// vector element with the minimum number of known sign bits of the demanded
3753/// elements in the vector specified by DemandedElts.
3754static unsigned ComputeNumSignBitsImpl(const Value *V,
3755 const APInt &DemandedElts,
3756 unsigned Depth, const SimplifyQuery &Q) {
3757 Type *Ty = V->getType();
3758#ifndef NDEBUG
3759 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3760
3761 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
3762 assert(
3763 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
3764 "DemandedElt width should equal the fixed vector number of elements");
3765 } else {
3766 assert(DemandedElts == APInt(1, 1) &&
3767 "DemandedElt width should be 1 for scalars");
3768 }
3769#endif
3770
3771 // We return the minimum number of sign bits that are guaranteed to be present
3772 // in V, so for undef we have to conservatively return 1. We don't have the
3773 // same behavior for poison though -- that's a FIXME today.
3774
3775 Type *ScalarTy = Ty->getScalarType();
3776 unsigned TyBits = ScalarTy->isPointerTy() ?
3777 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
3778 Q.DL.getTypeSizeInBits(ScalarTy);
3779
3780 unsigned Tmp, Tmp2;
3781 unsigned FirstAnswer = 1;
3782
3783 // Note that ConstantInt is handled by the general computeKnownBits case
3784 // below.
3785
3786 if (Depth == MaxAnalysisRecursionDepth)
3787 return 1;
3788
3789 if (auto *U = dyn_cast<Operator>(V)) {
3790 switch (Operator::getOpcode(V)) {
3791 default: break;
3792 case Instruction::SExt:
3793 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
3794 return ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q) +
3795 Tmp;
3796
3797 case Instruction::SDiv: {
3798 const APInt *Denominator;
3799 // sdiv X, C -> adds log(C) sign bits.
3800 if (match(U->getOperand(1), m_APInt(Denominator))) {
3801
3802 // Ignore non-positive denominator.
3803 if (!Denominator->isStrictlyPositive())
3804 break;
3805
3806 // Calculate the incoming numerator bits.
3807 unsigned NumBits =
3808 ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q);
3809
3810 // Add floor(log(C)) bits to the numerator bits.
3811 return std::min(TyBits, NumBits + Denominator->logBase2());
3812 }
3813 break;
3814 }
3815
3816 case Instruction::SRem: {
3817 Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q);
3818
3819 const APInt *Denominator;
3820 // srem X, C -> we know that the result is within [-C+1,C) when C is a
3821 // positive constant. This lets us put a lower bound on the number of sign
3822 // bits.
3823 if (match(U->getOperand(1), m_APInt(Denominator))) {
3824
3825 // Ignore non-positive denominator.
3826 if (Denominator->isStrictlyPositive()) {
3827 // Calculate the leading sign bit constraints by examining the
3828 // denominator. Given that the denominator is positive, there are two
3829 // cases:
3830 //
3831 // 1. The numerator is positive. The result range is [0,C) and
3832 // [0,C) u< (1 << ceilLogBase2(C)).
3833 //
3834 // 2. The numerator is negative. Then the result range is (-C,0] and
3835 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
3836 //
3837 // Thus a lower bound on the number of sign bits is `TyBits -
3838 // ceilLogBase2(C)`.
3839
3840 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
3841 Tmp = std::max(Tmp, ResBits);
3842 }
3843 }
3844 return Tmp;
3845 }
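// Illustrative example (editor's addition, not from the original source):
//   %r = srem i32 %x, 8
// always lies in (-8, 8), so at least 32 - ceilLogBase2(8) = 29 sign bits are
// known regardless of what is known about %x.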
3846
3847 case Instruction::AShr: {
3848 Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q);
3849 // ashr X, C -> adds C sign bits. Vectors too.
3850 const APInt *ShAmt;
3851 if (match(U->getOperand(1), m_APInt(ShAmt))) {
3852 if (ShAmt->uge(TyBits))
3853 break; // Bad shift.
3854 unsigned ShAmtLimited = ShAmt->getZExtValue();
3855 Tmp += ShAmtLimited;
3856 if (Tmp > TyBits) Tmp = TyBits;
3857 }
3858 return Tmp;
3859 }
3860 case Instruction::Shl: {
3861 const APInt *ShAmt;
3862 Value *X = nullptr;
3863 if (match(U->getOperand(1), m_APInt(ShAmt))) {
3864 // shl destroys sign bits.
3865 if (ShAmt->uge(TyBits))
3866 break; // Bad shift.
3867 // We can look through a zext (more or less treating it as a sext) if
3868 // all extended bits are shifted out.
3869 if (match(U->getOperand(0), m_ZExt(m_Value(X))) &&
3870 ShAmt->uge(TyBits - X->getType()->getScalarSizeInBits())) {
3871 Tmp = ComputeNumSignBits(X, DemandedElts, Depth + 1, Q);
3872 Tmp += TyBits - X->getType()->getScalarSizeInBits();
3873 } else
3874 Tmp =
3875 ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q);
3876 if (ShAmt->uge(Tmp))
3877 break; // Shifted all sign bits out.
3878 Tmp2 = ShAmt->getZExtValue();
3879 return Tmp - Tmp2;
3880 }
3881 break;
3882 }
3883 case Instruction::And:
3884 case Instruction::Or:
3885 case Instruction::Xor: // NOT is handled here.
3886 // Logical binary ops preserve the number of sign bits at the worst.
3887 Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q);
3888 if (Tmp != 1) {
3889 Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q);
3890 FirstAnswer = std::min(Tmp, Tmp2);
3891 // We computed what we know about the sign bits as our first
3892 // answer. Now proceed to the generic code that uses
3893 // computeKnownBits, and pick whichever answer is better.
3894 }
3895 break;
3896
3897 case Instruction::Select: {
3898 // If we have a clamp pattern, we know that the number of sign bits will
3899 // be the minimum of the clamp min/max range.
3900 const Value *X;
3901 const APInt *CLow, *CHigh;
3902 if (isSignedMinMaxClamp(U, X, CLow, CHigh))
3903 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3904
3905 Tmp = ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q);
3906 if (Tmp == 1)
3907 break;
3908 Tmp2 = ComputeNumSignBits(U->getOperand(2), DemandedElts, Depth + 1, Q);
3909 return std::min(Tmp, Tmp2);
3910 }
3911
3912 case Instruction::Add:
3913 // Add can have at most one carry bit. Thus we know that the output
3914 // is, at worst, one more bit than the inputs.
3915 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3916 if (Tmp == 1) break;
3917
3918 // Special case decrementing a value (ADD X, -1):
3919 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
3920 if (CRHS->isAllOnesValue()) {
3921 KnownBits Known(TyBits);
3922 computeKnownBits(U->getOperand(0), DemandedElts, Known, Depth + 1, Q);
3923
3924 // If the input is known to be 0 or 1, the output is 0/-1, which is
3925 // all sign bits set.
3926 if ((Known.Zero | 1).isAllOnes())
3927 return TyBits;
3928
3929 // If we are subtracting one from a positive number, there is no carry
3930 // out of the result.
3931 if (Known.isNonNegative())
3932 return Tmp;
3933 }
3934
3935 Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q);
3936 if (Tmp2 == 1)
3937 break;
3938 return std::min(Tmp, Tmp2) - 1;
3939
3940 case Instruction::Sub:
3941 Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q);
3942 if (Tmp2 == 1)
3943 break;
3944
3945 // Handle NEG.
3946 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
3947 if (CLHS->isNullValue()) {
3948 KnownBits Known(TyBits);
3949 computeKnownBits(U->getOperand(1), DemandedElts, Known, Depth + 1, Q);
3950 // If the input is known to be 0 or 1, the output is 0/-1, which is
3951 // all sign bits set.
3952 if ((Known.Zero | 1).isAllOnes())
3953 return TyBits;
3954
3955 // If the input is known to be positive (the sign bit is known clear),
3956 // the output of the NEG has the same number of sign bits as the
3957 // input.
3958 if (Known.isNonNegative())
3959 return Tmp2;
3960
3961 // Otherwise, we treat this like a SUB.
3962 }
3963
3964 // Sub can have at most one carry bit. Thus we know that the output
3965 // is, at worst, one more bit than the inputs.
3966 Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q);
3967 if (Tmp == 1)
3968 break;
3969 return std::min(Tmp, Tmp2) - 1;
3970
3971 case Instruction::Mul: {
3972 // The output of the Mul can be at most twice the valid bits in the
3973 // inputs.
3974 unsigned SignBitsOp0 =
3975 ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q);
3976 if (SignBitsOp0 == 1)
3977 break;
3978 unsigned SignBitsOp1 =
3979 ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q);
3980 if (SignBitsOp1 == 1)
3981 break;
3982 unsigned OutValidBits =
3983 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
3984 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
3985 }
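// Illustrative example (editor's addition, not from the original source): if
// both operands of
//   %m = mul i32 %a, %b
// have 20 known sign bits, each contributes at most 13 "valid" bits, so the
// product needs at most 26 bits and keeps at least 32 - 26 + 1 = 7 sign bits.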
3986
3987 case Instruction::PHI: {
3988 const PHINode *PN = cast<PHINode>(U);
3989 unsigned NumIncomingValues = PN->getNumIncomingValues();
3990 // Don't analyze large in-degree PHIs.
3991 if (NumIncomingValues > 4) break;
3992 // Unreachable blocks may have zero-operand PHI nodes.
3993 if (NumIncomingValues == 0) break;
3994
3995 // Take the minimum of all incoming values. This can't infinitely loop
3996 // because of our depth threshold.
3997 SimplifyQuery RecQ = Q.getWithoutCondContext();
3998 Tmp = TyBits;
3999 for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
4000 if (Tmp == 1) return Tmp;
4001 RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
4002 Tmp = std::min(Tmp, ComputeNumSignBits(PN->getIncomingValue(i),
4003 DemandedElts, Depth + 1, RecQ));
4004 }
4005 return Tmp;
4006 }
4007
4008 case Instruction::Trunc: {
4009 // If the input contained enough sign bits that some remain after the
4010 // truncation, then we can make use of that. Otherwise we don't know
4011 // anything.
4012 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
4013 unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits();
4014 if (Tmp > (OperandTyBits - TyBits))
4015 return Tmp - (OperandTyBits - TyBits);
4016
4017 return 1;
4018 }
4019
4020 case Instruction::ExtractElement:
4021 // Look through extract element. At the moment we keep this simple and
4022 // skip tracking the specific element. But at least we might find
4023 // information valid for all elements of the vector (for example if vector
4024 // is sign extended, shifted, etc).
4025 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
4026
4027 case Instruction::ShuffleVector: {
4028 // Collect the minimum number of sign bits that are shared by every vector
4029 // element referenced by the shuffle.
4030 auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
4031 if (!Shuf) {
4032 // FIXME: Add support for shufflevector constant expressions.
4033 return 1;
4034 }
4035 APInt DemandedLHS, DemandedRHS;
4036 // For undef elements, we don't know anything about the common state of
4037 // the shuffle result.
4038 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
4039 return 1;
4040 Tmp = std::numeric_limits<unsigned>::max();
4041 if (!!DemandedLHS) {
4042 const Value *LHS = Shuf->getOperand(0);
4043 Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
4044 }
4045 // If we don't know anything, early out and try computeKnownBits
4046 // fall-back.
4047 if (Tmp == 1)
4048 break;
4049 if (!!DemandedRHS) {
4050 const Value *RHS = Shuf->getOperand(1);
4051 Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
4052 Tmp = std::min(Tmp, Tmp2);
4053 }
4054 // If we don't know anything, early out and try computeKnownBits
4055 // fall-back.
4056 if (Tmp == 1)
4057 break;
4058 assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
4059 return Tmp;
4060 }
4061 case Instruction::Call: {
4062 if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
4063 switch (II->getIntrinsicID()) {
4064 default:
4065 break;
4066 case Intrinsic::abs:
4067 Tmp =
4068 ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q);
4069 if (Tmp == 1)
4070 break;
4071
4072 // Absolute value reduces number of sign bits by at most 1.
4073 return Tmp - 1;
4074 case Intrinsic::smin:
4075 case Intrinsic::smax: {
4076 const APInt *CLow, *CHigh;
4077 if (isSignedMinMaxIntrinsicClamp(II, CLow, CHigh))
4078 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
4079 }
4080 }
4081 }
4082 }
4083 }
4084 }
4085
4086 // Finally, if we can prove that the top bits of the result are 0's or 1's,
4087 // use this information.
4088
4089 // If we can examine all elements of a vector constant successfully, we're
4090 // done (we can't do any better than that). If not, keep trying.
4091 if (unsigned VecSignBits =
4092 computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
4093 return VecSignBits;
4094
4095 KnownBits Known(TyBits);
4096 computeKnownBits(V, DemandedElts, Known, Depth, Q);
4097
4098 // If we know that the sign bit is either zero or one, determine the number of
4099 // identical bits in the top of the input value.
4100 return std::max(FirstAnswer, Known.countMinSignBits());
4101}
4102
4103Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
4104 const TargetLibraryInfo *TLI) {
4105 const Function *F = CB.getCalledFunction();
4106 if (!F)
4107 return Intrinsic::not_intrinsic;
4108
4109 if (F->isIntrinsic())
4110 return F->getIntrinsicID();
4111
4112 // We are going to infer semantics of a library function based on mapping it
4113 // to an LLVM intrinsic. Check that the library function is available from
4114 // this callbase and in this environment.
4115 LibFunc Func;
4116 if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
4117 !CB.onlyReadsMemory())
4118 return Intrinsic::not_intrinsic;
4119
4120 switch (Func) {
4121 default:
4122 break;
4123 case LibFunc_sin:
4124 case LibFunc_sinf:
4125 case LibFunc_sinl:
4126 return Intrinsic::sin;
4127 case LibFunc_cos:
4128 case LibFunc_cosf:
4129 case LibFunc_cosl:
4130 return Intrinsic::cos;
4131 case LibFunc_tan:
4132 case LibFunc_tanf:
4133 case LibFunc_tanl:
4134 return Intrinsic::tan;
4135 case LibFunc_exp:
4136 case LibFunc_expf:
4137 case LibFunc_expl:
4138 return Intrinsic::exp;
4139 case LibFunc_exp2:
4140 case LibFunc_exp2f:
4141 case LibFunc_exp2l:
4142 return Intrinsic::exp2;
4143 case LibFunc_log:
4144 case LibFunc_logf:
4145 case LibFunc_logl:
4146 return Intrinsic::log;
4147 case LibFunc_log10:
4148 case LibFunc_log10f:
4149 case LibFunc_log10l:
4150 return Intrinsic::log10;
4151 case LibFunc_log2:
4152 case LibFunc_log2f:
4153 case LibFunc_log2l:
4154 return Intrinsic::log2;
4155 case LibFunc_fabs:
4156 case LibFunc_fabsf:
4157 case LibFunc_fabsl:
4158 return Intrinsic::fabs;
4159 case LibFunc_fmin:
4160 case LibFunc_fminf:
4161 case LibFunc_fminl:
4162 return Intrinsic::minnum;
4163 case LibFunc_fmax:
4164 case LibFunc_fmaxf:
4165 case LibFunc_fmaxl:
4166 return Intrinsic::maxnum;
4167 case LibFunc_copysign:
4168 case LibFunc_copysignf:
4169 case LibFunc_copysignl:
4170 return Intrinsic::copysign;
4171 case LibFunc_floor:
4172 case LibFunc_floorf:
4173 case LibFunc_floorl:
4174 return Intrinsic::floor;
4175 case LibFunc_ceil:
4176 case LibFunc_ceilf:
4177 case LibFunc_ceill:
4178 return Intrinsic::ceil;
4179 case LibFunc_trunc:
4180 case LibFunc_truncf:
4181 case LibFunc_truncl:
4182 return Intrinsic::trunc;
4183 case LibFunc_rint:
4184 case LibFunc_rintf:
4185 case LibFunc_rintl:
4186 return Intrinsic::rint;
4187 case LibFunc_nearbyint:
4188 case LibFunc_nearbyintf:
4189 case LibFunc_nearbyintl:
4190 return Intrinsic::nearbyint;
4191 case LibFunc_round:
4192 case LibFunc_roundf:
4193 case LibFunc_roundl:
4194 return Intrinsic::round;
4195 case LibFunc_roundeven:
4196 case LibFunc_roundevenf:
4197 case LibFunc_roundevenl:
4198 return Intrinsic::roundeven;
4199 case LibFunc_pow:
4200 case LibFunc_powf:
4201 case LibFunc_powl:
4202 return Intrinsic::pow;
4203 case LibFunc_sqrt:
4204 case LibFunc_sqrtf:
4205 case LibFunc_sqrtl:
4206 return Intrinsic::sqrt;
4207 }
4208
4209 return Intrinsic::not_intrinsic;
4210}
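// Illustrative example (editor's addition, not from the original source): a
// call such as
//   %y = call double @sqrt(double %x) #0   ; #0 marks the call as only reading memory
// is reported as Intrinsic::sqrt when TargetLibraryInfo recognizes @sqrt, so
// callers may reason about it as they would about llvm.sqrt.f64.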
4211
4212/// Return true if it's possible to assume IEEE treatment of input denormals in
4213/// \p F for \p Val.
4214static bool inputDenormalIsIEEE(const Function &F, const Type *Ty) {
4215 Ty = Ty->getScalarType();
4216 return F.getDenormalMode(Ty->getFltSemantics()).Input == DenormalMode::IEEE;
4217}
4218
4219static bool inputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty) {
4220 Ty = Ty->getScalarType();
4221 DenormalMode Mode = F.getDenormalMode(Ty->getFltSemantics());
4222 return Mode.Input == DenormalMode::IEEE ||
4223 Mode.Input == DenormalMode::PositiveZero;
4224}
4225
4226static bool outputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty) {
4227 Ty = Ty->getScalarType();
4228 DenormalMode Mode = F.getDenormalMode(Ty->getFltSemantics());
4229 return Mode.Output == DenormalMode::IEEE ||
4230 Mode.Output == DenormalMode::PositiveZero;
4231}
4232
4233bool KnownFPClass::isKnownNeverLogicalZero(const Function &F, Type *Ty) const {
4234 return isKnownNeverZero() &&
4235 (isKnownNeverSubnormal() || inputDenormalIsIEEE(F, Ty));
4236}
4237
4238bool KnownFPClass::isKnownNeverLogicalNegZero(const Function &F,
4239 Type *Ty) const {
4240 return isKnownNeverNegZero() &&
4241 (isKnownNeverNegSubnormal() || inputDenormalIsIEEEOrPosZero(F, Ty));
4242}
4243
4244bool KnownFPClass::isKnownNeverLogicalPosZero(const Function &F,
4245 Type *Ty) const {
4246 if (!isKnownNeverPosZero())
4247 return false;
4248
4249 // If we know there are no denormals, nothing can be flushed to zero.
4250 if (isKnownNeverSubnormal())
4251 return true;
4252
4253 DenormalMode Mode = F.getDenormalMode(Ty->getScalarType()->getFltSemantics());
4254 switch (Mode.Input) {
4255 case DenormalMode::IEEE:
4256 return true;
4257 case DenormalMode::PreserveSign:
4258 // Negative subnormal won't flush to +0
4259 return isKnownNeverPosSubnormal();
4260 case DenormalMode::PositiveZero:
4261 default:
4262 // Both positive and negative subnormal could flush to +0
4263 return false;
4264 }
4265
4266 llvm_unreachable("covered switch over denormal mode");
4267}
4268
4268
4269void KnownFPClass::propagateDenormal(const KnownFPClass &Src, const Function &F,
4270 Type *Ty) {
4271 KnownFPClasses = Src.KnownFPClasses;
4272 // If we aren't assuming the source can't be a zero, we don't have to check if
4273 // a denormal input could be flushed.
4274 if (!Src.isKnownNeverPosZero() && !Src.isKnownNeverNegZero())
4275 return;
4276
4277 // If we know the input can't be a denormal, it can't be flushed to 0.
4278 if (Src.isKnownNeverSubnormal())
4279 return;
4280
4281 DenormalMode Mode = F.getDenormalMode(Ty->getScalarType()->getFltSemantics());
4282
4283 if (!Src.isKnownNeverPosSubnormal() && Mode != DenormalMode::getIEEE())
4284 KnownFPClasses |= fcPosZero;
4285
4286 if (!Src.isKnownNeverNegSubnormal() && Mode != DenormalMode::getIEEE()) {
4287 if (Mode != DenormalMode::getPositiveZero())
4288 KnownFPClasses |= fcNegZero;
4289
4290 if (Mode.Input == DenormalMode::PositiveZero ||
4291 Mode.Output == DenormalMode::PositiveZero ||
4292 Mode.Input == DenormalMode::Dynamic ||
4293 Mode.Output == DenormalMode::Dynamic)
4294 KnownFPClasses |= fcPosZero;
4295 }
4296}
4297
4298void KnownFPClass::propagateCanonicalizingSrc(const KnownFPClass &Src,
4299 const Function &F, Type *Ty) {
4300 propagateDenormal(Src, F, Ty);
4301 propagateNaN(Src, /*PreserveSign=*/true);
4302}
4303
4304/// Given an exploded icmp instruction, return true if the comparison only
4305/// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
4306/// the result of the comparison is true when the input value is signed.
4307bool llvm::isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
4308 bool &TrueIfSigned) {
4309 switch (Pred) {
4310 case ICmpInst::ICMP_SLT: // True if LHS s< 0
4311 TrueIfSigned = true;
4312 return RHS.isZero();
4313 case ICmpInst::ICMP_SLE: // True if LHS s<= -1
4314 TrueIfSigned = true;
4315 return RHS.isAllOnes();
4316 case ICmpInst::ICMP_SGT: // True if LHS s> -1
4317 TrueIfSigned = false;
4318 return RHS.isAllOnes();
4319 case ICmpInst::ICMP_SGE: // True if LHS s>= 0
4320 TrueIfSigned = false;
4321 return RHS.isZero();
4322 case ICmpInst::ICMP_UGT:
4323 // True if LHS u> RHS and RHS == sign-bit-mask - 1
4324 TrueIfSigned = true;
4325 return RHS.isMaxSignedValue();
4326 case ICmpInst::ICMP_UGE:
4327 // True if LHS u>= RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc)
4328 TrueIfSigned = true;
4329 return RHS.isMinSignedValue();
4330 case ICmpInst::ICMP_ULT:
4331 // True if LHS u< RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc)
4332 TrueIfSigned = false;
4333 return RHS.isMinSignedValue();
4334 case ICmpInst::ICMP_ULE:
4335 // True if LHS u<= RHS and RHS == sign-bit-mask - 1
4336 TrueIfSigned = false;
4337 return RHS.isMaxSignedValue();
4338 default:
4339 return false;
4340 }
4341}
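// Illustrative examples (editor's addition, not from the original source),
// shown for i8:
//   icmp slt i8 %x, 0     ; true iff the sign bit of %x is set
//   icmp ugt i8 %x, 127   ; 127 is sign-bit-mask - 1, so this is equivalent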
4342
4343/// Returns a pair of values, which if passed to llvm.is.fpclass, returns the
4344/// same result as an fcmp with the given operands.
4345std::pair<Value *, FPClassTest> llvm::fcmpToClassTest(FCmpInst::Predicate Pred,
4346 const Function &F,
4347 Value *LHS, Value *RHS,
4348 bool LookThroughSrc) {
4349 const APFloat *ConstRHS;
4350 if (!match(RHS, m_APFloatAllowPoison(ConstRHS)))
4351 return {nullptr, fcAllFlags};
4352
4353 return fcmpToClassTest(Pred, F, LHS, ConstRHS, LookThroughSrc);
4354}
4355
4356std::pair<Value *, FPClassTest>
4357llvm::fcmpToClassTest(FCmpInst::Predicate Pred, const Function &F, Value *LHS,
4358 const APFloat *ConstRHS, bool LookThroughSrc) {
4359
4360 auto [Src, ClassIfTrue, ClassIfFalse] =
4361 fcmpImpliesClass(Pred, F, LHS, *ConstRHS, LookThroughSrc);
4362 if (Src && ClassIfTrue == ~ClassIfFalse)
4363 return {Src, ClassIfTrue};
4364 return {nullptr, fcAllFlags};
4365}
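// Illustrative example (editor's addition, not from the original source): the
// comparison
//   %c = fcmp oeq float %x, 0x7FF0000000000000   ; %x == +inf
// maps to the class test (%x, fcPosInf), i.e. the same result as
//   %c = call i1 @llvm.is.fpclass.f32(float %x, i32 512)   ; 512 == fcPosInf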
4366
4367/// Return the return value for fcmpImpliesClass for a compare that produces an
4368/// exact class test.
4369static std::tuple<Value *, FPClassTest, FPClassTest> exactClass(Value *V,
4370 FPClassTest M) {
4371 return {V, M, ~M};
4372}
4373
4374std::tuple<Value *, FPClassTest, FPClassTest>
4375llvm::fcmpImpliesClass(FCmpInst::Predicate Pred, const Function &F, Value *LHS,
4376 FPClassTest RHSClass, bool LookThroughSrc) {
4377 assert(RHSClass != fcNone);
4378 Value *Src = LHS;
4379
4380 if (Pred == FCmpInst::FCMP_TRUE)
4381 return exactClass(Src, fcAllFlags);
4382
4383 if (Pred == FCmpInst::FCMP_FALSE)
4384 return exactClass(Src, fcNone);
4385
4386 const FPClassTest OrigClass = RHSClass;
4387
4388 const bool IsNegativeRHS = (RHSClass & fcNegative) == RHSClass;
4389 const bool IsPositiveRHS = (RHSClass & fcPositive) == RHSClass;
4390 const bool IsNaN = (RHSClass & ~fcNan) == fcNone;
4391
4392 if (IsNaN) {
4393 // fcmp o__ x, nan -> false
4394 // fcmp u__ x, nan -> true
4395 return exactClass(Src, CmpInst::isOrdered(Pred) ? fcNone : fcAllFlags);
4396 }
4397
4398 // fcmp ord x, zero|normal|subnormal|inf -> ~fcNan
4399 if (Pred == FCmpInst::FCMP_ORD)
4400 return exactClass(Src, ~fcNan);
4401
4402 // fcmp uno x, zero|normal|subnormal|inf -> fcNan
4403 if (Pred == FCmpInst::FCMP_UNO)
4404 return exactClass(Src, fcNan);
4405
4406 const bool IsFabs = LookThroughSrc && match(LHS, m_FAbs(m_Value(Src)));
4407 if (IsFabs)
4408 RHSClass = llvm::inverse_fabs(RHSClass);
4409
4410 const bool IsZero = (OrigClass & fcZero) == OrigClass;
4411 if (IsZero) {
4412 assert(Pred != FCmpInst::FCMP_ORD && Pred != FCmpInst::FCMP_UNO);
4413 // A compare against zero is only an exact fcZero class test when input
4414 // denormals are not flushed; under DAZ a subnormal also compares equal to 0.
4415 // TODO: Handle DAZ by expanding masks to cover subnormal cases.
4416 if (!inputDenormalIsIEEE(F, LHS->getType()))
4417 return {nullptr, fcAllFlags, fcAllFlags};
4418
4419 switch (Pred) {
4420 case FCmpInst::FCMP_OEQ: // Match x == 0.0
4421 return exactClass(Src, fcZero);
4422 case FCmpInst::FCMP_UEQ: // Match isnan(x) || (x == 0.0)
4423 return exactClass(Src, fcZero | fcNan);
4424 case FCmpInst::FCMP_UNE: // Match (x != 0.0)
4425 return exactClass(Src, ~fcZero);
4426 case FCmpInst::FCMP_ONE: // Match !isnan(x) && x != 0.0
4427 return exactClass(Src, ~fcNan & ~fcZero);
4428 case FCmpInst::FCMP_ORD:
4429 // Canonical form of ord/uno is with a zero. We could also handle
4430 // non-canonical other non-NaN constants or LHS == RHS.
4431 return exactClass(Src, ~fcNan);
4432 case FCmpInst::FCMP_UNO:
4433 return exactClass(Src, fcNan);
4434 case FCmpInst::FCMP_OGT: // x > 0
4435 return exactClass(Src, fcPosSubnormal | fcPosNormal | fcPosInf);
4436 case FCmpInst::FCMP_UGT: // isnan(x) || x > 0
4437 return exactClass(Src, fcPosSubnormal | fcPosNormal | fcPosInf | fcNan);
4438 case FCmpInst::FCMP_OGE: // x >= 0
4439 return exactClass(Src, fcPositive | fcNegZero);
4440 case FCmpInst::FCMP_UGE: // isnan(x) || x >= 0
4441 return exactClass(Src, fcPositive | fcNegZero | fcNan);
4442 case FCmpInst::FCMP_OLT: // x < 0
4443 return exactClass(Src, fcNegSubnormal | fcNegNormal | fcNegInf);
4444 case FCmpInst::FCMP_ULT: // isnan(x) || x < 0
4445 return exactClass(Src, fcNegSubnormal | fcNegNormal | fcNegInf | fcNan);
4446 case FCmpInst::FCMP_OLE: // x <= 0
4447 return exactClass(Src, fcNegative | fcPosZero);
4448 case FCmpInst::FCMP_ULE: // isnan(x) || x <= 0
4449 return exactClass(Src, fcNegative | fcPosZero | fcNan);
4450 default:
4451 llvm_unreachable("all compare types are handled");
4452 }
4453
4454 return {nullptr, fcAllFlags, fcAllFlags};
4455 }
4456
4457 const bool IsDenormalRHS = (OrigClass & fcSubnormal) == OrigClass;
4458
4459 const bool IsInf = (OrigClass & fcInf) == OrigClass;
4460 if (IsInf) {
4461 FPClassTest Mask = fcAllFlags;
4462
4463 switch (Pred) {
4464 case FCmpInst::FCMP_OEQ:
4465 case FCmpInst::FCMP_UNE: {
4466 // Match __builtin_isinf patterns
4467 //
4468 // fcmp oeq x, +inf -> is_fpclass x, fcPosInf
4469 // fcmp oeq fabs(x), +inf -> is_fpclass x, fcInf
4470 // fcmp oeq x, -inf -> is_fpclass x, fcNegInf
4471 // fcmp oeq fabs(x), -inf -> is_fpclass x, 0 -> false
4472 //
4473 // fcmp une x, +inf -> is_fpclass x, ~fcPosInf
4474 // fcmp une fabs(x), +inf -> is_fpclass x, ~fcInf
4475 // fcmp une x, -inf -> is_fpclass x, ~fcNegInf
4476 // fcmp une fabs(x), -inf -> is_fpclass x, fcAllFlags -> true
4477 if (IsNegativeRHS) {
4478 Mask = fcNegInf;
4479 if (IsFabs)
4480 Mask = fcNone;
4481 } else {
4482 Mask = fcPosInf;
4483 if (IsFabs)
4484 Mask |= fcNegInf;
4485 }
4486 break;
4487 }
4488 case FCmpInst::FCMP_ONE:
4489 case FCmpInst::FCMP_UEQ: {
4490 // Match __builtin_isinf patterns
4491 // fcmp one x, -inf -> is_fpclass x, ~fcNegInf & ~fcNan
4492 // fcmp one fabs(x), -inf -> is_fpclass x, ~fcNan
4493 // fcmp one x, +inf -> is_fpclass x, ~fcPosInf & ~fcNan
4494 // fcmp one fabs(x), +inf -> is_fpclass x, ~fcInf & ~fcNan
4495 //
4496 // fcmp ueq x, +inf -> is_fpclass x, fcPosInf|fcNan
4497 // fcmp ueq (fabs x), +inf -> is_fpclass x, fcInf|fcNan
4498 // fcmp ueq x, -inf -> is_fpclass x, fcNegInf|fcNan
4499 // fcmp ueq fabs(x), -inf -> is_fpclass x, fcNan
4500 if (IsNegativeRHS) {
4501 Mask = ~fcNegInf & ~fcNan;
4502 if (IsFabs)
4503 Mask = ~fcNan;
4504 } else {
4505 Mask = ~fcPosInf & ~fcNan;
4506 if (IsFabs)
4507 Mask &= ~fcNegInf;
4508 }
4509
4510 break;
4511 }
4512 case FCmpInst::FCMP_OLT:
4513 case FCmpInst::FCMP_UGE: {
4514 if (IsNegativeRHS) {
4515 // No value is ordered and less than negative infinity.
4516 // All values are unordered with or at least negative infinity.
4517 // fcmp olt x, -inf -> false
4518 // fcmp uge x, -inf -> true
4519 Mask = fcNone;
4520 break;
4521 }
4522
4523 // fcmp olt fabs(x), +inf -> fcFinite
4524 // fcmp uge fabs(x), +inf -> ~fcFinite
4525 // fcmp olt x, +inf -> fcFinite|fcNegInf
4526 // fcmp uge x, +inf -> ~(fcFinite|fcNegInf)
4527 Mask = fcFinite;
4528 if (!IsFabs)
4529 Mask |= fcNegInf;
4530 break;
4531 }
4532 case FCmpInst::FCMP_OGE:
4533 case FCmpInst::FCMP_ULT: {
4534 if (IsNegativeRHS) {
4535 // fcmp oge x, -inf -> ~fcNan
4536 // fcmp oge fabs(x), -inf -> ~fcNan
4537 // fcmp ult x, -inf -> fcNan
4538 // fcmp ult fabs(x), -inf -> fcNan
4539 Mask = ~fcNan;
4540 break;
4541 }
4542
4543 // fcmp oge fabs(x), +inf -> fcInf
4544 // fcmp oge x, +inf -> fcPosInf
4545 // fcmp ult fabs(x), +inf -> ~fcInf
4546 // fcmp ult x, +inf -> ~fcPosInf
4547 Mask = fcPosInf;
4548 if (IsFabs)
4549 Mask |= fcNegInf;
4550 break;
4551 }
4552 case FCmpInst::FCMP_OGT:
4553 case FCmpInst::FCMP_ULE: {
4554 if (IsNegativeRHS) {
4555 // fcmp ogt x, -inf -> fcmp one x, -inf
4556 // fcmp ogt fabs(x), -inf -> fcmp ord x, x
4557 // fcmp ule x, -inf -> fcmp ueq x, -inf
4558 // fcmp ule fabs(x), -inf -> fcmp uno x, x
4559 Mask = IsFabs ? ~fcNan : ~(fcNegInf | fcNan);
4560 break;
4561 }
4562
4563 // No value is ordered and greater than infinity.
4564 Mask = fcNone;
4565 break;
4566 }
4567 case FCmpInst::FCMP_OLE:
4568 case FCmpInst::FCMP_UGT: {
4569 if (IsNegativeRHS) {
4570 Mask = IsFabs ? fcNone : fcNegInf;
4571 break;
4572 }
4573
4574 // fcmp ole x, +inf -> fcmp ord x, x
4575 // fcmp ole fabs(x), +inf -> fcmp ord x, x
4576 // fcmp ole x, -inf -> fcmp oeq x, -inf
4577 // fcmp ole fabs(x), -inf -> false
4578 Mask = ~fcNan;
4579 break;
4580 }
4581 default:
4582 llvm_unreachable("all compare types are handled");
4583 }
4584
4585 // Invert the comparison for the unordered cases.
4586 if (FCmpInst::isUnordered(Pred))
4587 Mask = ~Mask;
4588
4589 return exactClass(Src, Mask);
4590 }
4591
4592 if (Pred == FCmpInst::FCMP_OEQ)
4593 return {Src, RHSClass, fcAllFlags};
4594
4595 if (Pred == FCmpInst::FCMP_UEQ) {
4596 FPClassTest Class = RHSClass | fcNan;
4597 return {Src, Class, ~fcNan};
4598 }
4599
4600 if (Pred == FCmpInst::FCMP_ONE)
4601 return {Src, ~fcNan, RHSClass | fcNan};
4602
4603 if (Pred == FCmpInst::FCMP_UNE)
4604 return {Src, fcAllFlags, RHSClass};
4605
4606 assert((RHSClass == fcNone || RHSClass == fcPosNormal ||
4607 RHSClass == fcNegNormal || RHSClass == fcNormal ||
4608 RHSClass == fcPosSubnormal || RHSClass == fcNegSubnormal ||
4609 RHSClass == fcSubnormal) &&
4610 "should have been recognized as an exact class test");
4611
4612 if (IsNegativeRHS) {
4613 // TODO: Handle fneg(fabs)
4614 if (IsFabs) {
4615 // fabs(x) o> -k -> fcmp ord x, x
4616 // fabs(x) u> -k -> true
4617 // fabs(x) o< -k -> false
4618 // fabs(x) u< -k -> fcmp uno x, x
4619 switch (Pred) {
4620 case FCmpInst::FCMP_OGT:
4621 case FCmpInst::FCMP_OGE:
4622 return {Src, ~fcNan, fcNan};
4623 case FCmpInst::FCMP_UGT:
4624 case FCmpInst::FCMP_UGE:
4625 return {Src, fcAllFlags, fcNone};
4626 case FCmpInst::FCMP_OLT:
4627 case FCmpInst::FCMP_OLE:
4628 return {Src, fcNone, fcAllFlags};
4629 case FCmpInst::FCMP_ULT:
4630 case FCmpInst::FCMP_ULE:
4631 return {Src, fcNan, ~fcNan};
4632 default:
4633 break;
4634 }
4635
4636 return {nullptr, fcAllFlags, fcAllFlags};
4637 }
4638
4639 FPClassTest ClassesLE = fcNegInf | fcNegNormal;
4640 FPClassTest ClassesGE = fcPositive | fcNegZero | fcNegSubnormal;
4641
4642 if (IsDenormalRHS)
4643 ClassesLE |= fcNegSubnormal;
4644 else
4645 ClassesGE |= fcNegNormal;
4646
4647 switch (Pred) {
4648 case FCmpInst::FCMP_OGT:
4649 case FCmpInst::FCMP_OGE:
4650 return {Src, ClassesGE, ~ClassesGE | RHSClass};
4651 case FCmpInst::FCMP_UGT:
4652 case FCmpInst::FCMP_UGE:
4653 return {Src, ClassesGE | fcNan, ~(ClassesGE | fcNan) | RHSClass};
4654 case FCmpInst::FCMP_OLT:
4655 case FCmpInst::FCMP_OLE:
4656 return {Src, ClassesLE, ~ClassesLE | RHSClass};
4657 case FCmpInst::FCMP_ULT:
4658 case FCmpInst::FCMP_ULE:
4659 return {Src, ClassesLE | fcNan, ~(ClassesLE | fcNan) | RHSClass};
4660 default:
4661 break;
4662 }
4663 } else if (IsPositiveRHS) {
4664 FPClassTest ClassesGE = fcPosNormal | fcPosInf;
4665 FPClassTest ClassesLE = fcNegative | fcPosZero | fcPosSubnormal;
4666 if (IsDenormalRHS)
4667 ClassesGE |= fcPosSubnormal;
4668 else
4669 ClassesLE |= fcPosNormal;
4670
4671 if (IsFabs) {
4672 ClassesGE = llvm::inverse_fabs(ClassesGE);
4673 ClassesLE = llvm::inverse_fabs(ClassesLE);
4674 }
4675
4676 switch (Pred) {
4677 case FCmpInst::FCMP_OGT:
4678 case FCmpInst::FCMP_OGE:
4679 return {Src, ClassesGE, ~ClassesGE | RHSClass};
4680 case FCmpInst::FCMP_UGT:
4681 case FCmpInst::FCMP_UGE:
4682 return {Src, ClassesGE | fcNan, ~(ClassesGE | fcNan) | RHSClass};
4683 case FCmpInst::FCMP_OLT:
4684 case FCmpInst::FCMP_OLE:
4685 return {Src, ClassesLE, ~ClassesLE | RHSClass};
4686 case FCmpInst::FCMP_ULT:
4687 case FCmpInst::FCMP_ULE:
4688 return {Src, ClassesLE | fcNan, ~(ClassesLE | fcNan) | RHSClass};
4689 default:
4690 break;
4691 }
4692 }
4693
4694 return {nullptr, fcAllFlags, fcAllFlags};
4695}
4696
4697std::tuple<Value *, FPClassTest, FPClassTest>
4698llvm::fcmpImpliesClass(FCmpInst::Predicate Pred, const Function &F, Value *LHS,
4699 const APFloat &ConstRHS, bool LookThroughSrc) {
4700 // We can refine checks against smallest normal / largest denormal to an
4701 // exact class test.
4702 if (!ConstRHS.isNegative() && ConstRHS.isSmallestNormalized()) {
4703 Value *Src = LHS;
4704 const bool IsFabs = LookThroughSrc && match(LHS, m_FAbs(m_Value(Src)));
4705
4706 FPClassTest Mask;
4707 // Match pattern that's used in __builtin_isnormal.
4708 switch (Pred) {
4709 case FCmpInst::FCMP_OLT:
4710 case FCmpInst::FCMP_UGE: {
4711 // fcmp olt x, smallest_normal -> fcNegInf|fcNegNormal|fcSubnormal|fcZero
4712 // fcmp olt fabs(x), smallest_normal -> fcSubnormal|fcZero
4713 // fcmp uge x, smallest_normal -> fcNan|fcPosNormal|fcPosInf
4714 // fcmp uge fabs(x), smallest_normal -> ~(fcSubnormal|fcZero)
4715 Mask = fcZero | fcSubnormal;
4716 if (!IsFabs)
4717 Mask |= fcNegNormal | fcNegInf;
4718
4719 break;
4720 }
4721 case FCmpInst::FCMP_OGE:
4722 case FCmpInst::FCMP_ULT: {
4723 // fcmp oge x, smallest_normal -> fcPosNormal | fcPosInf
4724 // fcmp oge fabs(x), smallest_normal -> fcInf | fcNormal
4725 // fcmp ult x, smallest_normal -> ~(fcPosNormal | fcPosInf)
4726 // fcmp ult fabs(x), smallest_normal -> ~(fcInf | fcNormal)
4727 Mask = fcPosInf | fcPosNormal;
4728 if (IsFabs)
4729 Mask |= fcNegInf | fcNegNormal;
4730 break;
4731 }
4732 default:
4733 return fcmpImpliesClass(Pred, F, LHS, ConstRHS.classify(),
4734 LookThroughSrc);
4735 }
4736
4737 // Invert the comparison for the unordered cases.
4738 if (FCmpInst::isUnordered(Pred))
4739 Mask = ~Mask;
4740
4741 return exactClass(Src, Mask);
4742 }
4743
4744 return fcmpImpliesClass(Pred, F, LHS, ConstRHS.classify(), LookThroughSrc);
4745}
4746
4747std::tuple<Value *, FPClassTest, FPClassTest>
4748llvm::fcmpImpliesClass(FCmpInst::Predicate Pred, const Function &F, Value *LHS,
4749 Value *RHS, bool LookThroughSrc) {
4750 const APFloat *ConstRHS;
4751 if (!match(RHS, m_APFloatAllowPoison(ConstRHS)))
4752 return {nullptr, fcAllFlags, fcAllFlags};
4753
4754 // TODO: Just call computeKnownFPClass for RHS to handle non-constants.
4755 return fcmpImpliesClass(Pred, F, LHS, *ConstRHS, LookThroughSrc);
4756}
4757
4758static void computeKnownFPClassFromCond(const Value *V, Value *Cond,
4759 bool CondIsTrue,
4760 const Instruction *CxtI,
4761 KnownFPClass &KnownFromContext) {
4762 CmpInst::Predicate Pred;
4763 Value *LHS;
4764 uint64_t ClassVal = 0;
4765 const APFloat *CRHS;
4766 const APInt *RHS;
4767 if (match(Cond, m_FCmp(Pred, m_Value(LHS), m_APFloat(CRHS)))) {
4768 auto [CmpVal, MaskIfTrue, MaskIfFalse] = fcmpImpliesClass(
4769 Pred, *CxtI->getParent()->getParent(), LHS, *CRHS, LHS != V);
4770 if (CmpVal == V)
4771 KnownFromContext.knownNot(~(CondIsTrue ? MaskIfTrue : MaskIfFalse));
4772 } else if (match(Cond, m_Intrinsic<Intrinsic::is_fpclass>(
4773 m_Value(LHS), m_ConstantInt(ClassVal)))) {
4774 FPClassTest Mask = static_cast<FPClassTest>(ClassVal);
4775 KnownFromContext.knownNot(CondIsTrue ? ~Mask : Mask);
4776 } else if (match(Cond, m_ICmp(Pred, m_ElementWiseBitCast(m_Value(LHS)),
4777 m_APInt(RHS)))) {
4778 bool TrueIfSigned;
4779 if (!isSignBitCheck(Pred, *RHS, TrueIfSigned))
4780 return;
4781 if (TrueIfSigned == CondIsTrue)
4782 KnownFromContext.signBitMustBeOne();
4783 else
4784 KnownFromContext.signBitMustBeZero();
4785 }
4786}
4787
4788static KnownFPClass computeKnownFPClassFromContext(const Value *V,
4789 const SimplifyQuery &Q) {
4790 KnownFPClass KnownFromContext;
4791
4792 if (!Q.CxtI)
4793 return KnownFromContext;
4794
4795 if (Q.DC && Q.DT) {
4796 // Handle dominating conditions.
4797 for (BranchInst *BI : Q.DC->conditionsFor(V)) {
4798 Value *Cond = BI->getCondition();
4799
4800 BasicBlockEdge Edge0(BI->getParent(), BI->getSuccessor(0));
4801 if (Q.DT->dominates(Edge0, Q.CxtI->getParent()))
4802 computeKnownFPClassFromCond(V, Cond, /*CondIsTrue=*/true, Q.CxtI,
4803 KnownFromContext);
4804
4805 BasicBlockEdge Edge1(BI->getParent(), BI->getSuccessor(1));
4806 if (Q.DT->dominates(Edge1, Q.CxtI->getParent()))
4807 computeKnownFPClassFromCond(V, Cond, /*CondIsTrue=*/false, Q.CxtI,
4808 KnownFromContext);
4809 }
4810 }
4811
4812 if (!Q.AC)
4813 return KnownFromContext;
4814
4815 // Try to restrict the floating-point classes based on information from
4816 // assumptions.
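 // For example (illustrative), an assume of `fcmp ord double %x, 0.0` that is
 // valid at the context instruction lets us record that %x is not a NaN.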
4817 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
4818 if (!AssumeVH)
4819 continue;
4820 CallInst *I = cast<CallInst>(AssumeVH);
4821
4822 assert(I->getFunction() == Q.CxtI->getParent()->getParent() &&
4823 "Got assumption for the wrong function!");
4824 assert(I->getIntrinsicID() == Intrinsic::assume &&
4825 "must be an assume intrinsic");
4826
4827 if (!isValidAssumeForContext(I, Q.CxtI, Q.DT))
4828 continue;
4829
4830 computeKnownFPClassFromCond(V, I->getArgOperand(0), /*CondIsTrue=*/true,
4831 Q.CxtI, KnownFromContext);
4832 }
4833
4834 return KnownFromContext;
4835}
4836
4837void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
4838 FPClassTest InterestedClasses, KnownFPClass &Known,
4839 unsigned Depth, const SimplifyQuery &Q);
4840
4841static void computeKnownFPClass(const Value *V, KnownFPClass &Known,
4842 FPClassTest InterestedClasses, unsigned Depth,
4843 const SimplifyQuery &Q) {
4844 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
4845 APInt DemandedElts =
4846 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
4847 computeKnownFPClass(V, DemandedElts, InterestedClasses, Known, Depth, Q);
4848}
4849
4850static void computeKnownFPClassForFPTrunc(const Operator *Op,
4851 const APInt &DemandedElts,
4852 FPClassTest InterestedClasses,
4853 KnownFPClass &Known, unsigned Depth,
4854 const SimplifyQuery &Q) {
4855 if ((InterestedClasses &
4856 (KnownFPClass::OrderedLessThanZeroMask | fcNan)) == fcNone)
4857 return;
4858
4859 KnownFPClass KnownSrc;
4860 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
4861 KnownSrc, Depth + 1, Q);
4862
4863 // Sign should be preserved
4864 // TODO: Handle cannot be ordered greater than zero
4865 if (KnownSrc.cannotBeOrderedLessThanZero())
4866 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
4867
4868 Known.propagateNaN(KnownSrc, true);
4869
4870 // Infinity needs a range check.
4871}
4872
4873void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
4874 FPClassTest InterestedClasses, KnownFPClass &Known,
4875 unsigned Depth, const SimplifyQuery &Q) {
4876 assert(Known.isUnknown() && "should not be called with known information");
4877
4878 if (!DemandedElts) {
4879 // No demanded elts, better to assume we don't know anything.
4880 Known.resetAll();
4881 return;
4882 }
4883
4884 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
4885
4886 if (auto *CFP = dyn_cast<ConstantFP>(V)) {
4887 Known.KnownFPClasses = CFP->getValueAPF().classify();
4888 Known.SignBit = CFP->isNegative();
4889 return;
4890 }
4891
4892 if (isa<ConstantAggregateZero>(V)) {
4893 Known.KnownFPClasses = fcPosZero;
4894 Known.SignBit = false;
4895 return;
4896 }
4897
4898 if (isa<PoisonValue>(V)) {
4899 Known.KnownFPClasses = fcNone;
4900 Known.SignBit = false;
4901 return;
4902 }
4903
4904 // Try to handle fixed width vector constants
4905 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
4906 const Constant *CV = dyn_cast<Constant>(V);
4907 if (VFVTy && CV) {
4908 Known.KnownFPClasses = fcNone;
4909 bool SignBitAllZero = true;
4910 bool SignBitAllOne = true;
4911
4912 // For vectors, verify that each element is not NaN.
4913 unsigned NumElts = VFVTy->getNumElements();
4914 for (unsigned i = 0; i != NumElts; ++i) {
4915 if (!DemandedElts[i])
4916 continue;
4917
4918 Constant *Elt = CV->getAggregateElement(i);
4919 if (!Elt) {
4920 Known = KnownFPClass();
4921 return;
4922 }
4923 if (isa<PoisonValue>(Elt))
4924 continue;
4925 auto *CElt = dyn_cast<ConstantFP>(Elt);
4926 if (!CElt) {
4927 Known = KnownFPClass();
4928 return;
4929 }
4930
4931 const APFloat &C = CElt->getValueAPF();
4932 Known.KnownFPClasses |= C.classify();
4933 if (C.isNegative())
4934 SignBitAllZero = false;
4935 else
4936 SignBitAllOne = false;
4937 }
4938 if (SignBitAllOne != SignBitAllZero)
4939 Known.SignBit = SignBitAllOne;
4940 return;
4941 }
4942
4943 FPClassTest KnownNotFromFlags = fcNone;
4944 if (const auto *CB = dyn_cast<CallBase>(V))
4945 KnownNotFromFlags |= CB->getRetNoFPClass();
4946 else if (const auto *Arg = dyn_cast<Argument>(V))
4947 KnownNotFromFlags |= Arg->getNoFPClass();
4948
4949 const Operator *Op = dyn_cast<Operator>(V);
4950 if (const FPMathOperator *FPOp = dyn_cast_or_null<FPMathOperator>(Op)) {
4951 if (FPOp->hasNoNaNs())
4952 KnownNotFromFlags |= fcNan;
4953 if (FPOp->hasNoInfs())
4954 KnownNotFromFlags |= fcInf;
4955 }
4956
4957 KnownFPClass AssumedClasses = computeKnownFPClassFromContext(V, Q);
4958 KnownNotFromFlags |= ~AssumedClasses.KnownFPClasses;
4959
4960 // We no longer need to find out about these bits from inputs if we can
4961 // assume this from flags/attributes.
4962 InterestedClasses &= ~KnownNotFromFlags;
4963
4964 auto ClearClassesFromFlags = make_scope_exit([=, &Known] {
4965 Known.knownNot(KnownNotFromFlags);
4966 if (!Known.SignBit && AssumedClasses.SignBit) {
4967 if (*AssumedClasses.SignBit)
4968 Known.signBitMustBeOne();
4969 else
4970 Known.signBitMustBeZero();
4971 }
4972 });
4973
4974 if (!Op)
4975 return;
4976
4977 // All recursive calls that increase depth must come after this.
4978 if (Depth == MaxAnalysisRecursionDepth)
4979 return;
4980
4981 const unsigned Opc = Op->getOpcode();
4982 switch (Opc) {
4983 case Instruction::FNeg: {
4984 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
4985 Known, Depth + 1, Q);
4986 Known.fneg();
4987 break;
4988 }
4989 case Instruction::Select: {
4990 Value *Cond = Op->getOperand(0);
4991 Value *LHS = Op->getOperand(1);
4992 Value *RHS = Op->getOperand(2);
4993
4994 FPClassTest FilterLHS = fcAllFlags;
4995 FPClassTest FilterRHS = fcAllFlags;
4996
4997 Value *TestedValue = nullptr;
4998 FPClassTest MaskIfTrue = fcAllFlags;
4999 FPClassTest MaskIfFalse = fcAllFlags;
5000 uint64_t ClassVal = 0;
5001 const Function *F = cast<Instruction>(Op)->getFunction();
5002 CmpInst::Predicate Pred;
5003 Value *CmpLHS, *CmpRHS;
5004 if (F && match(Cond, m_FCmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS)))) {
5005 // If the select filters out a value based on the class, it no longer
5006 // participates in the class of the result
5007
5008 // TODO: In some degenerate cases we can infer something if we try again
5009 // without looking through sign operations.
5010 bool LookThroughFAbsFNeg = CmpLHS != LHS && CmpLHS != RHS;
5011 std::tie(TestedValue, MaskIfTrue, MaskIfFalse) =
5012 fcmpImpliesClass(Pred, *F, CmpLHS, CmpRHS, LookThroughFAbsFNeg);
5013 } else if (match(Cond,
5014 m_Intrinsic<Intrinsic::is_fpclass>(
5015 m_Value(TestedValue), m_ConstantInt(ClassVal)))) {
5016 FPClassTest TestedMask = static_cast<FPClassTest>(ClassVal);
5017 MaskIfTrue = TestedMask;
5018 MaskIfFalse = ~TestedMask;
5019 }
5020
5021 if (TestedValue == LHS) {
5022 // match !isnan(x) ? x : y
5023 FilterLHS = MaskIfTrue;
5024 } else if (TestedValue == RHS) { // && IsExactClass
5025 // match !isnan(x) ? y : x
5026 FilterRHS = MaskIfFalse;
5027 }
5028
5029 KnownFPClass Known2;
5030 computeKnownFPClass(LHS, DemandedElts, InterestedClasses & FilterLHS, Known,
5031 Depth + 1, Q);
5032 Known.KnownFPClasses &= FilterLHS;
5033
5034 computeKnownFPClass(RHS, DemandedElts, InterestedClasses & FilterRHS,
5035 Known2, Depth + 1, Q);
5036 Known2.KnownFPClasses &= FilterRHS;
5037
5038 Known |= Known2;
5039 break;
5040 }
5041 case Instruction::Call: {
5042 const CallInst *II = cast<CallInst>(Op);
5043 const Intrinsic::ID IID = II->getIntrinsicID();
5044 switch (IID) {
5045 case Intrinsic::fabs: {
5046 if ((InterestedClasses & (fcNan | fcPositive)) != fcNone) {
5047 // If we only care about the sign bit we don't need to inspect the
5048 // operand.
5049 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
5050 InterestedClasses, Known, Depth + 1, Q);
5051 }
5052
5053 Known.fabs();
5054 break;
5055 }
5056 case Intrinsic::copysign: {
5057 KnownFPClass KnownSign;
5058
5059 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
5060 Known, Depth + 1, Q);
5061 computeKnownFPClass(II->getArgOperand(1), DemandedElts, InterestedClasses,
5062 KnownSign, Depth + 1, Q);
5063 Known.copysign(KnownSign);
5064 break;
5065 }
5066 case Intrinsic::fma:
5067 case Intrinsic::fmuladd: {
5068 if ((InterestedClasses & fcNegative) == fcNone)
5069 break;
5070
5071 if (II->getArgOperand(0) != II->getArgOperand(1))
5072 break;
5073
5074 // The multiply cannot be -0 and therefore the add can't be -0
5075 Known.knownNot(fcNegZero);
5076
5077 // x * x + y is non-negative if y is non-negative.
5078 KnownFPClass KnownAddend;
5079 computeKnownFPClass(II->getArgOperand(2), DemandedElts, InterestedClasses,
5080 KnownAddend, Depth + 1, Q);
5081
5082 if (KnownAddend.cannotBeOrderedLessThanZero())
5083 Known.knownNot(fcNegative);
5084 break;
5085 }
5086 case Intrinsic::sqrt:
5087 case Intrinsic::experimental_constrained_sqrt: {
5088 KnownFPClass KnownSrc;
5089 FPClassTest InterestedSrcs = InterestedClasses;
5090 if (InterestedClasses & fcNan)
5091 InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask;
5092
5093 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs,
5094 KnownSrc, Depth + 1, Q);
5095
5096 if (KnownSrc.isKnownNeverPosInfinity())
5097 Known.knownNot(fcPosInf);
5098 if (KnownSrc.isKnownNever(fcSNan))
5099 Known.knownNot(fcSNan);
5100
5101 // Any negative value besides -0 returns a nan.
5102 if (KnownSrc.isKnownNeverNaN() && KnownSrc.cannotBeOrderedLessThanZero())
5103 Known.knownNot(fcNan);
5104
5105 // The only negative value that can be returned is -0 for -0 inputs.
5106 Known.knownNot(fcNegInf | fcNegSubnormal | fcNegNormal);
5107
5108 // If the input denormal mode could be PreserveSign, a negative
5109 // subnormal input could produce a negative zero output.
5110 const Function *F = II->getFunction();
5111 if (Q.IIQ.hasNoSignedZeros(II) ||
5112 (F && KnownSrc.isKnownNeverLogicalNegZero(*F, II->getType())))
5113 Known.knownNot(fcNegZero);
5114
5115 break;
5116 }
5117 case Intrinsic::sin:
5118 case Intrinsic::cos: {
5119 // Return NaN on infinite inputs.
5120 KnownFPClass KnownSrc;
5121 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
5122 KnownSrc, Depth + 1, Q);
5123 Known.knownNot(fcInf);
5124 if (KnownSrc.isKnownNeverNaN() && KnownSrc.isKnownNeverInfinity())
5125 Known.knownNot(fcNan);
5126 break;
5127 }
5128 case Intrinsic::maxnum:
5129 case Intrinsic::minnum:
5130 case Intrinsic::minimum:
5131 case Intrinsic::maximum: {
5132 KnownFPClass KnownLHS, KnownRHS;
5133 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
5134 KnownLHS, Depth + 1, Q);
5135 computeKnownFPClass(II->getArgOperand(1), DemandedElts, InterestedClasses,
5136 KnownRHS, Depth + 1, Q);
5137
5138 bool NeverNaN = KnownLHS.isKnownNeverNaN() || KnownRHS.isKnownNeverNaN();
5139 Known = KnownLHS | KnownRHS;
5140
5141 // If either operand is not NaN, the result is not NaN.
5142 if (NeverNaN && (IID == Intrinsic::minnum || IID == Intrinsic::maxnum))
5143 Known.knownNot(fcNan);
5144
5145 if (IID == Intrinsic::maxnum) {
5146 // If at least one operand is known to be positive, the result must be
5147 // positive.
5148 if ((KnownLHS.cannotBeOrderedLessThanZero() &&
5149 KnownLHS.isKnownNeverNaN()) ||
5150 (KnownRHS.cannotBeOrderedLessThanZero() &&
5151 KnownRHS.isKnownNeverNaN()))
5152 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
5153 } else if (IID == Intrinsic::maximum) {
5154 // If at least one operand is known to be positive, the result must be
5155 // positive.
5156 if (KnownLHS.cannotBeOrderedLessThanZero() ||
5157 KnownRHS.cannotBeOrderedLessThanZero())
5158 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
5159 } else if (IID == Intrinsic::minnum) {
5160 // If at least one operand is known to be negative, the result must be
5161 // negative.
5162 if ((KnownLHS.cannotBeOrderedGreaterThanZero() &&
5163 KnownLHS.isKnownNeverNaN()) ||
5164 (KnownRHS.cannotBeOrderedGreaterThanZero() &&
5165 KnownRHS.isKnownNeverNaN()))
5166 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
5167 } else {
5168 // If at least one operand is known to be negative, the result must be
5169 // negative.
5170 if (KnownLHS.cannotBeOrderedGreaterThanZero() ||
5171 KnownRHS.cannotBeOrderedGreaterThanZero())
5172 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
5173 }
5174
5175 // Fixup zero handling if denormals could be returned as a zero.
5176 //
5177 // As there's no spec for denormal flushing, be conservative with the
5178 // treatment of denormals that could be flushed to zero. For older
5179 // subtargets on AMDGPU the min/max instructions would not flush the
5180 // output and return the original value.
5181 //
5182 if ((Known.KnownFPClasses & fcZero) != fcNone &&
5183 !Known.isKnownNeverSubnormal()) {
5184 const Function *Parent = II->getFunction();
5185 if (!Parent)
5186 break;
5187
5188 DenormalMode Mode = Parent->getDenormalMode(
5189 II->getType()->getScalarType()->getFltSemantics());
5190 if (Mode != DenormalMode::getIEEE())
5191 Known.KnownFPClasses |= fcZero;
5192 }
5193
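 // Once NaN is ruled out, work out the sign of the result: if both operands
 // have the same known sign bit it propagates directly. Otherwise the sign
 // can still be pinned when the intrinsic orders -0 below +0 (maximum /
 // minimum) or when mixed-sign zeros cannot occur among the operands.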
5194 if (Known.isKnownNeverNaN()) {
5195 if (KnownLHS.SignBit && KnownRHS.SignBit &&
5196 *KnownLHS.SignBit == *KnownRHS.SignBit) {
5197 if (*KnownLHS.SignBit)
5198 Known.signBitMustBeOne();
5199 else
5200 Known.signBitMustBeZero();
5201 } else if ((IID == Intrinsic::maximum || IID == Intrinsic::minimum) ||
5202 ((KnownLHS.isKnownNeverNegZero() ||
5203 KnownRHS.isKnownNeverPosZero()) &&
5204 (KnownLHS.isKnownNeverPosZero() ||
5205 KnownRHS.isKnownNeverNegZero()))) {
5206 if ((IID == Intrinsic::maximum || IID == Intrinsic::maxnum) &&
5207 (KnownLHS.SignBit == false || KnownRHS.SignBit == false))
5208 Known.signBitMustBeZero();
5209 else if ((IID == Intrinsic::minimum || IID == Intrinsic::minnum) &&
5210 (KnownLHS.SignBit == true || KnownRHS.SignBit == true))
5211 Known.signBitMustBeOne();
5212 }
5213 }
5214 break;
5215 }
5216 case Intrinsic::canonicalize: {
5217 KnownFPClass KnownSrc;
5218 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
5219 KnownSrc, Depth + 1, Q);
5220
5221 // This is essentially a stronger form of
5222 // propagateCanonicalizingSrc. Other "canonicalizing" operations don't
5223 // actually have an IR canonicalization guarantee.
5224
5225 // Canonicalize may flush denormals to zero, so we have to consider the
5226 // denormal mode to preserve known-not-0 knowledge.
5227 Known.KnownFPClasses = KnownSrc.KnownFPClasses | fcZero | fcQNan;
5228
5229 // Stronger version of propagateNaN
5230 // Canonicalize is guaranteed to quiet signaling nans.
5231 if (KnownSrc.isKnownNeverNaN())
5232 Known.knownNot(fcNan);
5233 else
5234 Known.knownNot(fcSNan);
5235
5236 const Function *F = II->getFunction();
5237 if (!F)
5238 break;
5239
5240 // If the parent function flushes denormals, the canonical output cannot
5241 // be a denormal.
5242 const fltSemantics &FPType =
5243 II->getType()->getScalarType()->getFltSemantics();
5244 DenormalMode DenormMode = F->getDenormalMode(FPType);
5245 if (DenormMode == DenormalMode::getIEEE()) {
5246 if (KnownSrc.isKnownNever(fcPosZero))
5247 Known.knownNot(fcPosZero);
5248 if (KnownSrc.isKnownNever(fcNegZero))
5249 Known.knownNot(fcNegZero);
5250 break;
5251 }
5252
5253 if (DenormMode.inputsAreZero() || DenormMode.outputsAreZero())
5254 Known.knownNot(fcSubnormal);
5255
5256 if (DenormMode.Input == DenormalMode::PositiveZero ||
5257 (DenormMode.Output == DenormalMode::PositiveZero &&
5258 DenormMode.Input == DenormalMode::IEEE))
5259 Known.knownNot(fcNegZero);
5260
5261 break;
5262 }
5263 case Intrinsic::vector_reduce_fmax:
5264 case Intrinsic::vector_reduce_fmin:
5265 case Intrinsic::vector_reduce_fmaximum:
5266 case Intrinsic::vector_reduce_fminimum: {
5267 // reduce min/max will choose an element from one of the vector elements,
5268 // so we can infer any class information that is common to all elements.
5269 Known = computeKnownFPClass(II->getArgOperand(0), II->getFastMathFlags(),
5270 InterestedClasses, Depth + 1, Q);
5271 // Can only propagate sign if output is never NaN.
5272 if (!Known.isKnownNeverNaN())
5273 Known.SignBit.reset();
5274 break;
5275 }
5276 // reverse preserves all characteristics of the input vec's element.
5277 case Intrinsic::vector_reverse:
5278 Known = computeKnownFPClass(
5279 II->getArgOperand(0), DemandedElts.reverseBits(),
5280 II->getFastMathFlags(), InterestedClasses, Depth + 1, Q);
5281 break;
5282 case Intrinsic::trunc:
5283 case Intrinsic::floor:
5284 case Intrinsic::ceil:
5285 case Intrinsic::rint:
5286 case Intrinsic::nearbyint:
5287 case Intrinsic::round:
5288 case Intrinsic::roundeven: {
5289 KnownFPClass KnownSrc;
5290 FPClassTest InterestedSrcs = InterestedClasses;
5291 if (InterestedSrcs & fcPosFinite)
5292 InterestedSrcs |= fcPosFinite;
5293 if (InterestedSrcs & fcNegFinite)
5294 InterestedSrcs |= fcNegFinite;
5295 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs,
5296 KnownSrc, Depth + 1, Q);
5297
5298 // Integer results cannot be subnormal.
5299 Known.knownNot(fcSubnormal);
5300
5301 Known.propagateNaN(KnownSrc, true);
5302
5303 // Pass through infinities, except PPC_FP128 is a special case for
5304 // intrinsics other than trunc.
5305 if (IID == Intrinsic::trunc || !V->getType()->isMultiUnitFPType()) {
5306 if (KnownSrc.isKnownNeverPosInfinity())
5307 Known.knownNot(fcPosInf);
5308 if (KnownSrc.isKnownNeverNegInfinity())
5309 Known.knownNot(fcNegInf);
5310 }
5311
5312 // Negative values that round to 0 produce -0.
5313 if (KnownSrc.isKnownNever(fcPosFinite))
5314 Known.knownNot(fcPosFinite);
5315 if (KnownSrc.isKnownNever(fcNegFinite))
5316 Known.knownNot(fcNegFinite);
5317
5318 break;
5319 }
5320 case Intrinsic::exp:
5321 case Intrinsic::exp2:
5322 case Intrinsic::exp10: {
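 // exp, exp2 and exp10 never return a negative value or -0: results
 // underflow to +0 and overflow to +inf.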
5323 Known.knownNot(fcNegative);
5324 if ((InterestedClasses & fcNan) == fcNone)
5325 break;
5326
5327 KnownFPClass KnownSrc;
5328 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
5329 KnownSrc, Depth + 1, Q);
5330 if (KnownSrc.isKnownNeverNaN()) {
5331 Known.knownNot(fcNan);
5332 Known.signBitMustBeZero();
5333 }
5334
5335 break;
5336 }
5337 case Intrinsic::fptrunc_round: {
5338 computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses, Known,
5339 Depth, Q);
5340 break;
5341 }
5342 case Intrinsic::log:
5343 case Intrinsic::log10:
5344 case Intrinsic::log2:
5345 case Intrinsic::experimental_constrained_log:
5346 case Intrinsic::experimental_constrained_log10:
5347 case Intrinsic::experimental_constrained_log2: {
5348 // log(+inf) -> +inf
5349 // log([+-]0.0) -> -inf
5350 // log(-inf) -> nan
5351 // log(-x) -> nan
5352 if ((InterestedClasses & (fcNan | fcInf)) == fcNone)
5353 break;
5354
5355 FPClassTest InterestedSrcs = InterestedClasses;
5356 if ((InterestedClasses & fcNegInf) != fcNone)
5357 InterestedSrcs |= fcZero | fcSubnormal;
5358 if ((InterestedClasses & fcNan) != fcNone)
5359 InterestedSrcs |= fcNan | (fcNegative & ~fcNan);
5360
5361 KnownFPClass KnownSrc;
5362 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs,
5363 KnownSrc, Depth + 1, Q);
5364
5365 if (KnownSrc.isKnownNeverPosInfinity())
5366 Known.knownNot(fcPosInf);
5367
5368 if (KnownSrc.isKnownNeverNaN() && KnownSrc.cannotBeOrderedLessThanZero())
5369 Known.knownNot(fcNan);
5370
5371 const Function *F = II->getFunction();
5372 if (F && KnownSrc.isKnownNeverLogicalZero(*F, II->getType()))
5373 Known.knownNot(fcNegInf);
5374
5375 break;
5376 }
5377 case Intrinsic::powi: {
5378 if ((InterestedClasses & fcNegative) == fcNone)
5379 break;
5380
5381 const Value *Exp = II->getArgOperand(1);
5382 Type *ExpTy = Exp->getType();
5383 unsigned BitWidth = ExpTy->getScalarType()->getIntegerBitWidth();
5384 KnownBits ExponentKnownBits(BitWidth);
5385 computeKnownBits(Exp, isa<VectorType>(ExpTy) ? DemandedElts : APInt(1, 1),
5386 ExponentKnownBits, Depth + 1, Q);
5387
5388 if (ExponentKnownBits.Zero[0]) { // Is even
5389 Known.knownNot(fcNegative);
5390 break;
5391 }
5392
5393 // Given that exp is an integer, here are the
5394 // ways that pow can return a negative value:
5395 //
5396 // pow(-x, exp) --> negative if exp is odd and x is negative.
5397 // pow(-0, exp) --> -inf if exp is negative odd.
5398 // pow(-0, exp) --> -0 if exp is positive odd.
5399 // pow(-inf, exp) --> -0 if exp is negative odd.
5400 // pow(-inf, exp) --> -inf if exp is positive odd.
5401 KnownFPClass KnownSrc;
5402 computeKnownFPClass(II->getArgOperand(0), DemandedElts, fcNegative,
5403 KnownSrc, Depth + 1, Q);
5404 if (KnownSrc.isKnownNever(fcNegative))
5405 Known.knownNot(fcNegative);
5406 break;
5407 }
5408 case Intrinsic::ldexp: {
5409 KnownFPClass KnownSrc;
5410 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
5411 KnownSrc, Depth + 1, Q);
5412 Known.propagateNaN(KnownSrc, /*PropagateSign=*/true);
5413
5414 // Sign is preserved, but underflows may produce zeroes.
5415 if (KnownSrc.isKnownNever(fcNegative))
5416 Known.knownNot(fcNegative);
5417 else if (KnownSrc.cannotBeOrderedLessThanZero())
5418 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
5419
5420 if (KnownSrc.isKnownNever(fcPositive))
5421 Known.knownNot(fcPositive);
5422 else if (KnownSrc.cannotBeOrderedGreaterThanZero())
5423 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
5424
5425 // Can refine inf/zero handling based on the exponent operand.
5426 const FPClassTest ExpInfoMask = fcZero | fcSubnormal | fcInf;
5427 if ((InterestedClasses & ExpInfoMask) == fcNone)
5428 break;
5429 if ((KnownSrc.KnownFPClasses & ExpInfoMask) == fcNone)
5430 break;
5431
5432 const fltSemantics &Flt =
5433 II->getType()->getScalarType()->getFltSemantics();
5434 unsigned Precision = APFloat::semanticsPrecision(Flt);
5435 const Value *ExpArg = II->getArgOperand(1);
5436 ConstantRange ExpRange = computeConstantRange(
5437 ExpArg, true, Q.IIQ.UseInstrInfo, Q.AC, Q.CxtI, Q.DT, Depth + 1);
5438
5439 const int MantissaBits = Precision - 1;
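 // Illustrative reasoning: scaling any non-zero value up by at least
 // MantissaBits binary digits lifts even the smallest subnormal to at least
 // the smallest normal, so a non-zero result cannot be subnormal.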
5440 if (ExpRange.getSignedMin().sge(static_cast<int64_t>(MantissaBits)))
5441 Known.knownNot(fcSubnormal);
5442
5443 const Function *F = II->getFunction();
5444 const APInt *ConstVal = ExpRange.getSingleElement();
5445 if (ConstVal && ConstVal->isZero()) {
5446 // ldexp(x, 0) -> x, so propagate everything.
5447 Known.propagateCanonicalizingSrc(KnownSrc, *F, II->getType());
5448 } else if (ExpRange.isAllNegative()) {
5449 // If we know the power is <= 0, can't introduce inf
5450 if (KnownSrc.isKnownNeverPosInfinity())
5451 Known.knownNot(fcPosInf);
5452 if (KnownSrc.isKnownNeverNegInfinity())
5453 Known.knownNot(fcNegInf);
5454 } else if (ExpRange.isAllNonNegative()) {
5455 // If we know the power is >= 0, can't introduce subnormal or zero
5456 if (KnownSrc.isKnownNeverPosSubnormal())
5457 Known.knownNot(fcPosSubnormal);
5458 if (KnownSrc.isKnownNeverNegSubnormal())
5459 Known.knownNot(fcNegSubnormal);
5460 if (F && KnownSrc.isKnownNeverLogicalPosZero(*F, II->getType()))
5461 Known.knownNot(fcPosZero);
5462 if (F && KnownSrc.isKnownNeverLogicalNegZero(*F, II->getType()))
5463 Known.knownNot(fcNegZero);
5464 }
5465
5466 break;
5467 }
5468 case Intrinsic::arithmetic_fence: {
5469 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses,
5470 Known, Depth + 1, Q);
5471 break;
5472 }
5473 case Intrinsic::experimental_constrained_sitofp:
5474 case Intrinsic::experimental_constrained_uitofp:
5475 // Cannot produce nan
5476 Known.knownNot(fcNan);
5477
5478 // sitofp and uitofp turn into +0.0 for zero.
5479 Known.knownNot(fcNegZero);
5480
5481 // Integers cannot be subnormal
5482 Known.knownNot(fcSubnormal);
5483
5484 if (IID == Intrinsic::experimental_constrained_uitofp)
5485 Known.signBitMustBeZero();
5486
5487 // TODO: Copy inf handling from instructions
5488 break;
5489 default:
5490 break;
5491 }
5492
5493 break;
5494 }
5495 case Instruction::FAdd:
5496 case Instruction::FSub: {
5497 KnownFPClass KnownLHS, KnownRHS;
5498 bool WantNegative =
5499 Op->getOpcode() == Instruction::FAdd &&
5500 (InterestedClasses & KnownFPClass::OrderedLessThanZeroMask) != fcNone;
5501 bool WantNaN = (InterestedClasses & fcNan) != fcNone;
5502 bool WantNegZero = (InterestedClasses & fcNegZero) != fcNone;
5503
5504 if (!WantNaN && !WantNegative && !WantNegZero)
5505 break;
5506
5507 FPClassTest InterestedSrcs = InterestedClasses;
5508 if (WantNegative)
5509 InterestedSrcs |= KnownFPClass::OrderedLessThanZeroMask;
5510 if (InterestedClasses & fcNan)
5511 InterestedSrcs |= fcInf;
5512 computeKnownFPClass(Op->getOperand(1), DemandedElts, InterestedSrcs,
5513 KnownRHS, Depth + 1, Q);
5514
5515 if ((WantNaN && KnownRHS.isKnownNeverNaN()) ||
5516 (WantNegative && KnownRHS.cannotBeOrderedLessThanZero()) ||
5517 WantNegZero || Opc == Instruction::FSub) {
5518
5519 // RHS is canonically cheaper to compute. Skip inspecting the LHS if
5520 // there's no point.
5521 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedSrcs,
5522 KnownLHS, Depth + 1, Q);
5523 // Adding positive and negative infinity produces NaN.
5524 // TODO: Check sign of infinities.
5525 if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
5526 (KnownLHS.isKnownNeverInfinity() || KnownRHS.isKnownNeverInfinity()))
5527 Known.knownNot(fcNan);
5528
5529 // FIXME: Context function should always be passed in separately
5530 const Function *F = cast<Instruction>(Op)->getFunction();
5531
5532 if (Op->getOpcode() == Instruction::FAdd) {
5533 if (KnownLHS.cannotBeOrderedLessThanZero() &&
5534 KnownRHS.cannotBeOrderedLessThanZero())
5535 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
5536 if (!F)
5537 break;
5538
5539 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
5540 if ((KnownLHS.isKnownNeverLogicalNegZero(*F, Op->getType()) ||
5541 KnownRHS.isKnownNeverLogicalNegZero(*F, Op->getType())) &&
5542 // Make sure output negative denormal can't flush to -0
5543 outputDenormalIsIEEEOrPosZero(*F, Op->getType()))
5544 Known.knownNot(fcNegZero);
5545 } else {
5546 if (!F)
5547 break;
5548
5549 // Only fsub -0, +0 can return -0
5550 if ((KnownLHS.isKnownNeverLogicalNegZero(*F, Op->getType()) ||
5551 KnownRHS.isKnownNeverLogicalPosZero(*F, Op->getType())) &&
5552 // Make sure output negative denormal can't flush to -0
5553 outputDenormalIsIEEEOrPosZero(*F, Op->getType()))
5554 Known.knownNot(fcNegZero);
5555 }
5556 }
5557
5558 break;
5559 }
5560 case Instruction::FMul: {
5561 // X * X is always non-negative or a NaN.
5562 if (Op->getOperand(0) == Op->getOperand(1))
5563 Known.knownNot(fcNegative);
5564
5565 if ((InterestedClasses & fcNan) != fcNan)
5566 break;
5567
5568 // fcSubnormal is only needed in case of DAZ.
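 // (Under denormals-are-zero a subnormal operand behaves like zero, and
 // zero * infinity produces a NaN, so subnormals must be considered when
 // deciding whether the product can be NaN.)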
5569 const FPClassTest NeedForNan = fcNan | fcInf | fcZero | fcSubnormal;
5570
5571 KnownFPClass KnownLHS, KnownRHS;
5572 computeKnownFPClass(Op->getOperand(1), DemandedElts, NeedForNan, KnownRHS,
5573 Depth + 1, Q);
5574 if (!KnownRHS.isKnownNeverNaN())
5575 break;
5576
5577 computeKnownFPClass(Op->getOperand(0), DemandedElts, NeedForNan, KnownLHS,
5578 Depth + 1, Q);
5579 if (!KnownLHS.isKnownNeverNaN())
5580 break;
5581
5582 if (KnownLHS.SignBit && KnownRHS.SignBit) {
5583 if (*KnownLHS.SignBit == *KnownRHS.SignBit)
5584 Known.signBitMustBeZero();
5585 else
5586 Known.signBitMustBeOne();
5587 }
5588
5589 // 0 * +/-inf produces NaN, so check that neither operand can be infinity.
5590 if (KnownLHS.isKnownNeverInfinity() && KnownRHS.isKnownNeverInfinity()) {
5591 Known.knownNot(fcNan);
5592 break;
5593 }
5594
5595 const Function *F = cast<Instruction>(Op)->getFunction();
5596 if (!F)
5597 break;
5598
5599 if ((KnownRHS.isKnownNeverInfinity() ||
5600 KnownLHS.isKnownNeverLogicalZero(*F, Op->getType())) &&
5601 (KnownLHS.isKnownNeverInfinity() ||
5602 KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())))
5603 Known.knownNot(fcNan);
5604
5605 break;
5606 }
5607 case Instruction::FDiv:
5608 case Instruction::FRem: {
5609 if (Op->getOperand(0) == Op->getOperand(1)) {
5610 // TODO: Could filter out snan if we inspect the operand
5611 if (Op->getOpcode() == Instruction::FDiv) {
5612 // X / X is always exactly 1.0 or a NaN.
5613 Known.KnownFPClasses = fcNan | fcPosNormal;
5614 } else {
5615 // X % X is always exactly [+-]0.0 or a NaN.
5616 Known.KnownFPClasses = fcNan | fcZero;
5617 }
5618
5619 break;
5620 }
5621
5622 const bool WantNan = (InterestedClasses & fcNan) != fcNone;
5623 const bool WantNegative = (InterestedClasses & fcNegative) != fcNone;
5624 const bool WantPositive =
5625 Opc == Instruction::FRem && (InterestedClasses & fcPositive) != fcNone;
5626 if (!WantNan && !WantNegative && !WantPositive)
5627 break;
5628
5629 KnownFPClass KnownLHS, KnownRHS;
5630
5631 computeKnownFPClass(Op->getOperand(1), DemandedElts,
5632 fcNan | fcInf | fcZero | fcNegative, KnownRHS,
5633 Depth + 1, Q);
5634
5635 bool KnowSomethingUseful =
5636 KnownRHS.isKnownNeverNaN() || KnownRHS.isKnownNever(fcNegative);
5637
5638 if (KnowSomethingUseful || WantPositive) {
5639 const FPClassTest InterestedLHS =
5640 WantPositive ? fcAllFlags
5641 : KnownFPClass::OrderedLessThanZeroMask | fcNan;
5642
5643 computeKnownFPClass(Op->getOperand(0), DemandedElts,
5644 InterestedClasses & InterestedLHS, KnownLHS,
5645 Depth + 1, Q);
5646 }
5647
5648 const Function *F = cast<Instruction>(Op)->getFunction();
5649
5650 if (Op->getOpcode() == Instruction::FDiv) {
5651 // Only 0/0, Inf/Inf produce NaN.
5652 if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
5653 (KnownLHS.isKnownNeverInfinity() ||
5654 KnownRHS.isKnownNeverInfinity()) &&
5655 ((F && KnownLHS.isKnownNeverLogicalZero(*F, Op->getType())) ||
5656 (F && KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())))) {
5657 Known.knownNot(fcNan);
5658 }
5659
5660 // X / -0.0 is -Inf (or NaN).
5661 // +X / +X is +X
5662 if (KnownLHS.isKnownNever(fcNegative) && KnownRHS.isKnownNever(fcNegative))
5663 Known.knownNot(fcNegative);
5664 } else {
5665 // Inf REM x and x REM 0 produce NaN.
5666 if (KnownLHS.isKnownNeverNaN() && KnownRHS.isKnownNeverNaN() &&
5667 KnownLHS.isKnownNeverInfinity() && F &&
5668 KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())) {
5669 Known.knownNot(fcNan);
5670 }
5671
5672 // The sign for frem is the same as the first operand.
5673 if (KnownLHS.cannotBeOrderedLessThanZero())
5674 Known.knownNot(KnownFPClass::OrderedLessThanZeroMask);
5675 if (KnownLHS.cannotBeOrderedGreaterThanZero())
5676 Known.knownNot(KnownFPClass::OrderedGreaterThanZeroMask);
5677
5678 // See if we can be more aggressive about the sign of 0.
5679 if (KnownLHS.isKnownNever(fcNegative))
5680 Known.knownNot(fcNegative);
5681 if (KnownLHS.isKnownNever(fcPositive))
5682 Known.knownNot(fcPositive);
5683 }
5684
5685 break;
5686 }
5687 case Instruction::FPExt: {
5688 // Infinity, nan and zero propagate from source.
5689 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses,
5690 Known, Depth + 1, Q);
5691
5692 const fltSemantics &DstTy =
5693 Op->getType()->getScalarType()->getFltSemantics();
5694 const fltSemantics &SrcTy =
5695 Op->getOperand(0)->getType()->getScalarType()->getFltSemantics();
5696
5697 // All subnormal inputs should be in the normal range in the result type.
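 // For example, every subnormal half becomes a normal value after an fpext
 // to float.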
5698 if (APFloat::isRepresentableAsNormalIn(SrcTy, DstTy)) {
5699 if (Known.KnownFPClasses & fcPosSubnormal)
5700 Known.KnownFPClasses |= fcPosNormal;
5701 if (Known.KnownFPClasses & fcNegSubnormal)
5702 Known.KnownFPClasses |= fcNegNormal;
5703 Known.knownNot(fcSubnormal);
5704 }
5705
5706 // Sign bit of a nan isn't guaranteed.
5707 if (!Known.isKnownNeverNaN())
5708 Known.SignBit = std::nullopt;
5709 break;
5710 }
5711 case Instruction::FPTrunc: {
5712 computeKnownFPClassForFPTrunc(Op, DemandedElts, InterestedClasses, Known,
5713 Depth, Q);
5714 break;
5715 }
5716 case Instruction::SIToFP:
5717 case Instruction::UIToFP: {
5718 // Cannot produce nan
5719 Known.knownNot(fcNan);
5720
5721 // Integers cannot be subnormal
5722 Known.knownNot(fcSubnormal);
5723
5724 // sitofp and uitofp turn into +0.0 for zero.
5725 Known.knownNot(fcNegZero);
5726 if (Op->getOpcode() == Instruction::UIToFP)
5727 Known.signBitMustBeZero();
5728
5729 if (InterestedClasses & fcInf) {
5730 // Get width of largest magnitude integer (remove a bit if signed).
5731 // This still works for a signed minimum value because the largest FP
5732 // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
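 // For example (illustrative), i32 -> float can never yield an infinity
 // because 2^31 is far below the largest finite float, whereas u128 -> float
 // can round up to +inf, so no such guarantee is made there.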
5733 int IntSize = Op->getOperand(0)->getType()->getScalarSizeInBits();
5734 if (Op->getOpcode() == Instruction::SIToFP)
5735 --IntSize;
5736
5737 // If the exponent of the largest finite FP value can hold the largest
5738 // integer, the result of the cast must be finite.
5739 Type *FPTy = Op->getType()->getScalarType();
5740 if (ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize)
5741 Known.knownNot(fcInf);
5742 }
5743
5744 break;
5745 }
5746 case Instruction::ExtractElement: {
5747 // Look through extract element. If the index is non-constant or
5748 // out-of-range demand all elements, otherwise just the extracted element.
5749 const Value *Vec = Op->getOperand(0);
5750 const Value *Idx = Op->getOperand(1);
5751 auto *CIdx = dyn_cast<ConstantInt>(Idx);
5752
5753 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
5754 unsigned NumElts = VecTy->getNumElements();
5755 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
5756 if (CIdx && CIdx->getValue().ult(NumElts))
5757 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
5758 return computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known,
5759 Depth + 1, Q);
5760 }
5761
5762 break;
5763 }
5764 case Instruction::InsertElement: {
5765 if (isa<ScalableVectorType>(Op->getType()))
5766 return;
5767
5768 const Value *Vec = Op->getOperand(0);
5769 const Value *Elt = Op->getOperand(1);
5770 auto *CIdx = dyn_cast<ConstantInt>(Op->getOperand(2));
5771 unsigned NumElts = DemandedElts.getBitWidth();
5772 APInt DemandedVecElts = DemandedElts;
5773 bool NeedsElt = true;
5774 // If we know the index we are inserting to, clear it from Vec check.
5775 if (CIdx && CIdx->getValue().ult(NumElts)) {
5776 DemandedVecElts.clearBit(CIdx->getZExtValue());
5777 NeedsElt = DemandedElts[CIdx->getZExtValue()];
5778 }
5779
5780 // Do we demand the inserted element?
5781 if (NeedsElt) {
5782 computeKnownFPClass(Elt, Known, InterestedClasses, Depth + 1, Q);
5783 // If we don't know any bits, early out.
5784 if (Known.isUnknown())
5785 break;
5786 } else {
5787 Known.KnownFPClasses = fcNone;
5788 }
5789
5790 // Do we need anymore elements from Vec?
5791 if (!DemandedVecElts.isZero()) {
5792 KnownFPClass Known2;
5793 computeKnownFPClass(Vec, DemandedVecElts, InterestedClasses, Known2,
5794 Depth + 1, Q);
5795 Known |= Known2;
5796 }
5797
5798 break;
5799 }
5800 case Instruction::ShuffleVector: {
5801 // For undef elements, we don't know anything about the common state of
5802 // the shuffle result.
5803 APInt DemandedLHS, DemandedRHS;
5804 auto *Shuf = dyn_cast<ShuffleVectorInst>(Op);
5805 if (!Shuf || !getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
5806 return;
5807
5808 if (!!DemandedLHS) {
5809 const Value *LHS = Shuf->getOperand(0);
5810 computeKnownFPClass(LHS, DemandedLHS, InterestedClasses, Known,
5811 Depth + 1, Q);
5812
5813 // If we don't know any bits, early out.
5814 if (Known.isUnknown())
5815 break;
5816 } else {
5817 Known.KnownFPClasses = fcNone;
5818 }
5819
5820 if (!!DemandedRHS) {
5821 KnownFPClass Known2;
5822 const Value *RHS = Shuf->getOperand(1);
5823 computeKnownFPClass(RHS, DemandedRHS, InterestedClasses, Known2,
5824 Depth + 1, Q);
5825 Known |= Known2;
5826 }
5827
5828 break;
5829 }
5830 case Instruction::ExtractValue: {
5831 const ExtractValueInst *Extract = cast<ExtractValueInst>(Op);
5832 ArrayRef<unsigned> Indices = Extract->getIndices();
5833 const Value *Src = Extract->getAggregateOperand();
5834 if (isa<StructType>(Src->getType()) && Indices.size() == 1 &&
5835 Indices[0] == 0) {
5836 if (const auto *II = dyn_cast<IntrinsicInst>(Src)) {
5837 switch (II->getIntrinsicID()) {
5838 case Intrinsic::frexp: {
5839 Known.knownNot(fcSubnormal);
5840
5841 KnownFPClass KnownSrc;
5842 computeKnownFPClass(II->getArgOperand(0), DemandedElts,
5843 InterestedClasses, KnownSrc, Depth + 1, Q);
5844
5845 const Function *F = cast<Instruction>(Op)->getFunction();
5846
5847 if (KnownSrc.isKnownNever(fcNegative))
5848 Known.knownNot(fcNegative);
5849 else {
5850 if (F && KnownSrc.isKnownNeverLogicalNegZero(*F, Op->getType()))
5851 Known.knownNot(fcNegZero);
5852 if (KnownSrc.isKnownNever(fcNegInf))
5853 Known.knownNot(fcNegInf);
5854 }
5855
5856 if (KnownSrc.isKnownNever(fcPositive))
5857 Known.knownNot(fcPositive);
5858 else {
5859 if (F && KnownSrc.isKnownNeverLogicalPosZero(*F, Op->getType()))
5860 Known.knownNot(fcPosZero);
5861 if (KnownSrc.isKnownNever(fcPosInf))
5862 Known.knownNot(fcPosInf);
5863 }
5864
5865 Known.propagateNaN(KnownSrc);
5866 return;
5867 }
5868 default:
5869 break;
5870 }
5871 }
5872 }
5873
5874 computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known, Depth + 1,
5875 Q);
5876 break;
5877 }
5878 case Instruction::PHI: {
5879 const PHINode *P = cast<PHINode>(Op);
5880 // Unreachable blocks may have zero-operand PHI nodes.
5881 if (P->getNumIncomingValues() == 0)
5882 break;
5883
5884 // Otherwise take the unions of the known bit sets of the operands,
5885 // taking conservative care to avoid excessive recursion.
5886 const unsigned PhiRecursionLimit = MaxAnalysisRecursionDepth - 2;
5887
5888 if (Depth < PhiRecursionLimit) {
5890 // Skip if every incoming value refers back to the PHI itself.
5890 if (isa_and_nonnull<UndefValue>(P->hasConstantValue()))
5891 break;
5892
5893 bool First = true;
5894
5895 for (const Use &U : P->operands()) {
5896 Value *IncValue = U.get();
5897 // Skip direct self references.
5898 if (IncValue == P)
5899 continue;
5900
5901 KnownFPClass KnownSrc;
5902 // Recurse, but cap the recursion to two levels, because we don't want
5903 // to waste time spinning around in loops. We need at least depth 2 to
5904 // detect known sign bits.
5905 computeKnownFPClass(IncValue, DemandedElts, InterestedClasses, KnownSrc,
5906 PhiRecursionLimit,
5907 Q.getWithoutCondContext().getWithInstruction(
5908 P->getIncomingBlock(U)->getTerminator()));
5909
5910 if (First) {
5911 Known = KnownSrc;
5912 First = false;
5913 } else {
5914 Known |= KnownSrc;
5915 }
5916
5917 if (Known.KnownFPClasses == fcAllFlags)
5918 break;
5919 }
5920 }
5921
5922 break;
5923 }
5924 default:
5925 break;
5926 }
5927}
5928
5929KnownFPClass llvm::computeKnownFPClass(const Value *V,
5930 const APInt &DemandedElts,
5931 FPClassTest InterestedClasses,
5932 unsigned Depth,
5933 const SimplifyQuery &SQ) {
5934 KnownFPClass KnownClasses;
5935 ::computeKnownFPClass(V, DemandedElts, InterestedClasses, KnownClasses, Depth,
5936 SQ);
5937 return KnownClasses;
5938}
5939
5940KnownFPClass llvm::computeKnownFPClass(const Value *V,
5941 FPClassTest InterestedClasses,
5942 unsigned Depth,
5943 const SimplifyQuery &SQ) {
5944 KnownFPClass Known;
5945 ::computeKnownFPClass(V, Known, InterestedClasses, Depth, SQ);
5946 return Known;
5947}
5948
5949Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
5950
5951 // All byte-wide stores are splatable, even of arbitrary variables.
5952 if (V->getType()->isIntegerTy(8))
5953 return V;
5954
5955 LLVMContext &Ctx = V->getContext();
5956
5957 // Undef values don't matter; any byte will do.
5958 auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
5959 if (isa<UndefValue>(V))
5960 return UndefInt8;
5961
5962 // Return Undef for zero-sized type.
5963 if (DL.getTypeStoreSize(V->getType()).isZero())
5964 return UndefInt8;
5965
5966 Constant *C = dyn_cast<Constant>(V);
5967 if (!C) {
5968 // Conceptually, we could handle things like:
5969 // %a = zext i8 %X to i16
5970 // %b = shl i16 %a, 8
5971 // %c = or i16 %a, %b
5972 // but until there is an example that actually needs this, it doesn't seem
5973 // worth worrying about.
5974 return nullptr;
5975 }
5976
5977 // Handle 'null' ConstantArrayZero etc.
5978 if (C->isNullValue())
5979 return Constant::getNullValue(Type::getInt8Ty(Ctx));
5980
5981 // Constant floating-point values can be handled as integer values if the
5982 // corresponding integer value is "byteable". An important case is 0.0.
5983 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
5984 Type *Ty = nullptr;
5985 if (CFP->getType()->isHalfTy())
5986 Ty = Type::getInt16Ty(Ctx);
5987 else if (CFP->getType()->isFloatTy())
5988 Ty = Type::getInt32Ty(Ctx);
5989 else if (CFP->getType()->isDoubleTy())
5990 Ty = Type::getInt64Ty(Ctx);
5991 // Don't handle long double formats, which have strange constraints.
5992 return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
5993 : nullptr;
5994 }
5995
5996 // We can handle constant integers whose width is a multiple of 8 bits.
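 // For example, i32 0xAAAAAAAA splats to the byte 0xAA, while i32 0x01020304
 // is rejected because its bytes differ.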
5997 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
5998 if (CI->getBitWidth() % 8 == 0) {
5999 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
6000 if (!CI->getValue().isSplat(8))
6001 return nullptr;
6002 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
6003 }
6004 }
6005
6006 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
6007 if (CE->getOpcode() == Instruction::IntToPtr) {
6008 if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
6009 unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
6010 if (Constant *Op = ConstantFoldIntegerCast(
6011 CE->getOperand(0), Type::getIntNTy(Ctx, BitWidth), false, DL))
6012 return isBytewiseValue(Op, DL);
6013 }
6014 }
6015 }
6016
6017 auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
6018 if (LHS == RHS)
6019 return LHS;
6020 if (!LHS || !RHS)
6021 return nullptr;
6022 if (LHS == UndefInt8)
6023 return RHS;
6024 if (RHS == UndefInt8)
6025 return LHS;
6026 return nullptr;
6027 };
6028
6029 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
6030 Value *Val = UndefInt8;
6031 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
6032 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
6033 return nullptr;
6034 return Val;
6035 }
6036
6037 if (isa<ConstantAggregate>(C)) {
6038 Value *Val = UndefInt8;
6039 for (Value *Op : C->operands())
6040 if (!(Val = Merge(Val, isBytewiseValue(Op, DL))))
6041 return nullptr;
6042 return Val;
6043 }
6044
6045 // Don't try to handle the handful of other constants.
6046 return nullptr;
6047}
6048
6049// This is the recursive version of BuildSubAggregate. It takes a few different
6050// arguments. Idxs is the index within the nested struct From that we are
6051// looking at now (which is of type IndexedType). IdxSkip is the number of
6052// indices from Idxs that should be left out when inserting into the resulting
6053// struct. To is the result struct built so far, new insertvalue instructions
6054// build on that.
6055static Value *BuildSubAggregate(Value *From, Value *To, Type *IndexedType,
6056 SmallVectorImpl<unsigned> &Idxs,
6057 unsigned IdxSkip,
6058 BasicBlock::iterator InsertBefore) {
6059 StructType *STy = dyn_cast<StructType>(IndexedType);
6060 if (STy) {
6061 // Save the original To argument so we can modify it
6062 Value *OrigTo = To;
6063 // General case, the type indexed by Idxs is a struct
6064 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
6065 // Process each struct element recursively
6066 Idxs.push_back(i);
6067 Value *PrevTo = To;
6068 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
6069 InsertBefore);
6070 Idxs.pop_back();
6071 if (!To) {
6072 // Couldn't find any inserted value for this index? Cleanup
6073 while (PrevTo != OrigTo) {
6074 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
6075 PrevTo = Del->getAggregateOperand();
6076 Del->eraseFromParent();
6077 }
6078 // Stop processing elements
6079 break;
6080 }
6081 }
6082 // If we successfully found a value for each of our subaggregates
6083 if (To)
6084 return To;
6085 }
6086 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
6087 // the struct's elements had a value that was inserted directly. In the latter
6088 // case, perhaps we can't determine each of the subelements individually, but
6089 // we might be able to find the complete struct somewhere.
6090
6091 // Find the value that is at that particular spot
6092 Value *V = FindInsertedValue(From, Idxs);
6093
6094 if (!V)
6095 return nullptr;
6096
6097 // Insert the value in the new (sub) aggregate
6098 return InsertValueInst::Create(To, V, ArrayRef(Idxs).slice(IdxSkip), "tmp",
6099 InsertBefore);
6100}
6101
6102// This helper takes a nested struct and extracts a part of it (which is again a
6103// struct) into a new value. For example, given the struct:
6104// { a, { b, { c, d }, e } }
6105// and the indices "1, 1" this returns
6106// { c, d }.
6107//
6108// It does this by inserting an insertvalue for each element in the resulting
6109// struct, as opposed to just inserting a single struct. This will only work if
6110// each of the elements of the substruct is known (i.e., inserted into From by an
6111// insertvalue instruction somewhere).
6112//
6113// All inserted insertvalue instructions are inserted before InsertBefore
6114static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
6115 BasicBlock::iterator InsertBefore) {
6116 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
6117 idx_range);
6118 Value *To = PoisonValue::get(IndexedType);
6119 SmallVector<unsigned, 10> Idxs(idx_range);
6120 unsigned IdxSkip = Idxs.size();
6121
6122 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
6123}
6124
6125/// Given an aggregate and a sequence of indices, see if the scalar value
6126/// indexed is already around as a register, for example if it was inserted
6127/// directly into the aggregate.
6128///
6129/// If InsertBefore is not null, this function will duplicate (modified)
6130/// insertvalues when a part of a nested struct is extracted.
6131Value *
6132llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
6133 std::optional<BasicBlock::iterator> InsertBefore) {
6134 // Nothing to index? Just return V then (this is useful at the end of our
6135 // recursion).
6136 if (idx_range.empty())
6137 return V;
6138 // We have indices, so V should have an indexable type.
6139 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
6140 "Not looking at a struct or array?");
6141 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
6142 "Invalid indices for type?");
6143
6144 if (Constant *C = dyn_cast<Constant>(V)) {
6145 C = C->getAggregateElement(idx_range[0]);
6146 if (!C) return nullptr;
6147 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
6148 }
6149
6150 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
6151 // Loop the indices for the insertvalue instruction in parallel with the
6152 // requested indices
6153 const unsigned *req_idx = idx_range.begin();
6154 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
6155 i != e; ++i, ++req_idx) {
6156 if (req_idx == idx_range.end()) {
6157 // We can't handle this without inserting insertvalues
6158 if (!InsertBefore)
6159 return nullptr;
6160
6161 // The requested index identifies a part of a nested aggregate. Handle
6162 // this specially. For example,
6163 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
6164 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
6165 // %C = extractvalue {i32, { i32, i32 } } %B, 1
6166 // This can be changed into
6167 // %A = insertvalue {i32, i32 } undef, i32 10, 0
6168 // %C = insertvalue {i32, i32 } %A, i32 11, 1
6169 // which allows the unused 0,0 element from the nested struct to be
6170 // removed.
6171 return BuildSubAggregate(V, ArrayRef(idx_range.begin(), req_idx),
6172 *InsertBefore);
6173 }
6174
6175 // This insertvalue inserts something other than what we are looking for.
6176 // See if the (aggregate) value inserted into has the value we are
6177 // looking for, then.
6178 if (*req_idx != *i)
6179 return FindInsertedValue(I->getAggregateOperand(), idx_range,
6180 InsertBefore);
6181 }
6182 // If we end up here, the indices of the insertvalue match with those
6183 // requested (though possibly only partially). Now we recursively look at
6184 // the inserted value, passing any remaining indices.
6185 return FindInsertedValue(I->getInsertedValueOperand(),
6186 ArrayRef(req_idx, idx_range.end()), InsertBefore);
6187 }
6188
6189 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
6190 // If we're extracting a value from an aggregate that was extracted from
6191 // something else, we can extract from that something else directly instead.
6192 // However, we will need to chain I's indices with the requested indices.
6193
6194 // Calculate the number of indices required
6195 unsigned size = I->getNumIndices() + idx_range.size();
6196 // Allocate some space to put the new indices in
6197 SmallVector<unsigned, 5> Idxs;
6198 Idxs.reserve(size);
6199 // Add indices from the extract value instruction
6200 Idxs.append(I->idx_begin(), I->idx_end());
6201
6202 // Add requested indices
6203 Idxs.append(idx_range.begin(), idx_range.end());
6204
6205 assert(Idxs.size() == size
6206 && "Number of indices added not correct?");
6207
6208 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
6209 }
6210 // Otherwise, we don't know (such as, extracting from a function return value
6211 // or load instruction)
6212 return nullptr;
6213}
6214
6215static bool isGEPBasedOnPointerToString(const GEPOperator *GEP,
6216 unsigned CharSize) {
6217 // Make sure the GEP has exactly three arguments.
6218 if (GEP->getNumOperands() != 3)
6219 return false;
6220
6221 // Make sure the index-ee is a pointer to an array of \p CharSize
6222 // integers.
6223 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
6224 if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
6225 return false;
6226
6227 // Check to make sure that the first operand of the GEP is an integer and
6228 // has value 0 so that we are sure we're indexing into the initializer.
6229 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
6230 if (!FirstIdx || !FirstIdx->isZero())
6231 return false;
6232
6233 return true;
6234}
6235
6236// If V refers to an initialized global constant, set Slice either to
6237// its initializer if the size of its elements equals ElementSize, or,
6238// for ElementSize == 8, to its representation as an array of unsigned
6239// char. Return true on success.
6240// Offset is in the unit "nr of ElementSize sized elements".
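// For example (illustrative), for a pointer 3 bytes into a constant global
// i8 array and ElementSize == 8, Slice describes the array's initializer
// with Slice.Offset advanced by 3 elements.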
6241bool llvm::getConstantDataArrayInfo(const Value *V,
6242 ConstantDataArraySlice &Slice,
6243 unsigned ElementSize, uint64_t Offset) {
6244 assert(V && "V should not be null.");
6245 assert((ElementSize % 8) == 0 &&
6246 "ElementSize expected to be a multiple of the size of a byte.");
6247 unsigned ElementSizeInBytes = ElementSize / 8;
6248
6249 // Drill down into the pointer expression V, ignoring any intervening
6250 // casts, and determine the identity of the object it references along
6251 // with the cumulative byte offset into it.
6252 const GlobalVariable *GV =
6253 dyn_cast<GlobalVariable>(getUnderlyingObject(V));
6254 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
6255 // Fail if V is not based on constant global object.
6256 return false;
6257
6258 const DataLayout &DL = GV->getDataLayout();
6259 APInt Off(DL.getIndexTypeSizeInBits(V->getType()), 0);
6260
6261 if (GV != V->stripAndAccumulateConstantOffsets(DL, Off,
6262 /*AllowNonInbounds*/ true))
6263 // Fail if a constant offset could not be determined.
6264 return false;
6265
6266 uint64_t StartIdx = Off.getLimitedValue();
6267 if (StartIdx == UINT64_MAX)
6268 // Fail if the constant offset is excessive.
6269 return false;
6270
6271 // Off/StartIdx is in the unit of bytes. So we need to convert to number of
6272 // elements. Simply bail out if that isn't possible.
6273 if ((StartIdx % ElementSizeInBytes) != 0)
6274 return false;
6275
6276 Offset += StartIdx / ElementSizeInBytes;
6277 ConstantDataArray *Array = nullptr;
6278 ArrayType *ArrayTy = nullptr;
6279
6280 if (GV->getInitializer()->isNullValue()) {
6281 Type *GVTy = GV->getValueType();
6282 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedValue();
6283 uint64_t Length = SizeInBytes / ElementSizeInBytes;
6284
6285 Slice.Array = nullptr;
6286 Slice.Offset = 0;
6287 // Return an empty Slice for undersized constants to let callers
6288 // transform even undefined library calls into simpler, well-defined
6289 // expressions. This is preferable to making the calls although it
6290 // prevents sanitizers from detecting such calls.
6291 Slice.Length = Length < Offset ? 0 : Length - Offset;
6292 return true;
6293 }
6294
6295 auto *Init = const_cast<Constant *>(GV->getInitializer());
6296 if (auto *ArrayInit = dyn_cast<ConstantDataArray>(Init)) {
6297 Type *InitElTy = ArrayInit->getElementType();
6298 if (InitElTy->isIntegerTy(ElementSize)) {
6299 // If Init is an initializer for an array of the expected type
6300 // and size, use it as is.
6301 Array = ArrayInit;
6302 ArrayTy = ArrayInit->getType();
6303 }
6304 }
6305
6306 if (!Array) {
6307 if (ElementSize != 8)
6308 // TODO: Handle conversions to larger integral types.
6309 return false;
6310
6311 // Otherwise extract the portion of the initializer starting
6312 // at Offset as an array of bytes, and reset Offset.
6313 Init = ReadByteArrayFromGlobal(GV, Offset);
6314 if (!Init)
6315 return false;
6316
6317 Offset = 0;
6318 Array = dyn_cast<ConstantDataArray>(Init);
6319 ArrayTy = dyn_cast<ArrayType>(Init->getType());
6320 }
6321
6322 uint64_t NumElts = ArrayTy->getArrayNumElements();
6323 if (Offset > NumElts)
6324 return false;
6325
6326 Slice.Array = Array;
6327 Slice.Offset = Offset;
6328 Slice.Length = NumElts - Offset;
6329 return true;
6330}
6331
6332/// Extract bytes from the initializer of the constant array V, which need
6333/// not be a nul-terminated string. On success, store the bytes in Str and
6334/// return true. When TrimAtNul is set, Str will contain only the bytes up
6335/// to but not including the first nul. Return false on failure.
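/// For example (illustrative), for an initializer of c"hi\00xyz" this yields
/// "hi" when TrimAtNul is set and the full six bytes otherwise.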
6336bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
6337 bool TrimAtNul) {
6338 ConstantDataArraySlice Slice;
6339 if (!getConstantDataArrayInfo(V, Slice, 8))
6340 return false;
6341
6342 if (Slice.Array == nullptr) {
6343 if (TrimAtNul) {
6344 // Return a nul-terminated string even for an empty Slice. This is
6345 // safe because all existing SimplifyLibcalls callers require string
6346 // arguments and the behavior of the functions they fold is undefined
6347 // otherwise. Folding the calls this way is preferable to making
6348 // the undefined library calls, even though it prevents sanitizers
6349 // from reporting such calls.
6350 Str = StringRef();
6351 return true;
6352 }
6353 if (Slice.Length == 1) {
6354 Str = StringRef("", 1);
6355 return true;
6356 }
6357 // We cannot instantiate a StringRef as we do not have an appropriate string
6358 // of 0s at hand.
6359 return false;
6360 }
6361
6362 // Start out with the entire array in the StringRef.
6363 Str = Slice.Array->getAsString();
6364 // Skip over 'offset' bytes.
6365 Str = Str.substr(Slice.Offset);
6366
6367 if (TrimAtNul) {
6368 // Trim off the \0 and anything after it. If the array is not nul
6369 // terminated, we just return the whole end of string. The client may know
6370 // some other way that the string is length-bound.
6371 Str = Str.substr(0, Str.find('\0'));
6372 }
6373 return true;
6374}
6375
6376// These next two are very similar to the above, but also look through PHI
6377// nodes.
6378// TODO: See if we can integrate these two together.
6379
6380/// If we can compute the length of the string pointed to by
6381/// the specified pointer, return 'len+1'. If we can't, return 0.
6382static uint64_t GetStringLengthH(const Value *V,
6383 SmallPtrSetImpl<const PHINode *> &PHIs,
6384 unsigned CharSize) {
6385 // Look through noop bitcast instructions.
6386 V = V->stripPointerCasts();
6387
6388 // If this is a PHI node, there are two cases: either we have already seen it
6389 // or we haven't.
6390 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
6391 if (!PHIs.insert(PN).second)
6392 return ~0ULL; // already in the set.
6393
6394 // If it was new, see if all the input strings are the same length.
6395 uint64_t LenSoFar = ~0ULL;
6396 for (Value *IncValue : PN->incoming_values()) {
6397 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
6398 if (Len == 0) return 0; // Unknown length -> unknown.
6399
6400 if (Len == ~0ULL) continue;
6401
6402 if (Len != LenSoFar && LenSoFar != ~0ULL)
6403 return 0; // Disagree -> unknown.
6404 LenSoFar = Len;
6405 }
6406
6407 // Success, all agree.
6408 return LenSoFar;
6409 }
6410
6411 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
6412 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
6413 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
6414 if (Len1 == 0) return 0;
6415 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
6416 if (Len2 == 0) return 0;
6417 if (Len1 == ~0ULL) return Len2;
6418 if (Len2 == ~0ULL) return Len1;
6419 if (Len1 != Len2) return 0;
6420 return Len1;
6421 }
6422
6423 // Otherwise, see if we can read the string.
6424 ConstantDataArraySlice Slice;
6425 if (!getConstantDataArrayInfo(V, Slice, CharSize))
6426 return 0;
6427
6428 if (Slice.Array == nullptr)
6429 // Zeroinitializer (including an empty one).
6430 return 1;
6431
6432 // Search for the first nul character. Return a conservative result even
6433 // when there is no nul. This is safe since otherwise the string function
6434 // being folded such as strlen is undefined, and can be preferable to
6435 // making the undefined library call.
6436 unsigned NullIndex = 0;
6437 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
6438 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
6439 break;
6440 }
6441
6442 return NullIndex + 1;
6443}
6444
6445/// If we can compute the length of the string pointed to by
6446/// the specified pointer, return 'len+1'. If we can't, return 0.
6447uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
6448 if (!V->getType()->isPointerTy())
6449 return 0;
6450
6451 SmallPtrSet<const PHINode *, 32> PHIs;
6452 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
6453 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
6454 // an empty string as a length.
6455 return Len == ~0ULL ? 1 : Len;
6456}
6457
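Usage sketch (illustrative, not part of ValueTracking.cpp; the helper name and calling context are assumed): a simplify-libcalls style caller could consume GetStringLength's 'len+1' convention like this.

static Value *foldStrlenToConstant(CallInst *StrlenCall) {
  // GetStringLength returns len+1 (counting the nul terminator),
  // or 0 when the length cannot be computed.
  if (uint64_t LenPlusOne = GetStringLength(StrlenCall->getArgOperand(0)))
    return ConstantInt::get(StrlenCall->getType(), LenPlusOne - 1);
  return nullptr; // Unknown length; keep the original call.
}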
6458const Value *
6459llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
6460 bool MustPreserveNullness) {
6461 assert(Call &&
6462 "getArgumentAliasingToReturnedPointer only works on nonnull calls");
6463 if (const Value *RV = Call->getReturnedArgOperand())
6464 return RV;
6465 // This can be used only as an aliasing property.
6466 if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
6467 Call, MustPreserveNullness))
6468 return Call->getArgOperand(0);
6469 return nullptr;
6470}
6471
6472bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
6473 const CallBase *Call, bool MustPreserveNullness) {
6474 switch (Call->getIntrinsicID()) {
6475 case Intrinsic::launder_invariant_group:
6476 case Intrinsic::strip_invariant_group:
6477 case Intrinsic::aarch64_irg:
6478 case Intrinsic::aarch64_tagp:
6479 // The amdgcn_make_buffer_rsrc function does not alter the address of the
6480 // input pointer (and thus preserves null-ness for the purposes of escape
6481 // analysis, which is where the MustPreserveNullness flag comes into play).
6482 // However, it will not necessarily map ptr addrspace(N) null to ptr
6483 // addrspace(8) null, aka the "null descriptor", which has "all loads return
6484 // 0, all stores are dropped" semantics. Given the context of this intrinsic
6485 // list, no one should be relying on such a strict interpretation of
6486 // MustPreserveNullness (and, at time of writing, they are not), but we
6487 // document this fact out of an abundance of caution.
6488 case Intrinsic::amdgcn_make_buffer_rsrc:
6489 return true;
6490 case Intrinsic::ptrmask:
6491 return !MustPreserveNullness;
6492 case Intrinsic::threadlocal_address:
6493 // The underlying variable changes with thread ID. The Thread ID may change
6494 // at coroutine suspend points.
6495 return !Call->getParent()->getParent()->isPresplitCoroutine();
6496 default:
6497 return false;
6498 }
6499}
6500
6501/// \p PN defines a loop-variant pointer to an object. Check if the
6502/// previous iteration of the loop was referring to the same object as \p PN.
6503static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
6504 const LoopInfo *LI) {
6505 // Find the loop-defined value.
6506 Loop *L = LI->getLoopFor(PN->getParent());
6507 if (PN->getNumIncomingValues() != 2)
6508 return true;
6509
6510 // Find the value from previous iteration.
6511 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
6512 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
6513 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
6514 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
6515 return true;
6516
6517 // If a new pointer is loaded in the loop, the pointer references a different
6518 // object in every iteration. E.g.:
6519 // for (i)
6520 // int *p = a[i];
6521 // ...
6522 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
6523 if (!L->isLoopInvariant(Load->getPointerOperand()))
6524 return false;
6525 return true;
6526}
6527
6528const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
6529 if (!V->getType()->isPointerTy())
6530 return V;
6531 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
6532 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
6533 V = GEP->getPointerOperand();
6534 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
6535 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
6536 Value *NewV = cast<Operator>(V)->getOperand(0);
6537 if (!NewV->getType()->isPointerTy())
6538 return V;
6539 V = NewV;
6540 } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
6541 if (GA->isInterposable())
6542 return V;
6543 V = GA->getAliasee();
6544 } else {
6545 if (auto *PHI = dyn_cast<PHINode>(V)) {
6546 // Look through single-arg phi nodes created by LCSSA.
6547 if (PHI->getNumIncomingValues() == 1) {
6548 V = PHI->getIncomingValue(0);
6549 continue;
6550 }
6551 } else if (auto *Call = dyn_cast<CallBase>(V)) {
6552 // CaptureTracking can know about special capturing properties of some
6553 // intrinsics like launder.invariant.group, that can't be expressed with
6554 // the attributes, but have properties like returning aliasing pointer.
6555 // Because some analyses may assume that a nocapture pointer is never
6556 // returned by a special intrinsic (otherwise the function would have to
6557 // be marked with the returned attribute), it is crucial to use this
6558 // helper so that we stay in sync with CaptureTracking. Not using it may
6559 // cause weird miscompilations where two aliasing pointers are assumed to
6560 // be noalias.
6561 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
6562 V = RP;
6563 continue;
6564 }
6565 }
6566
6567 return V;
6568 }
6569 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
6570 }
6571 return V;
6572}
6573
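Illustrative sketch (not part of this file; helper name is hypothetical): because getUnderlyingObject strips GEPs, casts, and non-interposable aliases, two pointers that resolve to distinct allocas provably refer to different objects.

static bool areDistinctAllocas(const Value *P1, const Value *P2) {
  const Value *O1 = getUnderlyingObject(P1); // default MaxLookup applies
  const Value *O2 = getUnderlyingObject(P2);
  return O1 != O2 && isa<AllocaInst>(O1) && isa<AllocaInst>(O2);
}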
6574void llvm::getUnderlyingObjects(const Value *V,
6575 SmallVectorImpl<const Value *> &Objects,
6576 const LoopInfo *LI, unsigned MaxLookup) {
6577 SmallPtrSet<const Value *, 4> Visited;
6578 SmallVector<const Value *, 4> Worklist;
6579 Worklist.push_back(V);
6580 do {
6581 const Value *P = Worklist.pop_back_val();
6582 P = getUnderlyingObject(P, MaxLookup);
6583
6584 if (!Visited.insert(P).second)
6585 continue;
6586
6587 if (auto *SI = dyn_cast<SelectInst>(P)) {
6588 Worklist.push_back(SI->getTrueValue());
6589 Worklist.push_back(SI->getFalseValue());
6590 continue;
6591 }
6592
6593 if (auto *PN = dyn_cast<PHINode>(P)) {
6594 // If this PHI changes the underlying object in every iteration of the
6595 // loop, don't look through it. Consider:
6596 // int **A;
6597 // for (i) {
6598 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
6599 // Curr = A[i];
6600 // *Prev, *Curr;
6601 //
6602 // Prev is tracking Curr one iteration behind so they refer to different
6603 // underlying objects.
6604 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
6605 isSameUnderlyingObjectInLoop(PN, LI))
6606 append_range(Worklist, PN->incoming_values());
6607 else
6608 Objects.push_back(P);
6609 continue;
6610 }
6611
6612 Objects.push_back(P);
6613 } while (!Worklist.empty());
6614}
6615
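Illustrative sketch (hypothetical helper): when a pointer may flow through selects and phis, the plural getUnderlyingObjects collects every candidate base object; the check below succeeds only if all of them are allocas.

static bool allUnderlyingObjectsAreAllocas(const Value *Ptr,
                                           const LoopInfo *LI) {
  SmallVector<const Value *, 4> Objects;
  getUnderlyingObjects(Ptr, Objects, LI);
  return !Objects.empty() &&
         all_of(Objects, [](const Value *O) { return isa<AllocaInst>(O); });
}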
6616const Value *llvm::getUnderlyingObjectAggressive(const Value *V) {
6617 const unsigned MaxVisited = 8;
6618
6619 SmallPtrSet<const Value *, 8> Visited;
6620 SmallVector<const Value *, 8> Worklist;
6621 Worklist.push_back(V);
6622 const Value *Object = nullptr;
6623 // Used as fallback if we can't find a common underlying object through
6624 // recursion.
6625 bool First = true;
6626 const Value *FirstObject = getUnderlyingObject(V);
6627 do {
6628 const Value *P = Worklist.pop_back_val();
6629 P = First ? FirstObject : getUnderlyingObject(P);
6630 First = false;
6631
6632 if (!Visited.insert(P).second)
6633 continue;
6634
6635 if (Visited.size() == MaxVisited)
6636 return FirstObject;
6637
6638 if (auto *SI = dyn_cast<SelectInst>(P)) {
6639 Worklist.push_back(SI->getTrueValue());
6640 Worklist.push_back(SI->getFalseValue());
6641 continue;
6642 }
6643
6644 if (auto *PN = dyn_cast<PHINode>(P)) {
6645 append_range(Worklist, PN->incoming_values());
6646 continue;
6647 }
6648
6649 if (!Object)
6650 Object = P;
6651 else if (Object != P)
6652 return FirstObject;
6653 } while (!Worklist.empty());
6654
6655 return Object;
6656}
6657
6658/// This is the function that does the work of looking through basic
6659/// ptrtoint+arithmetic+inttoptr sequences.
6660static const Value *getUnderlyingObjectFromInt(const Value *V) {
6661 do {
6662 if (const Operator *U = dyn_cast<Operator>(V)) {
6663 // If we find a ptrtoint, we can transfer control back to the
6664 // regular getUnderlyingObjectFromInt.
6665 if (U->getOpcode() == Instruction::PtrToInt)
6666 return U->getOperand(0);
6667 // If we find an add of a constant, a multiplied value, or a phi, it's
6668 // likely that the other operand will lead us to the base
6669 // object. We don't have to worry about the case where the
6670 // object address is somehow being computed by the multiply,
6671 // because our callers only care when the result is an
6672 // identifiable object.
6673 if (U->getOpcode() != Instruction::Add ||
6674 (!isa<ConstantInt>(U->getOperand(1)) &&
6675 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
6676 !isa<PHINode>(U->getOperand(1))))
6677 return V;
6678 V = U->getOperand(0);
6679 } else {
6680 return V;
6681 }
6682 assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
6683 } while (true);
6684}
6685
6686/// This is a wrapper around getUnderlyingObjects and adds support for basic
6687/// ptrtoint+arithmetic+inttoptr sequences.
6688/// It returns false if unidentified object is found in getUnderlyingObjects.
6689bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
6690 SmallVectorImpl<Value *> &Objects) {
6692 SmallVector<const Value *, 4> Working(1, V);
6693 do {
6694 V = Working.pop_back_val();
6695
6696 SmallVector<const Value *, 4> Objs;
6697 getUnderlyingObjects(V, Objs);
6698
6699 for (const Value *V : Objs) {
6700 if (!Visited.insert(V).second)
6701 continue;
6702 if (Operator::getOpcode(V) == Instruction::IntToPtr) {
6703 const Value *O =
6704 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
6705 if (O->getType()->isPointerTy()) {
6706 Working.push_back(O);
6707 continue;
6708 }
6709 }
6710 // If getUnderlyingObjects fails to find an identifiable object,
6711 // getUnderlyingObjectsForCodeGen also fails for safety.
6712 if (!isIdentifiedObject(V)) {
6713 Objects.clear();
6714 return false;
6715 }
6716 Objects.push_back(const_cast<Value *>(V));
6717 }
6718 } while (!Working.empty());
6719 return true;
6720}
6721
6722AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
6723 AllocaInst *Result = nullptr;
6725 SmallVector<Value *, 4> Worklist;
6726
6727 auto AddWork = [&](Value *V) {
6728 if (Visited.insert(V).second)
6729 Worklist.push_back(V);
6730 };
6731
6732 AddWork(V);
6733 do {
6734 V = Worklist.pop_back_val();
6735 assert(Visited.count(V));
6736
6737 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
6738 if (Result && Result != AI)
6739 return nullptr;
6740 Result = AI;
6741 } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
6742 AddWork(CI->getOperand(0));
6743 } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
6744 for (Value *IncValue : PN->incoming_values())
6745 AddWork(IncValue);
6746 } else if (auto *SI = dyn_cast<SelectInst>(V)) {
6747 AddWork(SI->getTrueValue());
6748 AddWork(SI->getFalseValue());
6749 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
6750 if (OffsetZero && !GEP->hasAllZeroIndices())
6751 return nullptr;
6752 AddWork(GEP->getPointerOperand());
6753 } else if (CallBase *CB = dyn_cast<CallBase>(V)) {
6754 Value *Returned = CB->getReturnedArgOperand();
6755 if (Returned)
6756 AddWork(Returned);
6757 else
6758 return nullptr;
6759 } else {
6760 return nullptr;
6761 }
6762 } while (!Worklist.empty());
6763
6764 return Result;
6765}
6766
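Illustrative sketch (hypothetical helper): findAllocaForValue succeeds only when every path through casts, phis, selects, and (optionally zero-index) GEPs leads back to a single alloca.

static bool pointsIntoSingleAllocaAtOffsetZero(Value *Ptr) {
  // With OffsetZero=true, any GEP with a non-zero index makes the walk fail.
  return findAllocaForValue(Ptr, /*OffsetZero=*/true) != nullptr;
}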
6767static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
6768 const Value *V, bool AllowLifetime, bool AllowDroppable) {
6769 for (const User *U : V->users()) {
6770 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
6771 if (!II)
6772 return false;
6773
6774 if (AllowLifetime && II->isLifetimeStartOrEnd())
6775 continue;
6776
6777 if (AllowDroppable && II->isDroppable())
6778 continue;
6779
6780 return false;
6781 }
6782 return true;
6783}
6784
6785bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
6786 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
6787 V, /* AllowLifetime */ true, /* AllowDroppable */ false);
6788}
6789bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
6790 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
6791 V, /* AllowLifetime */ true, /* AllowDroppable */ true);
6792}
6793
6794bool llvm::isSafeToSpeculativelyExecute(const Instruction *Inst,
6795 const Instruction *CtxI,
6796 AssumptionCache *AC,
6797 const DominatorTree *DT,
6798 const TargetLibraryInfo *TLI,
6799 bool UseVariableInfo) {
6800 return isSafeToSpeculativelyExecuteWithOpcode(Inst->getOpcode(), Inst, CtxI,
6801 AC, DT, TLI, UseVariableInfo);
6802}
6803
6804bool llvm::isSafeToSpeculativelyExecuteWithOpcode(
6805 unsigned Opcode, const Instruction *Inst, const Instruction *CtxI,
6806 AssumptionCache *AC, const DominatorTree *DT, const TargetLibraryInfo *TLI,
6807 bool UseVariableInfo) {
6808#ifndef NDEBUG
6809 if (Inst->getOpcode() != Opcode) {
6810 // Check that the operands are actually compatible with the Opcode override.
6811 auto hasEqualReturnAndLeadingOperandTypes =
6812 [](const Instruction *Inst, unsigned NumLeadingOperands) {
6813 if (Inst->getNumOperands() < NumLeadingOperands)
6814 return false;
6815 const Type *ExpectedType = Inst->getType();
6816 for (unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp)
6817 if (Inst->getOperand(ItOp)->getType() != ExpectedType)
6818 return false;
6819 return true;
6820 };
6821 assert(!Instruction::isBinaryOp(Opcode) ||
6822 hasEqualReturnAndLeadingOperandTypes(Inst, 2));
6823 assert(!Instruction::isUnaryOp(Opcode) ||
6824 hasEqualReturnAndLeadingOperandTypes(Inst, 1));
6825 }
6826#endif
6827
6828 switch (Opcode) {
6829 default:
6830 return true;
6831 case Instruction::UDiv:
6832 case Instruction::URem: {
6833 // x / y is undefined if y == 0.
6834 const APInt *V;
6835 if (match(Inst->getOperand(1), m_APInt(V)))
6836 return *V != 0;
6837 return false;
6838 }
6839 case Instruction::SDiv:
6840 case Instruction::SRem: {
6841 // x / y is undefined if y == 0 or x == INT_MIN and y == -1
6842 const APInt *Numerator, *Denominator;
6843 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
6844 return false;
6845 // We cannot hoist this division if the denominator is 0.
6846 if (*Denominator == 0)
6847 return false;
6848 // It's safe to hoist if the denominator is not 0 or -1.
6849 if (!Denominator->isAllOnes())
6850 return true;
6851 // At this point we know that the denominator is -1. It is safe to hoist as
6852 // long we know that the numerator is not INT_MIN.
6853 if (match(Inst->getOperand(0), m_APInt(Numerator)))
6854 return !Numerator->isMinSignedValue();
6855 // The numerator *might* be MinSignedValue.
6856 return false;
6857 }
6858 case Instruction::Load: {
6859 if (!UseVariableInfo)
6860 return false;
6861
6862 const LoadInst *LI = dyn_cast<LoadInst>(Inst);
6863 if (!LI)
6864 return false;
6865 if (mustSuppressSpeculation(*LI))
6866 return false;
6867 const DataLayout &DL = LI->getDataLayout();
6868 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
6869 LI->getType(), LI->getAlign(), DL,
6870 CtxI, AC, DT, TLI);
6871 }
6872 case Instruction::Call: {
6873 auto *CI = dyn_cast<const CallInst>(Inst);
6874 if (!CI)
6875 return false;
6876 const Function *Callee = CI->getCalledFunction();
6877
6878 // The called function could have undefined behavior or side-effects, even
6879 // if marked readnone nounwind.
6880 return Callee && Callee->isSpeculatable();
6881 }
6882 case Instruction::VAArg:
6883 case Instruction::Alloca:
6884 case Instruction::Invoke:
6885 case Instruction::CallBr:
6886 case Instruction::PHI:
6887 case Instruction::Store:
6888 case Instruction::Ret:
6889 case Instruction::Br:
6890 case Instruction::IndirectBr:
6891 case Instruction::Switch:
6892 case Instruction::Unreachable:
6893 case Instruction::Fence:
6894 case Instruction::AtomicRMW:
6895 case Instruction::AtomicCmpXchg:
6896 case Instruction::LandingPad:
6897 case Instruction::Resume:
6898 case Instruction::CatchSwitch:
6899 case Instruction::CatchPad:
6900 case Instruction::CatchRet:
6901 case Instruction::CleanupPad:
6902 case Instruction::CleanupRet:
6903 return false; // Misc instructions which have effects
6904 }
6905}
6906
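Illustrative sketch (hypothetical helper; speculation safety is only one of the conditions a real hoisting transform must check): a pass asking whether an instruction may be executed unconditionally at an earlier point could use the query as below.

static bool speculationIsSafeAt(Instruction *I, Instruction *InsertPt,
                                AssumptionCache *AC, DominatorTree *DT) {
  // Rejects e.g. a udiv whose divisor is not known non-zero, or a load from
  // a pointer not known dereferenceable and aligned at InsertPt.
  return isSafeToSpeculativelyExecute(I, InsertPt, AC, DT);
}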
6907bool llvm::mayHaveNonDefUseDependency(const Instruction &I) {
6908 if (I.mayReadOrWriteMemory())
6909 // Memory dependency possible
6910 return true;
6911 if (!isSafeToSpeculativelyExecute(&I))
6912 // Can't move above a maythrow call or infinite loop. Or if an
6913 // inalloca alloca, above a stacksave call.
6914 return true;
6915 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
6916 // 1) Can't reorder two inf-loop calls, even if readonly
6917 // 2) Also can't reorder an inf-loop call below an instruction which isn't
6918 // safe to speculatively execute. (Inverse of above)
6919 return true;
6920 return false;
6921}
6922
6923/// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
6924static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
6925 switch (OR) {
6926 case ConstantRange::OverflowResult::MayOverflow:
6927 return OverflowResult::MayOverflow;
6928 case ConstantRange::OverflowResult::AlwaysOverflowsLow:
6929 return OverflowResult::AlwaysOverflowsLow;
6930 case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
6931 return OverflowResult::AlwaysOverflowsHigh;
6932 case ConstantRange::OverflowResult::NeverOverflows:
6933 return OverflowResult::NeverOverflows;
6934 }
6935 llvm_unreachable("Unknown OverflowResult");
6936}
6937
6938/// Combine constant ranges from computeConstantRange() and computeKnownBits().
6939static ConstantRange
6940computeConstantRangeIncludingKnownBits(const WithCache<const Value *> &V,
6941 bool ForSigned,
6942 const SimplifyQuery &SQ) {
6943 ConstantRange CR1 =
6944 ConstantRange::fromKnownBits(V.getKnownBits(SQ), ForSigned);
6945 ConstantRange CR2 = computeConstantRange(V, ForSigned, SQ.IIQ.UseInstrInfo);
6946 ConstantRange::PreferredRangeType RangeType =
6947 ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
6948 return CR1.intersectWith(CR2, RangeType);
6949}
6950
6951OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
6952 const Value *RHS,
6953 const SimplifyQuery &SQ,
6954 bool IsNSW) {
6955 KnownBits LHSKnown = computeKnownBits(LHS, /*Depth=*/0, SQ);
6956 KnownBits RHSKnown = computeKnownBits(RHS, /*Depth=*/0, SQ);
6957
6958 // mul nsw of two non-negative numbers is also nuw.
6959 if (IsNSW && LHSKnown.isNonNegative() && RHSKnown.isNonNegative())
6960 return OverflowResult::NeverOverflows;
6961
6962 ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
6963 ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
6964 return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
6965}
6966
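Illustrative sketch (hypothetical helper; SQ is an assumed SimplifyQuery for the enclosing function): a transform could use this overflow result to attach the nuw flag to a multiply.

static void refineMulNUW(BinaryOperator *Mul, const SimplifyQuery &SQ) {
  if (computeOverflowForUnsignedMul(Mul->getOperand(0), Mul->getOperand(1), SQ,
                                    /*IsNSW=*/Mul->hasNoSignedWrap()) ==
      OverflowResult::NeverOverflows)
    Mul->setHasNoUnsignedWrap(true);
}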
6967OverflowResult llvm::computeOverflowForSignedMul(const Value *LHS,
6968 const Value *RHS,
6969 const SimplifyQuery &SQ) {
6970 // Multiplying n * m significant bits yields a result of n + m significant
6971 // bits. If the total number of significant bits does not exceed the
6972 // result bit width (minus 1), there is no overflow.
6973 // This means if we have enough leading sign bits in the operands
6974 // we can guarantee that the result does not overflow.
6975 // Ref: "Hacker's Delight" by Henry Warren
6976 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
6977
6978 // Note that underestimating the number of sign bits gives a more
6979 // conservative answer.
6980 unsigned SignBits =
6981 ::ComputeNumSignBits(LHS, 0, SQ) + ::ComputeNumSignBits(RHS, 0, SQ);
6982
6983 // First handle the easy case: if we have enough sign bits there's
6984 // definitely no overflow.
6985 if (SignBits > BitWidth + 1)
6986 return OverflowResult::NeverOverflows;
6987
6988 // There are two ambiguous cases where there can be no overflow:
6989 // SignBits == BitWidth + 1 and
6990 // SignBits == BitWidth
6991 // The second case is difficult to check, therefore we only handle the
6992 // first case.
6993 if (SignBits == BitWidth + 1) {
6994 // It overflows only when both arguments are negative and the true
6995 // product is exactly the minimum negative number.
6996 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
6997 // For simplicity we just check if at least one side is not negative.
6998 KnownBits LHSKnown = computeKnownBits(LHS, /*Depth=*/0, SQ);
6999 KnownBits RHSKnown = computeKnownBits(RHS, /*Depth=*/0, SQ);
7000 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
7001 return OverflowResult::NeverOverflows;
7002 }
7003 return OverflowResult::MayOverflow;
7004}
7005
7006OverflowResult
7007llvm::computeOverflowForUnsignedAdd(const WithCache<const Value *> &LHS,
7008 const WithCache<const Value *> &RHS,
7009 const SimplifyQuery &SQ) {
7010 ConstantRange LHSRange =
7011 computeConstantRangeIncludingKnownBits(LHS, /*ForSigned=*/false, SQ);
7012 ConstantRange RHSRange =
7013 computeConstantRangeIncludingKnownBits(RHS, /*ForSigned=*/false, SQ);
7014 return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
7015}
7016
7017static OverflowResult
7018computeOverflowForSignedAdd(const WithCache<const Value *> &LHS,
7019 const WithCache<const Value *> &RHS,
7020 const AddOperator *Add, const SimplifyQuery &SQ) {
7021 if (Add && Add->hasNoSignedWrap()) {
7022 return OverflowResult::NeverOverflows;
7023 }
7024
7025 // If LHS and RHS each have at least two sign bits, the addition will look
7026 // like
7027 //
7028 // XX..... +
7029 // YY.....
7030 //
7031 // If the carry into the most significant position is 0, X and Y can't both
7032 // be 1 and therefore the carry out of the addition is also 0.
7033 //
7034 // If the carry into the most significant position is 1, X and Y can't both
7035 // be 0 and therefore the carry out of the addition is also 1.
7036 //
7037 // Since the carry into the most significant position is always equal to
7038 // the carry out of the addition, there is no signed overflow.
7039 if (::ComputeNumSignBits(LHS, 0, SQ) > 1 &&
7040 ::ComputeNumSignBits(RHS, 0, SQ) > 1)
7041 return OverflowResult::NeverOverflows;
7042
7043 ConstantRange LHSRange =
7044 computeConstantRangeIncludingKnownBits(LHS, /*ForSigned=*/true, SQ);
7045 ConstantRange RHSRange =
7046 computeConstantRangeIncludingKnownBits(RHS, /*ForSigned=*/true, SQ);
7047 OverflowResult OR =
7048 mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
7049 if (OR != OverflowResult::MayOverflow)
7050 return OR;
7051
7052 // The remaining code needs Add to be available. Return early if it is not.
7053 if (!Add)
7054 return OR;
7055
7056 // If the sign of Add is the same as at least one of the operands, this add
7057 // CANNOT overflow. If this can be determined from the known bits of the
7058 // operands the above signedAddMayOverflow() check will have already done so.
7059 // The only other way to improve on the known bits is from an assumption, so
7060 // call computeKnownBitsFromContext() directly.
7061 bool LHSOrRHSKnownNonNegative =
7062 (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
7063 bool LHSOrRHSKnownNegative =
7064 (LHSRange.isAllNegative() || RHSRange.isAllNegative());
7065 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
7066 KnownBits AddKnown(LHSRange.getBitWidth());
7067 computeKnownBitsFromContext(Add, AddKnown, /*Depth=*/0, SQ);
7068 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
7069 (AddKnown.isNegative() && LHSOrRHSKnownNegative))
7070 return OverflowResult::NeverOverflows;
7071 }
7072
7073 return OR;
7074}
7075
7076OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
7077 const Value *RHS,
7078 const SimplifyQuery &SQ) {
7079 // X - (X % ?)
7080 // The remainder of a value can't have greater magnitude than itself,
7081 // so the subtraction can't overflow.
7082
7083 // X - (X -nuw ?)
7084 // In the minimal case, this would simplify to "?", so there's no subtract
7085 // at all. But if this analysis is used to peek through casts, for example,
7086 // then determining no-overflow may allow other transforms.
7087
7088 // TODO: There are other patterns like this.
7089 // See simplifyICmpWithBinOpOnLHS() for candidates.
7090 if (match(RHS, m_URem(m_Specific(LHS), m_Value())) ||
7091 match(RHS, m_NUWSub(m_Specific(LHS), m_Value())))
7092 if (isGuaranteedNotToBeUndef(LHS, SQ.AC, SQ.CxtI, SQ.DT))
7093 return OverflowResult::NeverOverflows;
7094
7095 if (auto C = isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, SQ.CxtI,
7096 SQ.DL)) {
7097 if (*C)
7098 return OverflowResult::NeverOverflows;
7099 return OverflowResult::AlwaysOverflowsLow;
7100 }
7101
7102 ConstantRange LHSRange =
7103 computeConstantRangeIncludingKnownBits(LHS, /*ForSigned=*/false, SQ);
7104 ConstantRange RHSRange =
7105 computeConstantRangeIncludingKnownBits(RHS, /*ForSigned=*/false, SQ);
7106 return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
7107}
7108
7109OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
7110 const Value *RHS,
7111 const SimplifyQuery &SQ) {
7112 // X - (X % ?)
7113 // The remainder of a value can't have greater magnitude than itself,
7114 // so the subtraction can't overflow.
7115
7116 // X - (X -nsw ?)
7117 // In the minimal case, this would simplify to "?", so there's no subtract
7118 // at all. But if this analysis is used to peek through casts, for example,
7119 // then determining no-overflow may allow other transforms.
7120 if (match(RHS, m_SRem(m_Specific(LHS), m_Value())) ||
7121 match(RHS, m_NSWSub(m_Specific(LHS), m_Value())))
7122 if (isGuaranteedNotToBeUndef(LHS, SQ.AC, SQ.CxtI, SQ.DT))
7123 return OverflowResult::NeverOverflows;
7124
7125 // If LHS and RHS each have at least two sign bits, the subtraction
7126 // cannot overflow.
7127 if (::ComputeNumSignBits(LHS, 0, SQ) > 1 &&
7128 ::ComputeNumSignBits(RHS, 0, SQ) > 1)
7129 return OverflowResult::NeverOverflows;
7130
7131 ConstantRange LHSRange =
7132 computeConstantRangeIncludingKnownBits(LHS, /*ForSigned=*/true, SQ);
7133 ConstantRange RHSRange =
7134 computeConstantRangeIncludingKnownBits(RHS, /*ForSigned=*/true, SQ);
7135 return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
7136}
7137
7138bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
7139 const DominatorTree &DT) {
7140 SmallVector<const BranchInst *, 2> GuardingBranches;
7141 SmallVector<const ExtractValueInst *, 2> Results;
7142
7143 for (const User *U : WO->users()) {
7144 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
7145 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
7146
7147 if (EVI->getIndices()[0] == 0)
7148 Results.push_back(EVI);
7149 else {
7150 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
7151
7152 for (const auto *U : EVI->users())
7153 if (const auto *B = dyn_cast<BranchInst>(U)) {
7154 assert(B->isConditional() && "How else is it using an i1?");
7155 GuardingBranches.push_back(B);
7156 }
7157 }
7158 } else {
7159 // We are using the aggregate directly in a way we don't want to analyze
7160 // here (storing it to a global, say).
7161 return false;
7162 }
7163 }
7164
7165 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
7166 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
7167 if (!NoWrapEdge.isSingleEdge())
7168 return false;
7169
7170 // Check if all users of the add are provably no-wrap.
7171 for (const auto *Result : Results) {
7172 // If the extractvalue itself is not executed on overflow, then we don't
7173 // need to check each use separately, since domination is transitive.
7174 if (DT.dominates(NoWrapEdge, Result->getParent()))
7175 continue;
7176
7177 for (const auto &RU : Result->uses())
7178 if (!DT.dominates(NoWrapEdge, RU))
7179 return false;
7180 }
7181
7182 return true;
7183 };
7184
7185 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
7186}
7187
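Illustrative sketch (hypothetical helper): when every use of the math result of a uadd.with.overflow is guarded by its overflow bit, the addition behaves like add nuw, which is what the query above establishes.

static bool guardedUAddActsAsNUW(const WithOverflowInst *WO,
                                 const DominatorTree &DT) {
  return WO->getIntrinsicID() == Intrinsic::uadd_with_overflow &&
         isOverflowIntrinsicNoWrap(WO, DT);
}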
7188/// Shifts return poison if shiftwidth is larger than the bitwidth.
7189static bool shiftAmountKnownInRange(const Value *ShiftAmount) {
7190 auto *C = dyn_cast<Constant>(ShiftAmount);
7191 if (!C)
7192 return false;
7193
7194 // Shifts return poison if shiftwidth is larger than the bitwidth.
7195 SmallVector<const Constant *, 4> ShiftAmounts;
7196 if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
7197 unsigned NumElts = FVTy->getNumElements();
7198 for (unsigned i = 0; i < NumElts; ++i)
7199 ShiftAmounts.push_back(C->getAggregateElement(i));
7200 } else if (isa<ScalableVectorType>(C->getType()))
7201 return false; // Can't tell, just return false to be safe
7202 else
7203 ShiftAmounts.push_back(C);
7204
7205 bool Safe = llvm::all_of(ShiftAmounts, [](const Constant *C) {
7206 auto *CI = dyn_cast_or_null<ConstantInt>(C);
7207 return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
7208 });
7209
7210 return Safe;
7211}
7212
7213enum class UndefPoisonKind {
7214 PoisonOnly = (1 << 0),
7215 UndefOnly = (1 << 1),
7216 UndefOrPoison = PoisonOnly | UndefOnly,
7217};
7218
7219static bool includesPoison(UndefPoisonKind Kind) {
7220 return (unsigned(Kind) & unsigned(UndefPoisonKind::PoisonOnly)) != 0;
7221}
7222
7223static bool includesUndef(UndefPoisonKind Kind) {
7224 return (unsigned(Kind) & unsigned(UndefPoisonKind::UndefOnly)) != 0;
7225}
7226
7227static bool canCreateUndefOrPoison(const Operator *Op, UndefPoisonKind Kind,
7228 bool ConsiderFlagsAndMetadata) {
7229
7230 if (ConsiderFlagsAndMetadata && includesPoison(Kind) &&
7231 Op->hasPoisonGeneratingAnnotations())
7232 return true;
7233
7234 unsigned Opcode = Op->getOpcode();
7235
7236 // Check whether opcode is a poison/undef-generating operation
7237 switch (Opcode) {
7238 case Instruction::Shl:
7239 case Instruction::AShr:
7240 case Instruction::LShr:
7241 return includesPoison(Kind) && !shiftAmountKnownInRange(Op->getOperand(1));
7242 case Instruction::FPToSI:
7243 case Instruction::FPToUI:
7244 // fptosi/ui yields poison if the resulting value does not fit in the
7245 // destination type.
7246 return true;
7247 case Instruction::Call:
7248 if (auto *II = dyn_cast<IntrinsicInst>(Op)) {
7249 switch (II->getIntrinsicID()) {
7250 // TODO: Add more intrinsics.
7251 case Intrinsic::ctlz:
7252 case Intrinsic::cttz:
7253 case Intrinsic::abs:
7254 if (cast<ConstantInt>(II->getArgOperand(1))->isNullValue())
7255 return false;
7256 break;
7257 case Intrinsic::ctpop:
7258 case Intrinsic::bswap:
7259 case Intrinsic::bitreverse:
7260 case Intrinsic::fshl:
7261 case Intrinsic::fshr:
7262 case Intrinsic::smax:
7263 case Intrinsic::smin:
7264 case Intrinsic::umax:
7265 case Intrinsic::umin:
7266 case Intrinsic::ptrmask:
7267 case Intrinsic::fptoui_sat:
7268 case Intrinsic::fptosi_sat:
7269 case Intrinsic::sadd_with_overflow:
7270 case Intrinsic::ssub_with_overflow:
7271 case Intrinsic::smul_with_overflow:
7272 case Intrinsic::uadd_with_overflow:
7273 case Intrinsic::usub_with_overflow:
7274 case Intrinsic::umul_with_overflow:
7275 case Intrinsic::sadd_sat:
7276 case Intrinsic::uadd_sat:
7277 case Intrinsic::ssub_sat:
7278 case Intrinsic::usub_sat:
7279 return false;
7280 case Intrinsic::sshl_sat:
7281 case Intrinsic::ushl_sat:
7282 return includesPoison(Kind) &&
7283 !shiftAmountKnownInRange(II->getArgOperand(1));
7284 case Intrinsic::fma:
7285 case Intrinsic::fmuladd:
7286 case Intrinsic::sqrt:
7287 case Intrinsic::powi:
7288 case Intrinsic::sin:
7289 case Intrinsic::cos:
7290 case Intrinsic::pow:
7291 case Intrinsic::log:
7292 case Intrinsic::log10:
7293 case Intrinsic::log2:
7294 case Intrinsic::exp:
7295 case Intrinsic::exp2:
7296 case Intrinsic::exp10:
7297 case Intrinsic::fabs:
7298 case Intrinsic::copysign:
7299 case Intrinsic::floor:
7300 case Intrinsic::ceil:
7301 case Intrinsic::trunc:
7302 case Intrinsic::rint:
7303 case Intrinsic::nearbyint:
7304 case Intrinsic::round:
7305 case Intrinsic::roundeven:
7306 case Intrinsic::fptrunc_round:
7307 case Intrinsic::canonicalize:
7308 case Intrinsic::arithmetic_fence:
7309 case Intrinsic::minnum:
7310 case Intrinsic::maxnum:
7311 case Intrinsic::minimum:
7312 case Intrinsic::maximum:
7313 case Intrinsic::is_fpclass:
7314 case Intrinsic::ldexp:
7315 case Intrinsic::frexp:
7316 return false;
7317 case Intrinsic::lround:
7318 case Intrinsic::llround:
7319 case Intrinsic::lrint:
7320 case Intrinsic::llrint:
7321 // If the value doesn't fit, an unspecified value is returned (but this
7322 // is not poison).
7323 return false;
7324 }
7325 }
7326 [[fallthrough]];
7327 case Instruction::CallBr:
7328 case Instruction::Invoke: {
7329 const auto *CB = cast<CallBase>(Op);
7330 return !CB->hasRetAttr(Attribute::NoUndef);
7331 }
7332 case Instruction::InsertElement:
7333 case Instruction::ExtractElement: {
7334 // If index exceeds the length of the vector, it returns poison
7335 auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
7336 unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
7337 auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
7338 if (includesPoison(Kind))
7339 return !Idx ||
7340 Idx->getValue().uge(VTy->getElementCount().getKnownMinValue());
7341 return false;
7342 }
7343 case Instruction::ShuffleVector: {
7344 ArrayRef<int> Mask = isa<ConstantExpr>(Op)
7345 ? cast<ConstantExpr>(Op)->getShuffleMask()
7346 : cast<ShuffleVectorInst>(Op)->getShuffleMask();
7347 return includesPoison(Kind) && is_contained(Mask, PoisonMaskElem);
7348 }
7349 case Instruction::FNeg:
7350 case Instruction::PHI:
7351 case Instruction::Select:
7352 case Instruction::URem:
7353 case Instruction::SRem:
7354 case Instruction::ExtractValue:
7355 case Instruction::InsertValue:
7356 case Instruction::Freeze:
7357 case Instruction::ICmp:
7358 case Instruction::FCmp:
7359 case Instruction::FAdd:
7360 case Instruction::FSub:
7361 case Instruction::FMul:
7362 case Instruction::FDiv:
7363 case Instruction::FRem:
7364 return false;
7365 case Instruction::GetElementPtr:
7366 // inbounds is handled above
7367 // TODO: what about inrange on constexpr?
7368 return false;
7369 default: {
7370 const auto *CE = dyn_cast<ConstantExpr>(Op);
7371 if (isa<CastInst>(Op) || (CE && CE->isCast()))
7372 return false;
7373 else if (Instruction::isBinaryOp(Opcode))
7374 return false;
7375 // Be conservative and return true.
7376 return true;
7377 }
7378 }
7379}
7380
7381bool llvm::canCreateUndefOrPoison(const Operator *Op,
7382 bool ConsiderFlagsAndMetadata) {
7383 return ::canCreateUndefOrPoison(Op, UndefPoisonKind::UndefOrPoison,
7384 ConsiderFlagsAndMetadata);
7385}
7386
7387bool llvm::canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata) {
7388 return ::canCreateUndefOrPoison(Op, UndefPoisonKind::PoisonOnly,
7389 ConsiderFlagsAndMetadata);
7390}
7391
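Illustrative sketch (hypothetical helper, for contrast): a shift with an unconstrained amount may create poison, while a plain add with no nsw/nuw flags or poison-generating metadata cannot.

static void comparePoisonSources(BinaryOperator *Shl, BinaryOperator *Add) {
  bool ShlMayPoison = canCreatePoison(cast<Operator>(Shl)); // usually true
  bool AddMayPoison = canCreatePoison(cast<Operator>(Add)); // false w/o flags
  (void)ShlMayPoison;
  (void)AddMayPoison;
}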
7392static bool directlyImpliesPoison(const Value *ValAssumedPoison, const Value *V,
7393 unsigned Depth) {
7394 if (ValAssumedPoison == V)
7395 return true;
7396
7397 const unsigned MaxDepth = 2;
7398 if (Depth >= MaxDepth)
7399 return false;
7400
7401 if (const auto *I = dyn_cast<Instruction>(V)) {
7402 if (any_of(I->operands(), [=](const Use &Op) {
7403 return propagatesPoison(Op) &&
7404 directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
7405 }))
7406 return true;
7407
7408 // V = extractvalue V0, idx
7409 // V2 = extractvalue V0, idx2
7410 // V0's elements are all poison or not. (e.g., add_with_overflow)
7411 const WithOverflowInst *II;
7413 (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) ||
7414 llvm::is_contained(II->args(), ValAssumedPoison)))
7415 return true;
7416 }
7417 return false;
7418}
7419
7420static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
7421 unsigned Depth) {
7422 if (isGuaranteedNotToBePoison(ValAssumedPoison))
7423 return true;
7424
7425 if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
7426 return true;
7427
7428 const unsigned MaxDepth = 2;
7429 if (Depth >= MaxDepth)
7430 return false;
7431
7432 const auto *I = dyn_cast<Instruction>(ValAssumedPoison);
7433 if (I && !canCreatePoison(cast<Operator>(I))) {
7434 return all_of(I->operands(), [=](const Value *Op) {
7435 return impliesPoison(Op, V, Depth + 1);
7436 });
7437 }
7438 return false;
7439}
7440
7441bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
7442 return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
7443}
7444
7445static bool programUndefinedIfUndefOrPoison(const Value *V, bool PoisonOnly);
7446
7447static bool isGuaranteedNotToBeUndefOrPoison(
7448 const Value *V, AssumptionCache *AC, const Instruction *CtxI,
7449 const DominatorTree *DT, unsigned Depth, UndefPoisonKind Kind) {
7451 return false;
7452
7453 if (isa<MetadataAsValue>(V))
7454 return false;
7455
7456 if (const auto *A = dyn_cast<Argument>(V)) {
7457 if (A->hasAttribute(Attribute::NoUndef) ||
7458 A->hasAttribute(Attribute::Dereferenceable) ||
7459 A->hasAttribute(Attribute::DereferenceableOrNull))
7460 return true;
7461 }
7462
7463 if (auto *C = dyn_cast<Constant>(V)) {
7464 if (isa<PoisonValue>(C))
7465 return !includesPoison(Kind);
7466
7467 if (isa<UndefValue>(C))
7468 return !includesUndef(Kind);
7469
7470 if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
7471 isa<ConstantPointerNull>(C) || isa<Function>(C))
7472 return true;
7473
7474 if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C)) {
7475 if (includesUndef(Kind) && C->containsUndefElement())
7476 return false;
7477 if (includesPoison(Kind) && C->containsPoisonElement())
7478 return false;
7479 return !C->containsConstantExpression();
7480 }
7481 }
7482
7483 // Strip cast operations from a pointer value.
7484 // Note that stripPointerCastsSameRepresentation can strip off getelementptr
7485 // inbounds with zero offset. To guarantee that the result isn't poison, the
7486 // stripped pointer is checked as it has to be pointing into an allocated
7487 // object or be null to ensure that `inbounds` getelementptrs with a
7488 // zero offset could not produce poison.
7489 // It can also strip off addrspacecasts that do not change the bit
7490 // representation. We believe such an addrspacecast is equivalent to a no-op.
7491 auto *StrippedV = V->stripPointerCastsSameRepresentation();
7492 if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
7493 isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
7494 return true;
7495
7496 auto OpCheck = [&](const Value *V) {
7497 return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1, Kind);
7498 };
7499
7500 if (auto *Opr = dyn_cast<Operator>(V)) {
7501 // If the value is a freeze instruction, then it can never
7502 // be undef or poison.
7503 if (isa<FreezeInst>(V))
7504 return true;
7505
7506 if (const auto *CB = dyn_cast<CallBase>(V)) {
7507 if (CB->hasRetAttr(Attribute::NoUndef) ||
7508 CB->hasRetAttr(Attribute::Dereferenceable) ||
7509 CB->hasRetAttr(Attribute::DereferenceableOrNull))
7510 return true;
7511 }
7512
7513 if (const auto *PN = dyn_cast<PHINode>(V)) {
7514 unsigned Num = PN->getNumIncomingValues();
7515 bool IsWellDefined = true;
7516 for (unsigned i = 0; i < Num; ++i) {
7517 auto *TI = PN->getIncomingBlock(i)->getTerminator();
7518 if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
7519 DT, Depth + 1, Kind)) {
7520 IsWellDefined = false;
7521 break;
7522 }
7523 }
7524 if (IsWellDefined)
7525 return true;
7526 } else if (!::canCreateUndefOrPoison(Opr, Kind,
7527 /*ConsiderFlagsAndMetadata*/ true) &&
7528 all_of(Opr->operands(), OpCheck))
7529 return true;
7530 }
7531
7532 if (auto *I = dyn_cast<LoadInst>(V))
7533 if (I->hasMetadata(LLVMContext::MD_noundef) ||
7534 I->hasMetadata(LLVMContext::MD_dereferenceable) ||
7535 I->hasMetadata(LLVMContext::MD_dereferenceable_or_null))
7536 return true;
7537
7539 return true;
7540
7541 // CxtI may be null or a cloned instruction.
7542 if (!CtxI || !CtxI->getParent() || !DT)
7543 return false;
7544
7545 auto *DNode = DT->getNode(CtxI->getParent());
7546 if (!DNode)
7547 // Unreachable block
7548 return false;
7549
7550 // If V is used as a branch condition before reaching CtxI, V cannot be
7551 // undef or poison.
7552 // br V, BB1, BB2
7553 // BB1:
7554 // CtxI ; V cannot be undef or poison here
7555 auto *Dominator = DNode->getIDom();
7556 // This check is purely for compile time reasons: we can skip the IDom walk
7557 // if what we are checking for includes undef and the value is not an integer.
7558 if (!includesUndef(Kind) || V->getType()->isIntegerTy())
7559 while (Dominator) {
7560 auto *TI = Dominator->getBlock()->getTerminator();
7561
7562 Value *Cond = nullptr;
7563 if (auto BI = dyn_cast_or_null<BranchInst>(TI)) {
7564 if (BI->isConditional())
7565 Cond = BI->getCondition();
7566 } else if (auto SI = dyn_cast_or_null<SwitchInst>(TI)) {
7567 Cond = SI->getCondition();
7568 }
7569
7570 if (Cond) {
7571 if (Cond == V)
7572 return true;
7573 else if (!includesUndef(Kind) && isa<Operator>(Cond)) {
7574 // For poison, we can analyze further
7575 auto *Opr = cast<Operator>(Cond);
7576 if (any_of(Opr->operands(), [V](const Use &U) {
7577 return V == U && propagatesPoison(U);
7578 }))
7579 return true;
7580 }
7581 }
7582
7583 Dominator = Dominator->getIDom();
7584 }
7585
7586 if (getKnowledgeValidInContext(V, {Attribute::NoUndef}, CtxI, DT, AC))
7587 return true;
7588
7589 return false;
7590}
7591
7592bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
7593 const Instruction *CtxI,
7594 const DominatorTree *DT,
7595 unsigned Depth) {
7596 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth,
7597 UndefPoisonKind::UndefOrPoison);
7598}
7599
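Illustrative sketch (hypothetical helper): a freeze instruction is a no-op when its operand is already guaranteed not to be undef or poison at that point, so it can simply be replaced by the operand.

static bool freezeIsRedundant(const FreezeInst *FI, const DominatorTree *DT) {
  // Use the freeze itself as the context instruction for the query.
  return isGuaranteedNotToBeUndefOrPoison(FI->getOperand(0), /*AC=*/nullptr,
                                          FI, DT);
}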
7600bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
7601 const Instruction *CtxI,
7602 const DominatorTree *DT, unsigned Depth) {
7603 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth,
7604 UndefPoisonKind::PoisonOnly);
7605}
7606
7607bool llvm::isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC,
7608 const Instruction *CtxI,
7609 const DominatorTree *DT, unsigned Depth) {
7610 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth,
7611 UndefPoisonKind::UndefOnly);
7612}
7613
7614/// Return true if undefined behavior would provably be executed on the path to
7615/// OnPathTo if Root produced a poison result. Note that this doesn't say
7616/// anything about whether OnPathTo is actually executed or whether Root is
7617/// actually poison. This can be used to assess whether a new use of Root can
7618/// be added at a location which is control equivalent with OnPathTo (such as
7619/// immediately before it) without introducing UB which didn't previously
7620/// exist. Note that a false result conveys no information.
7621bool llvm::mustExecuteUBIfPoisonOnPathTo(Instruction *Root,
7622 Instruction *OnPathTo,
7623 DominatorTree *DT) {
7624 // Basic approach is to assume Root is poison, propagate poison forward
7625 // through all users we can easily track, and then check whether any of those
7626 // users are provably UB and must execute before our exiting block might
7627 // exit.
7628
7629 // The set of all recursive users we've visited (which are assumed to all be
7630 // poison because of said visit)
7631 SmallSet<const Value *, 16> KnownPoison;
7632 SmallVector<const Instruction *, 16> Worklist;
7633 Worklist.push_back(Root);
7634 while (!Worklist.empty()) {
7635 const Instruction *I = Worklist.pop_back_val();
7636
7637 // If we know this must trigger UB on a path leading our target.
7638 if (mustTriggerUB(I, KnownPoison) && DT->dominates(I, OnPathTo))
7639 return true;
7640
7641 // If we can't analyze propagation through this instruction, just skip it
7642 // and transitive users. Safe as false is a conservative result.
7643 if (I != Root && !any_of(I->operands(), [&KnownPoison](const Use &U) {
7644 return KnownPoison.contains(U) && propagatesPoison(U);
7645 }))
7646 continue;
7647
7648 if (KnownPoison.insert(I).second)
7649 for (const User *User : I->users())
7650 Worklist.push_back(cast<Instruction>(User));
7651 }
7652
7653 // Might be non-UB, or might have a path we couldn't prove must execute on
7654 // way to exiting bb.
7655 return false;
7656}
7657
7658OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
7659 const SimplifyQuery &SQ) {
7660 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
7661 Add, SQ);
7662}
7663
7666 const WithCache<const Value *> &RHS,
7667 const SimplifyQuery &SQ) {
7668 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, SQ);
7669}
7670
7671bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
7672 // Note: An atomic operation isn't guaranteed to return in a reasonable amount
7673 // of time because it's possible for another thread to interfere with it for an
7674 // arbitrary length of time, but programs aren't allowed to rely on that.
7675
7676 // If there is no successor, then execution can't transfer to it.
7677 if (isa<ReturnInst>(I))
7678 return false;
7679 if (isa<UnreachableInst>(I))
7680 return false;
7681
7682 // Note: Do not add new checks here; instead, change Instruction::mayThrow or
7683 // Instruction::willReturn.
7684 //
7685 // FIXME: Move this check into Instruction::willReturn.
7686 if (isa<CatchPadInst>(I)) {
7687 switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) {
7688 default:
7689 // A catchpad may invoke exception object constructors and such, which
7690 // in some languages can be arbitrary code, so be conservative by default.
7691 return false;
7692 case EHPersonality::CoreCLR:
7693 // For CoreCLR, it just involves a type test.
7694 return true;
7695 }
7696 }
7697
7698 // An instruction that returns without throwing must transfer control flow
7699 // to a successor.
7700 return !I->mayThrow() && I->willReturn();
7701}
7702
7703bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
7704 // TODO: This is slightly conservative for invoke instructions since exiting
7705 // via an exception *is* normal control for them.
7706 for (const Instruction &I : *BB)
7707 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
7708 return false;
7709 return true;
7710}
7711
7712bool llvm::isGuaranteedToTransferExecutionToSuccessor(
7713 BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
7714 unsigned ScanLimit) {
7715 return isGuaranteedToTransferExecutionToSuccessor(make_range(Begin, End),
7716 ScanLimit);
7717}
7718
7719bool llvm::isGuaranteedToTransferExecutionToSuccessor(
7720 iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit) {
7721 assert(ScanLimit && "scan limit must be non-zero");
7722 for (const Instruction &I : Range) {
7723 if (isa<DbgInfoIntrinsic>(I))
7724 continue;
7725 if (--ScanLimit == 0)
7726 return false;
7727 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
7728 return false;
7729 }
7730 return true;
7731}
7732
7733bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
7734 const Loop *L) {
7735 // The loop header is guaranteed to be executed for every iteration.
7736 //
7737 // FIXME: Relax this constraint to cover all basic blocks that are
7738 // guaranteed to be executed at every iteration.
7739 if (I->getParent() != L->getHeader()) return false;
7740
7741 for (const Instruction &LI : *L->getHeader()) {
7742 if (&LI == I) return true;
7743 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
7744 }
7745 llvm_unreachable("Instruction not contained in its own parent basic block.");
7746}
7747
7748bool llvm::propagatesPoison(const Use &PoisonOp) {
7749 const Operator *I = cast<Operator>(PoisonOp.getUser());
7750 switch (I->getOpcode()) {
7751 case Instruction::Freeze:
7752 case Instruction::PHI:
7753 case Instruction::Invoke:
7754 return false;
7755 case Instruction::Select:
7756 return PoisonOp.getOperandNo() == 0;
7757 case Instruction::Call:
7758 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
7759 switch (II->getIntrinsicID()) {
7760 // TODO: Add more intrinsics.
7761 case Intrinsic::sadd_with_overflow:
7762 case Intrinsic::ssub_with_overflow:
7763 case Intrinsic::smul_with_overflow:
7764 case Intrinsic::uadd_with_overflow:
7765 case Intrinsic::usub_with_overflow:
7766 case Intrinsic::umul_with_overflow:
7767 // If an input is a vector containing a poison element, the
7768 // two output vectors (calculated results, overflow bits)'
7769 // corresponding lanes are poison.
7770 return true;
7771 case Intrinsic::ctpop:
7772 case Intrinsic::ctlz:
7773 case Intrinsic::cttz:
7774 case Intrinsic::abs:
7775 case Intrinsic::smax:
7776 case Intrinsic::smin:
7777 case Intrinsic::umax:
7778 case Intrinsic::umin:
7779 case Intrinsic::bitreverse:
7780 case Intrinsic::bswap:
7781 case Intrinsic::sadd_sat:
7782 case Intrinsic::ssub_sat:
7783 case Intrinsic::sshl_sat:
7784 case Intrinsic::uadd_sat:
7785 case Intrinsic::usub_sat:
7786 case Intrinsic::ushl_sat:
7787 return true;
7788 }
7789 }
7790 return false;
7791 case Instruction::ICmp:
7792 case Instruction::FCmp:
7793 case Instruction::GetElementPtr:
7794 return true;
7795 default:
7796 if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
7797 return true;
7798
7799 // Be conservative and return false.
7800 return false;
7801 }
7802}
7803
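Illustrative sketch of the distinction made above (hypothetical helper): poison propagates through the result of an add, but a select only propagates poison from its condition operand, not from its true/false arms.

static void poisonPropagationExamples(BinaryOperator *Add, SelectInst *Sel) {
  bool FromAddOp = propagatesPoison(Add->getOperandUse(0));   // true
  bool FromTrueArm = propagatesPoison(Sel->getOperandUse(1)); // false
  (void)FromAddOp;
  (void)FromTrueArm;
}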
7804/// Enumerates all operands of \p I that are guaranteed to not be undef or
7805/// poison. If the callback \p Handle returns true, stop processing and return
7806/// true. Otherwise, return false.
7807template <typename CallableT>
7808static bool handleGuaranteedWellDefinedOps(const Instruction *I,
7809 const CallableT &Handle) {
7810 switch (I->getOpcode()) {
7811 case Instruction::Store:
7812 if (Handle(cast<StoreInst>(I)->getPointerOperand()))
7813 return true;
7814 break;
7815
7816 case Instruction::Load:
7817 if (Handle(cast<LoadInst>(I)->getPointerOperand()))
7818 return true;
7819 break;
7820
7821 // Since dereferenceable attribute imply noundef, atomic operations
7822 // also implicitly have noundef pointers too
7823 case Instruction::AtomicCmpXchg:
7824 if (Handle(cast<AtomicCmpXchgInst>(I)->getPointerOperand()))
7825 return true;
7826 break;
7827
7828 case Instruction::AtomicRMW:
7829 if (Handle(cast<AtomicRMWInst>(I)->getPointerOperand()))
7830 return true;
7831 break;
7832
7833 case Instruction::Call:
7834 case Instruction::Invoke: {
7835 const CallBase *CB = cast<CallBase>(I);
7836 if (CB->isIndirectCall() && Handle(CB->getCalledOperand()))
7837 return true;
7838 for (unsigned i = 0; i < CB->arg_size(); ++i)
7839 if ((CB->paramHasAttr(i, Attribute::NoUndef) ||
7840 CB->paramHasAttr(i, Attribute::Dereferenceable) ||
7841 CB->paramHasAttr(i, Attribute::DereferenceableOrNull)) &&
7842 Handle(CB->getArgOperand(i)))
7843 return true;
7844 break;
7845 }
7846 case Instruction::Ret:
7847 if (I->getFunction()->hasRetAttribute(Attribute::NoUndef) &&
7848 Handle(I->getOperand(0)))
7849 return true;
7850 break;
7851 case Instruction::Switch:
7852 if (Handle(cast<SwitchInst>(I)->getCondition()))
7853 return true;
7854 break;
7855 case Instruction::Br: {
7856 auto *BR = cast<BranchInst>(I);
7857 if (BR->isConditional() && Handle(BR->getCondition()))
7858 return true;
7859 break;
7860 }
7861 default:
7862 break;
7863 }
7864
7865 return false;
7866}
7867
7870 handleGuaranteedWellDefinedOps(I, [&](const Value *V) {
7871 Operands.push_back(V);
7872 return false;
7873 });
7874}
7875
7876/// Enumerates all operands of \p I that are guaranteed to not be poison.
7877template <typename CallableT>
7879 const CallableT &Handle) {
7880 if (handleGuaranteedWellDefinedOps(I, Handle))
7881 return true;
7882 switch (I->getOpcode()) {
7883 // Divisors of these operations are allowed to be partially undef.
7884 case Instruction::UDiv:
7885 case Instruction::SDiv:
7886 case Instruction::URem:
7887 case Instruction::SRem:
7888 return Handle(I->getOperand(1));
7889 default:
7890 return false;
7891 }
7892}
7893
7896 handleGuaranteedNonPoisonOps(I, [&](const Value *V) {
7897 Operands.push_back(V);
7898 return false;
7899 });
7900}
7901
7902bool llvm::mustTriggerUB(const Instruction *I,
7903 const SmallPtrSetImpl<const Value *> &KnownPoison) {
7904 return ::handleGuaranteedNonPoisonOps(
7905 I, [&](const Value *V) { return KnownPoison.count(V); });
7906}
7907
7908static bool programUndefinedIfUndefOrPoison(const Value *V,
7909 bool PoisonOnly) {
7910 // We currently only look for uses of values within the same basic
7911 // block, as that makes it easier to guarantee that the uses will be
7912 // executed given that Inst is executed.
7913 //
7914 // FIXME: Expand this to consider uses beyond the same basic block. To do
7915 // this, look out for the distinction between post-dominance and strong
7916 // post-dominance.
7917 const BasicBlock *BB = nullptr;
7919 if (const auto *Inst = dyn_cast<Instruction>(V)) {
7920 BB = Inst->getParent();
7921 Begin = Inst->getIterator();
7922 Begin++;
7923 } else if (const auto *Arg = dyn_cast<Argument>(V)) {
7924 if (Arg->getParent()->isDeclaration())
7925 return false;
7926 BB = &Arg->getParent()->getEntryBlock();
7927 Begin = BB->begin();
7928 } else {
7929 return false;
7930 }
7931
7932 // Limit number of instructions we look at, to avoid scanning through large
7933 // blocks. The current limit is chosen arbitrarily.
7934 unsigned ScanLimit = 32;
7936
7937 if (!PoisonOnly) {
7938 // Since undef does not propagate eagerly, be conservative & just check
7939 // whether a value is directly passed to an instruction that must take
7940 // well-defined operands.
7941
7942 for (const auto &I : make_range(Begin, End)) {
7943 if (isa<DbgInfoIntrinsic>(I))
7944 continue;
7945 if (--ScanLimit == 0)
7946 break;
7947
7948 if (handleGuaranteedWellDefinedOps(&I, [V](const Value *WellDefinedOp) {
7949 return WellDefinedOp == V;
7950 }))
7951 return true;
7952
7953 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
7954 break;
7955 }
7956 return false;
7957 }
7958
7959 // Set of instructions that we have proved will yield poison if Inst
7960 // does.
7961 SmallSet<const Value *, 16> YieldsPoison;
7963
7964 YieldsPoison.insert(V);
7965 Visited.insert(BB);
7966
7967 while (true) {
7968 for (const auto &I : make_range(Begin, End)) {
7969 if (isa<DbgInfoIntrinsic>(I))
7970 continue;
7971 if (--ScanLimit == 0)
7972 return false;
7973 if (mustTriggerUB(&I, YieldsPoison))
7974 return true;
7975 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
7976 return false;
7977
7978 // If an operand is poison and propagates it, mark I as yielding poison.
7979 for (const Use &Op : I.operands()) {
7980 if (YieldsPoison.count(Op) && propagatesPoison(Op)) {
7981 YieldsPoison.insert(&I);
7982 break;
7983 }
7984 }
7985
7986 // Special handling for select, which returns poison if its operand 0 is
7987 // poison (handled in the loop above) *or* if both its true/false operands
7988 // are poison (handled here).
7989 if (I.getOpcode() == Instruction::Select &&
7990 YieldsPoison.count(I.getOperand(1)) &&
7991 YieldsPoison.count(I.getOperand(2))) {
7992 YieldsPoison.insert(&I);
7993 }
7994 }
7995
7996 BB = BB->getSingleSuccessor();
7997 if (!BB || !Visited.insert(BB).second)
7998 break;
7999
8000 Begin = BB->getFirstNonPHI()->getIterator();
8001 End = BB->end();
8002 }
8003 return false;
8004}
8005
8006bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
8007 return ::programUndefinedIfUndefOrPoison(Inst, false);
8008}
8009
8010bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
8011 return ::programUndefinedIfUndefOrPoison(Inst, true);
8012}
8013
8014static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
8015 if (FMF.noNaNs())
8016 return true;
8017
8018 if (auto *C = dyn_cast<ConstantFP>(V))
8019 return !C->isNaN();
8020
8021 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
8022 if (!C->getElementType()->isFloatingPointTy())
8023 return false;
8024 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
8025 if (C->getElementAsAPFloat(I).isNaN())
8026 return false;
8027 }
8028 return true;
8029 }
8030
8031 if (isa<ConstantAggregateZero>(V))
8032 return true;
8033
8034 return false;
8035}
8036
8037static bool isKnownNonZero(const Value *V) {
8038 if (auto *C = dyn_cast<ConstantFP>(V))
8039 return !C->isZero();
8040
8041 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
8042 if (!C->getElementType()->isFloatingPointTy())
8043 return false;
8044 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
8045 if (C->getElementAsAPFloat(I).isZero())
8046 return false;
8047 }
8048 return true;
8049 }
8050
8051 return false;
8052}
8053
8054/// Match clamp pattern for float types without caring about NaNs or signed zeros.
8055/// Given a non-min/max outer cmp/select from the clamp pattern, this
8056/// function recognizes if it can be substituted by a "canonical" min/max
8057/// pattern.
8058static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
8059 Value *CmpLHS, Value *CmpRHS,
8060 Value *TrueVal, Value *FalseVal,
8061 Value *&LHS, Value *&RHS) {
8062 // Try to match
8063 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
8064 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
8065 // and return description of the outer Max/Min.
8066
8067 // First, check if select has inverse order:
8068 if (CmpRHS == FalseVal) {
8069 std::swap(TrueVal, FalseVal);
8070 Pred = CmpInst::getInversePredicate(Pred);
8071 }
8072
8073 // Assume success now. If there's no match, callers should not use these anyway.
8074 LHS = TrueVal;
8075 RHS = FalseVal;
8076
8077 const APFloat *FC1;
8078 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
8079 return {SPF_UNKNOWN, SPNB_NA, false};
8080
8081 const APFloat *FC2;
8082 switch (Pred) {
8083 case CmpInst::FCMP_OLT:
8084 case CmpInst::FCMP_OLE:
8085 case CmpInst::FCMP_ULT:
8086 case CmpInst::FCMP_ULE:
8087 if (match(FalseVal,
8089 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
8090 *FC1 < *FC2)
8091 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
8092 break;
8093 case CmpInst::FCMP_OGT:
8094 case CmpInst::FCMP_OGE:
8095 case CmpInst::FCMP_UGT:
8096 case CmpInst::FCMP_UGE:
8097 if (match(FalseVal,
8099 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
8100 *FC1 > *FC2)
8101 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
8102 break;
8103 default:
8104 break;
8105 }
8106
8107 return {SPF_UNKNOWN, SPNB_NA, false};
8108}
8109
8110/// Recognize variations of:
8111/// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
8112static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
8113 Value *CmpLHS, Value *CmpRHS,
8114 Value *TrueVal, Value *FalseVal) {
8115 // Swap the select operands and predicate to match the patterns below.
8116 if (CmpRHS != TrueVal) {
8117 Pred = ICmpInst::getSwappedPredicate(Pred);
8118 std::swap(TrueVal, FalseVal);
8119 }
8120 const APInt *C1;
8121 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
8122 const APInt *C2;
8123 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
8124 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
8125 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
8126 return {SPF_SMAX, SPNB_NA, false};
8127
8128 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
8129 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
8130 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
8131 return {SPF_SMIN, SPNB_NA, false};
8132
8133 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
8134 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
8135 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
8136 return {SPF_UMAX, SPNB_NA, false};
8137
8138 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
8139 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
8140 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
8141 return {SPF_UMIN, SPNB_NA, false};
8142 }
8143 return {SPF_UNKNOWN, SPNB_NA, false};
8144}
8145
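Illustrative sketch (hypothetical helper): callers normally reach matchClamp and the other matchers through the public matchSelectPattern entry point and then inspect the reported flavor.

static bool selectIsSignedMax(Value *Sel) {
  Value *LHS = nullptr, *RHS = nullptr;
  SelectPatternResult SPR = matchSelectPattern(Sel, LHS, RHS);
  return SPR.Flavor == SPF_SMAX;
}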
8146/// Recognize variations of:
8147/// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
8148static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
8149 Value *CmpLHS, Value *CmpRHS,
8150 Value *TVal, Value *FVal,
8151 unsigned Depth) {
8152 // TODO: Allow FP min/max with nnan/nsz.
8153 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
8154
8155 Value *A = nullptr, *B = nullptr;
8156 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
8157 if (!SelectPatternResult::isMinOrMax(L.Flavor))
8158 return {SPF_UNKNOWN, SPNB_NA, false};
8159
8160 Value *C = nullptr, *D = nullptr;
8161 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
8162 if (L.Flavor != R.Flavor)
8163 return {SPF_UNKNOWN, SPNB_NA, false};
8164
8165 // We have something like: x Pred y ? min(a, b) : min(c, d).
8166 // Try to match the compare to the min/max operations of the select operands.
8167 // First, make sure we have the right compare predicate.
8168 switch (L.Flavor) {
8169 case SPF_SMIN:
8170 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
8171 Pred = ICmpInst::getSwappedPredicate(Pred);
8172 std::swap(CmpLHS, CmpRHS);
8173 }
8174 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
8175 break;
8176 return {SPF_UNKNOWN, SPNB_NA, false};
8177 case SPF_SMAX:
8178 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
8179 Pred = ICmpInst::getSwappedPredicate(Pred);
8180 std::swap(CmpLHS, CmpRHS);
8181 }
8182 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
8183 break;
8184 return {SPF_UNKNOWN, SPNB_NA, false};
8185 case SPF_UMIN:
8186 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
8187 Pred = ICmpInst::getSwappedPredicate(Pred);
8188 std::swap(CmpLHS, CmpRHS);
8189 }
8190 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
8191 break;
8192 return {SPF_UNKNOWN, SPNB_NA, false};
8193 case SPF_UMAX:
8194 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
8195 Pred = ICmpInst::getSwappedPredicate(Pred);
8196 std::swap(CmpLHS, CmpRHS);
8197 }
8198 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
8199 break;
8200 return {SPF_UNKNOWN, SPNB_NA, false};
8201 default:
8202 return {SPF_UNKNOWN, SPNB_NA, false};
8203 }
8204
8205 // If there is a common operand in the already matched min/max and the other
8206 // min/max operands match the compare operands (either directly or inverted),
8207 // then this is min/max of the same flavor.
8208
8209 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
8210 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
8211 if (D == B) {
8212 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
8213 match(A, m_Not(m_Specific(CmpRHS)))))
8214 return {L.Flavor, SPNB_NA, false};
8215 }
8216 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
8217 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
8218 if (C == B) {
8219 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
8220 match(A, m_Not(m_Specific(CmpRHS)))))
8221 return {L.Flavor, SPNB_NA, false};
8222 }
8223 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
8224 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
8225 if (D == A) {
8226 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
8227 match(B, m_Not(m_Specific(CmpRHS)))))
8228 return {L.Flavor, SPNB_NA, false};
8229 }
8230 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
8231 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
8232 if (C == A) {
8233 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
8234 match(B, m_Not(m_Specific(CmpRHS)))))
8235 return {L.Flavor, SPNB_NA, false};
8236 }
8237
8238 return {SPF_UNKNOWN, SPNB_NA, false};
8239}
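// Illustrative shape of the pattern matched above (hypothetical IR, using the
// select form of smin):
//   %ab = select (icmp slt %a, %b), %a, %b       ; smin(a, b)
//   %cb = select (icmp slt %c, %b), %c, %b       ; smin(c, b)
//   %r  = select (icmp slt %a, %c), %ab, %cb     ; smin(smin(a,b), smin(c,b))
// Here D == B (the common operand) and CmpLHS/CmpRHS are A and C, so the
// whole select is reported as SMIN.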
8240
8241/// If the input value is the result of a 'not' op, constant integer, or vector
8242/// splat of a constant integer, return the bitwise-not source value.
8243/// TODO: This could be extended to handle non-splat vector integer constants.
8244static Value *getNotValue(Value *V) {
8245 Value *NotV;
8246 if (match(V, m_Not(m_Value(NotV))))
8247 return NotV;
8248
8249 const APInt *C;
8250 if (match(V, m_APInt(C)))
8251 return ConstantInt::get(V->getType(), ~(*C));
8252
8253 return nullptr;
8254}
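// Examples (illustrative only): for "%n = xor i8 %x, -1", getNotValue(%n)
// returns %x; for the constant i8 12 it returns i8 -13 (~12); for anything
// else it returns nullptr.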
8255
8256/// Match non-obvious integer minimum and maximum sequences.
8257static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
8258 Value *CmpLHS, Value *CmpRHS,
8259 Value *TrueVal, Value *FalseVal,
8260 Value *&LHS, Value *&RHS,
8261 unsigned Depth) {
8262 // Assume success. If there's no match, callers should not use these anyway.
8263 LHS = TrueVal;
8264 RHS = FalseVal;
8265
8266 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
8267 if (SelectPatternResult::isMinOrMax(SPR.Flavor))
8268 return SPR;
8269
8270 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
8271 if (SelectPatternResult::isMinOrMax(SPR.Flavor))
8272 return SPR;
8273
8274 // Look through 'not' ops to find disguised min/max.
8275 // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
8276 // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
8277 if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
8278 switch (Pred) {
8279 case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
8280 case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
8281 case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
8282 case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
8283 default: break;
8284 }
8285 }
8286
8287 // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
8288 // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
8289 if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
8290 switch (Pred) {
8291 case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
8292 case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
8293 case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
8294 case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
8295 default: break;
8296 }
8297 }
8298
8299 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
8300 return {SPF_UNKNOWN, SPNB_NA, false};
8301
8302 const APInt *C1;
8303 if (!match(CmpRHS, m_APInt(C1)))
8304 return {SPF_UNKNOWN, SPNB_NA, false};
8305
8306 // An unsigned min/max can be written with a signed compare.
8307 const APInt *C2;
8308 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
8309 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
8310 // Is the sign bit set?
8311 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
8312 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
8313 if (Pred == CmpInst::ICMP_SLT && C1->isZero() && C2->isMaxSignedValue())
8314 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
8315
8316 // Is the sign bit clear?
8317 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
8318 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
8319 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnes() && C2->isMinSignedValue())
8320 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
8321 }
8322
8323 return {SPF_UNKNOWN, SPNB_NA, false};
8324}
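// Illustrative example of the sign-bit trick above (hypothetical IR): with
// C1 == 0 and C2 == 127 (SINT_MAX for i8),
//   %c = icmp slt i8 %x, 0
//   %r = select i1 %c, i8 %x, i8 127
// is reported as UMAX(%x, 127), because a set sign bit means %x u> 127.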
8325
8326bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW,
8327 bool AllowPoison) {
8328 assert(X && Y && "Invalid operand");
8329
8330 auto IsNegationOf = [&](const Value *X, const Value *Y) {
8331 if (!match(X, m_Neg(m_Specific(Y))))
8332 return false;
8333
8334 auto *BO = cast<BinaryOperator>(X);
8335 if (NeedNSW && !BO->hasNoSignedWrap())
8336 return false;
8337
8338 auto *Zero = cast<Constant>(BO->getOperand(0));
8339 if (!AllowPoison && !Zero->isNullValue())
8340 return false;
8341
8342 return true;
8343 };
8344
8345 // X = -Y or Y = -X
8346 if (IsNegationOf(X, Y) || IsNegationOf(Y, X))
8347 return true;
8348
8349 // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
8350 Value *A, *B;
8351 return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
8352 match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
8353 (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
8354 match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
8355}
8356
8357bool llvm::isKnownInversion(const Value *X, const Value *Y) {
8358 // Handle X = icmp pred A, B, Y = icmp pred A, C.
8359 Value *A, *B, *C;
8360 ICmpInst::Predicate Pred1, Pred2;
8361 if (!match(X, m_ICmp(Pred1, m_Value(A), m_Value(B))) ||
8362 !match(Y, m_c_ICmp(Pred2, m_Specific(A), m_Value(C))))
8363 return false;
8364
8365 if (B == C)
8366 return Pred1 == ICmpInst::getInversePredicate(Pred2);
8367
8368 // Try to infer the relationship from constant ranges.
8369 const APInt *RHSC1, *RHSC2;
8370 if (!match(B, m_APInt(RHSC1)) || !match(C, m_APInt(RHSC2)))
8371 return false;
8372
8373 const auto CR1 = ConstantRange::makeExactICmpRegion(Pred1, *RHSC1);
8374 const auto CR2 = ConstantRange::makeExactICmpRegion(Pred2, *RHSC2);
8375
8376 return CR1.inverse() == CR2;
8377}
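// Illustrative example (assumed constants, not part of the original source):
//   X = icmp ult i8 %a, 5   ; exact region [0, 5)
//   Y = icmp ugt i8 %a, 4   ; exact region [5, 0)
// The regions are exact complements, so isKnownInversion(X, Y) is true even
// though the predicates and constants differ.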
8378
8379static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
8380 FastMathFlags FMF,
8381 Value *CmpLHS, Value *CmpRHS,
8382 Value *TrueVal, Value *FalseVal,
8383 Value *&LHS, Value *&RHS,
8384 unsigned Depth) {
8385 bool HasMismatchedZeros = false;
8386 if (CmpInst::isFPPredicate(Pred)) {
8387 // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
8388 // 0.0 operand, set the compare's 0.0 operands to that same value for the
8389 // purpose of identifying min/max. Disregard vector constants with undefined
8390 // elements because those cannot be back-propagated for analysis.
8391 Value *OutputZeroVal = nullptr;
8392 if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
8393 !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
8394 OutputZeroVal = TrueVal;
8395 else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
8396 !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
8397 OutputZeroVal = FalseVal;
8398
8399 if (OutputZeroVal) {
8400 if (match(CmpLHS, m_AnyZeroFP()) && CmpLHS != OutputZeroVal) {
8401 HasMismatchedZeros = true;
8402 CmpLHS = OutputZeroVal;
8403 }
8404 if (match(CmpRHS, m_AnyZeroFP()) && CmpRHS != OutputZeroVal) {
8405 HasMismatchedZeros = true;
8406 CmpRHS = OutputZeroVal;
8407 }
8408 }
8409 }
8410
8411 LHS = CmpLHS;
8412 RHS = CmpRHS;
8413
8414 // Signed zero may return inconsistent results between implementations.
8415 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
8416 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
8417 // Therefore, we behave conservatively and only proceed if at least one of the
8418 // operands is known to not be zero or if we don't care about signed zero.
8419 switch (Pred) {
8420 default: break;
8421 case CmpInst::FCMP_OGT: case CmpInst::FCMP_OLT:
8422 case CmpInst::FCMP_UGT: case CmpInst::FCMP_ULT:
8423 if (!HasMismatchedZeros)
8424 break;
8425 [[fallthrough]];
8426 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
8427 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
8428 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
8429 !isKnownNonZero(CmpRHS))
8430 return {SPF_UNKNOWN, SPNB_NA, false};
8431 }
8432
8433 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
8434 bool Ordered = false;
8435
8436 // When given one NaN and one non-NaN input:
8437 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
8438 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
8439 // ordered comparison fails), which could be NaN or non-NaN.
8440 // so here we discover exactly what NaN behavior is required/accepted.
8441 if (CmpInst::isFPPredicate(Pred)) {
8442 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
8443 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
8444
8445 if (LHSSafe && RHSSafe) {
8446 // Both operands are known non-NaN.
8447 NaNBehavior = SPNB_RETURNS_ANY;
8448 } else if (CmpInst::isOrdered(Pred)) {
8449 // An ordered comparison will return false when given a NaN, so it
8450 // returns the RHS.
8451 Ordered = true;
8452 if (LHSSafe)
8453 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
8454 NaNBehavior = SPNB_RETURNS_NAN;
8455 else if (RHSSafe)
8456 NaNBehavior = SPNB_RETURNS_OTHER;
8457 else
8458 // Completely unsafe.
8459 return {SPF_UNKNOWN, SPNB_NA, false};
8460 } else {
8461 Ordered = false;
8462 // An unordered comparison will return true when given a NaN, so it
8463 // returns the LHS.
8464 if (LHSSafe)
8465 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
8466 NaNBehavior = SPNB_RETURNS_OTHER;
8467 else if (RHSSafe)
8468 NaNBehavior = SPNB_RETURNS_NAN;
8469 else
8470 // Completely unsafe.
8471 return {SPF_UNKNOWN, SPNB_NA, false};
8472 }
8473 }
8474
8475 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
8476 std::swap(CmpLHS, CmpRHS);
8477 Pred = CmpInst::getSwappedPredicate(Pred);
8478 if (NaNBehavior == SPNB_RETURNS_NAN)
8479 NaNBehavior = SPNB_RETURNS_OTHER;
8480 else if (NaNBehavior == SPNB_RETURNS_OTHER)
8481 NaNBehavior = SPNB_RETURNS_NAN;
8482 Ordered = !Ordered;
8483 }
8484
8485 // ([if]cmp X, Y) ? X : Y
8486 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
8487 switch (Pred) {
8488 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
8489 case ICmpInst::ICMP_UGT:
8490 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
8491 case ICmpInst::ICMP_SGT:
8492 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
8493 case ICmpInst::ICMP_ULT:
8494 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
8495 case ICmpInst::ICMP_SLT:
8496 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
8497 case FCmpInst::FCMP_UGT:
8498 case FCmpInst::FCMP_UGE:
8499 case FCmpInst::FCMP_OGT:
8500 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
8501 case FCmpInst::FCMP_ULT:
8502 case FCmpInst::FCMP_ULE:
8503 case FCmpInst::FCMP_OLT:
8504 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
8505 }
8506 }
8507
8508 if (isKnownNegation(TrueVal, FalseVal)) {
8509 // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
8510 // match against either LHS or sext(LHS).
8511 auto MaybeSExtCmpLHS =
8512 m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
8513 auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
8514 auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
8515 if (match(TrueVal, MaybeSExtCmpLHS)) {
8516 // Set the return values. If the compare uses the negated value (-X >s 0),
8517 // swap the return values because the negated value is always 'RHS'.
8518 LHS = TrueVal;
8519 RHS = FalseVal;
8520 if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
8521 std::swap(LHS, RHS);
8522
8523 // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
8524 // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
8525 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
8526 return {SPF_ABS, SPNB_NA, false};
8527
8528 // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
8529 if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
8530 return {SPF_ABS, SPNB_NA, false};
8531
8532 // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
8533 // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
8534 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
8535 return {SPF_NABS, SPNB_NA, false};
8536 }
8537 else if (match(FalseVal, MaybeSExtCmpLHS)) {
8538 // Set the return values. If the compare uses the negated value (-X >s 0),
8539 // swap the return values because the negated value is always 'RHS'.
8540 LHS = FalseVal;
8541 RHS = TrueVal;
8542 if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
8543 std::swap(LHS, RHS);
8544
8545 // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
8546 // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
8547 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
8548 return {SPF_NABS, SPNB_NA, false};
8549
8550 // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
8551 // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
8552 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
8553 return {SPF_ABS, SPNB_NA, false};
8554 }
8555 }
8556
8557 if (CmpInst::isIntPredicate(Pred))
8558 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
8559
8560 // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
8561 // may return either -0.0 or 0.0, so the fcmp/select pair has stricter
8562 // semantics than minNum. Be conservative in such cases.
8563 if (NaNBehavior != SPNB_RETURNS_ANY ||
8564 (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
8565 !isKnownNonZero(CmpRHS)))
8566 return {SPF_UNKNOWN, SPNB_NA, false};
8567
8568 return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
8569}
8570
8571/// Helps to match a select pattern in case of a type mismatch.
8572///
8573/// The function handles the case when the type of the true and false values
8574/// of a select instruction differs from the type of the cmp instruction
8575/// operands because of a cast instruction. The function checks whether it is
8576/// legal to move the cast operation after the "select". If so, it returns the
8577/// new second value of the "select" (assuming the cast is moved):
8578/// 1. As the operand of the cast instruction, when both values of the "select"
8579///    are the same cast instruction.
8580/// 2. As a restored constant (obtained by applying the reverse cast operation),
8581///    when the first value of the "select" is a cast operation and the second
8582///    value is a constant.
8583/// NOTE: We return only the new second value because the first value could be
8584/// accessed as operand of cast instruction.
8585static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
8586 Instruction::CastOps *CastOp) {
8587 auto *Cast1 = dyn_cast<CastInst>(V1);
8588 if (!Cast1)
8589 return nullptr;
8590
8591 *CastOp = Cast1->getOpcode();
8592 Type *SrcTy = Cast1->getSrcTy();
8593 if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
8594 // If V1 and V2 are both the same cast from the same type, look through V1.
8595 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
8596 return Cast2->getOperand(0);
8597 return nullptr;
8598 }
8599
8600 auto *C = dyn_cast<Constant>(V2);
8601 if (!C)
8602 return nullptr;
8603
8604 const DataLayout &DL = CmpI->getDataLayout();
8605 Constant *CastedTo = nullptr;
8606 switch (*CastOp) {
8607 case Instruction::ZExt:
8608 if (CmpI->isUnsigned())
8609 CastedTo = ConstantExpr::getTrunc(C, SrcTy);
8610 break;
8611 case Instruction::SExt:
8612 if (CmpI->isSigned())
8613 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
8614 break;
8615 case Instruction::Trunc:
8616 Constant *CmpConst;
8617 if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
8618 CmpConst->getType() == SrcTy) {
8619 // Here we have the following case:
8620 //
8621 // %cond = cmp iN %x, CmpConst
8622 // %tr = trunc iN %x to iK
8623 // %narrowsel = select i1 %cond, iK %tr, iK C
8624 //
8625 // We can always move trunc after select operation:
8626 //
8627 // %cond = cmp iN %x, CmpConst
8628 // %widesel = select i1 %cond, iN %x, iN CmpConst
8629 // %tr = trunc iN %widesel to iK
8630 //
8631 // Note that C could be extended in any way because we don't care about
8632 // upper bits after truncation. It can't be abs pattern, because it would
8633 // look like:
8634 //
8635 // select i1 %cond, x, -x.
8636 //
8637 // So only a min/max pattern can be matched. Such a match requires the
8638 // widened C == CmpConst. That is why we set the widened C = CmpConst; the
8639 // condition trunc(CmpConst) == C is checked below.
8640 CastedTo = CmpConst;
8641 } else {
8642 unsigned ExtOp = CmpI->isSigned() ? Instruction::SExt : Instruction::ZExt;
8643 CastedTo = ConstantFoldCastOperand(ExtOp, C, SrcTy, DL);
8644 }
8645 break;
8646 case Instruction::FPTrunc:
8647 CastedTo = ConstantFoldCastOperand(Instruction::FPExt, C, SrcTy, DL);
8648 break;
8649 case Instruction::FPExt:
8650 CastedTo = ConstantFoldCastOperand(Instruction::FPTrunc, C, SrcTy, DL);
8651 break;
8652 case Instruction::FPToUI:
8653 CastedTo = ConstantFoldCastOperand(Instruction::UIToFP, C, SrcTy, DL);
8654 break;
8655 case Instruction::FPToSI:
8656 CastedTo = ConstantFoldCastOperand(Instruction::SIToFP, C, SrcTy, DL);
8657 break;
8658 case Instruction::UIToFP:
8659 CastedTo = ConstantFoldCastOperand(Instruction::FPToUI, C, SrcTy, DL);
8660 break;
8661 case Instruction::SIToFP:
8662 CastedTo = ConstantFoldCastOperand(Instruction::FPToSI, C, SrcTy, DL);
8663 break;
8664 default:
8665 break;
8666 }
8667
8668 if (!CastedTo)
8669 return nullptr;
8670
8671 // Make sure the cast doesn't lose any information.
8672 Constant *CastedBack =
8673 ConstantFoldCastOperand(*CastOp, CastedTo, C->getType(), DL);
8674 if (CastedBack && CastedBack != C)
8675 return nullptr;
8676
8677 return CastedTo;
8678}
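// Illustrative example of the Trunc case above (hypothetical IR):
//   %cond      = icmp sgt i32 %x, 42
//   %tr        = trunc i32 %x to i8
//   %narrowsel = select i1 %cond, i8 %tr, i8 42
// lookThroughCast returns the wide constant i32 42, letting the caller match
//   %widesel = select i1 %cond, i32 %x, i32 42    ; smax(%x, 42)
// and treat the trunc as happening after the select.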
8679
8681 Instruction::CastOps *CastOp,
8682 unsigned Depth) {
8684 return {SPF_UNKNOWN, SPNB_NA, false};
8685
8686 SelectInst *SI = dyn_cast<SelectInst>(V);
8687 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
8688
8689 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
8690 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
8691
8692 Value *TrueVal = SI->getTrueValue();
8693 Value *FalseVal = SI->getFalseValue();
8694
8695 return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
8696 CastOp, Depth);
8697}
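// Hedged usage sketch (illustrative, not part of the original source):
//   Value *LHS, *RHS;
//   SelectPatternResult SPR = matchSelectPattern(Sel, LHS, RHS);
//   if (SelectPatternResult::isMinOrMax(SPR.Flavor)) {
//     // Sel computes a min/max of LHS and RHS; SPR.Flavor says which one.
//   }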
8698
8699SelectPatternResult llvm::matchDecomposedSelectPattern(
8700 CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
8701 Instruction::CastOps *CastOp, unsigned Depth) {
8702 CmpInst::Predicate Pred = CmpI->getPredicate();
8703 Value *CmpLHS = CmpI->getOperand(0);
8704 Value *CmpRHS = CmpI->getOperand(1);
8705 FastMathFlags FMF;
8706 if (isa<FPMathOperator>(CmpI))
8707 FMF = CmpI->getFastMathFlags();
8708
8709 // Bail out early.
8710 if (CmpI->isEquality())
8711 return {SPF_UNKNOWN, SPNB_NA, false};
8712
8713 // Deal with type mismatches.
8714 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
8715 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
8716 // If this is a potential fmin/fmax with a cast to integer, then ignore
8717 // -0.0 because there is no corresponding integer value.
8718 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
8719 FMF.setNoSignedZeros();
8720 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
8721 cast<CastInst>(TrueVal)->getOperand(0), C,
8722 LHS, RHS, Depth);
8723 }
8724 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
8725 // If this is a potential fmin/fmax with a cast to integer, then ignore
8726 // -0.0 because there is no corresponding integer value.
8727 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
8728 FMF.setNoSignedZeros();
8729 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
8730 C, cast<CastInst>(FalseVal)->getOperand(0),
8731 LHS, RHS, Depth);
8732 }
8733 }
8734 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
8735 LHS, RHS, Depth);
8736}
8737
8738CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
8739 if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
8740 if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
8741 if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
8742 if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
8743 if (SPF == SPF_FMINNUM)
8744 return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
8745 if (SPF == SPF_FMAXNUM)
8746 return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
8747 llvm_unreachable("unhandled!");
8748}
8749
8750SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
8751 if (SPF == SPF_SMIN) return SPF_SMAX;
8752 if (SPF == SPF_UMIN) return SPF_UMAX;
8753 if (SPF == SPF_SMAX) return SPF_SMIN;
8754 if (SPF == SPF_UMAX) return SPF_UMIN;
8755 llvm_unreachable("unhandled!");
8756}
8757
8758Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) {
8759 switch (MinMaxID) {
8760 case Intrinsic::smax: return Intrinsic::smin;
8761 case Intrinsic::smin: return Intrinsic::smax;
8762 case Intrinsic::umax: return Intrinsic::umin;
8763 case Intrinsic::umin: return Intrinsic::umax;
8764 // Note that the next four intrinsics may produce the same result for the
8765 // original and the inverted case even if X != Y, because NaN is handled specially.
8766 case Intrinsic::maximum: return Intrinsic::minimum;
8767 case Intrinsic::minimum: return Intrinsic::maximum;
8768 case Intrinsic::maxnum: return Intrinsic::minnum;
8769 case Intrinsic::minnum: return Intrinsic::maxnum;
8770 default: llvm_unreachable("Unexpected intrinsic");
8771 }
8772}
8773
8774APInt llvm::getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth) {
8775 switch (SPF) {
8776 case SPF_SMAX: return APInt::getSignedMaxValue(BitWidth);
8777 case SPF_SMIN: return APInt::getSignedMinValue(BitWidth);
8778 case SPF_UMAX: return APInt::getMaxValue(BitWidth);
8779 case SPF_UMIN: return APInt::getMinValue(BitWidth);
8780 default: llvm_unreachable("Unexpected flavor");
8781 }
8782}
8783
8784std::pair<Intrinsic::ID, bool>
8785llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {
8786 // Check if VL contains select instructions that can be folded into a min/max
8787 // vector intrinsic and return the intrinsic if it is possible.
8788 // TODO: Support floating point min/max.
8789 bool AllCmpSingleUse = true;
8790 SelectPatternResult SelectPattern;
8791 SelectPattern.Flavor = SPF_UNKNOWN;
8792 if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
8793 Value *LHS, *RHS;
8794 auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
8795 if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor))
8796 return false;
8797 if (SelectPattern.Flavor != SPF_UNKNOWN &&
8798 SelectPattern.Flavor != CurrentPattern.Flavor)
8799 return false;
8800 SelectPattern = CurrentPattern;
8801 AllCmpSingleUse &=
8802 match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value()));
8803 return true;
8804 })) {
8805 switch (SelectPattern.Flavor) {
8806 case SPF_SMIN:
8807 return {Intrinsic::smin, AllCmpSingleUse};
8808 case SPF_UMIN:
8809 return {Intrinsic::umin, AllCmpSingleUse};
8810 case SPF_SMAX:
8811 return {Intrinsic::smax, AllCmpSingleUse};
8812 case SPF_UMAX:
8813 return {Intrinsic::umax, AllCmpSingleUse};
8814 case SPF_FMAXNUM:
8815 return {Intrinsic::maxnum, AllCmpSingleUse};
8816 case SPF_FMINNUM:
8817 return {Intrinsic::minnum, AllCmpSingleUse};
8818 default:
8819 llvm_unreachable("unexpected select pattern flavor");
8820 }
8821 }
8822 return {Intrinsic::not_intrinsic, false};
8823}
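// Hedged usage sketch (illustrative): a caller holding a list VL of
// select-based min/max values could convert them to an intrinsic form:
//   auto [ID, AllCmpSingleUse] = canConvertToMinOrMaxIntrinsic(VL);
//   if (ID != Intrinsic::not_intrinsic && AllCmpSingleUse) {
//     // Emit calls to the ID intrinsic (e.g. llvm.smin) instead.
//   }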
8824
8825bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
8826 Value *&Start, Value *&Step) {
8827 // Handle the case of a simple two-predecessor recurrence PHI.
8828 // There's a lot more that could theoretically be done here, but
8829 // this is sufficient to catch some interesting cases.
8830 if (P->getNumIncomingValues() != 2)
8831 return false;
8832
8833 for (unsigned i = 0; i != 2; ++i) {
8834 Value *L = P->getIncomingValue(i);
8835 Value *R = P->getIncomingValue(!i);
8836 auto *LU = dyn_cast<BinaryOperator>(L);
8837 if (!LU)
8838 continue;
8839 unsigned Opcode = LU->getOpcode();
8840
8841 switch (Opcode) {
8842 default:
8843 continue;
8844 // TODO: Expand list -- xor, div, gep, uaddo, etc..
8845 case Instruction::LShr:
8846 case Instruction::AShr:
8847 case Instruction::Shl:
8848 case Instruction::Add:
8849 case Instruction::Sub:
8850 case Instruction::And:
8851 case Instruction::Or:
8852 case Instruction::Mul:
8853 case Instruction::FMul: {
8854 Value *LL = LU->getOperand(0);
8855 Value *LR = LU->getOperand(1);
8856 // Find a recurrence.
8857 if (LL == P)
8858 L = LR;
8859 else if (LR == P)
8860 L = LL;
8861 else
8862 continue; // Check for recurrence with L and R flipped.
8863
8864 break; // Match!
8865 }
8866 };
8867
8868 // We have matched a recurrence of the form:
8869 // %iv = [R, %entry], [%iv.next, %backedge]
8870 // %iv.next = binop %iv, L
8871 // OR
8872 // %iv = [R, %entry], [%iv.next, %backedge]
8873 // %iv.next = binop L, %iv
8874 BO = LU;
8875 Start = R;
8876 Step = L;
8877 return true;
8878 }
8879 return false;
8880}
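// Illustrative recurrence matched above (hypothetical IR):
//   loop:
//     %iv      = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
//     %iv.next = add i32 %iv, %step
// On success, BO is set to %iv.next, Start to %start and Step to %step.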
8881
8882bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
8883 Value *&Start, Value *&Step) {
8884 BinaryOperator *BO = nullptr;
8885 P = dyn_cast<PHINode>(I->getOperand(0));
8886 if (!P)
8887 P = dyn_cast<PHINode>(I->getOperand(1));
8888 return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
8889}
8890
8891/// Return true if "icmp Pred LHS RHS" is always true.
8892static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
8893 const Value *RHS) {
8894 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
8895 return true;
8896
8897 switch (Pred) {
8898 default:
8899 return false;
8900
8901 case CmpInst::ICMP_SLE: {
8902 const APInt *C;
8903
8904 // LHS s<= LHS +_{nsw} C if C >= 0
8905 // LHS s<= LHS | C if C >= 0
8906 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))) ||
8907 match(RHS, m_Or(m_Specific(LHS), m_APInt(C))))
8908 return !C->isNegative();
8909
8910 // LHS s<= smax(LHS, V) for any V
8911 if (match(RHS, m_c_SMax(m_Specific(LHS), m_Value())))
8912 return true;
8913
8914 // smin(RHS, V) s<= RHS for any V
8915 if (match(LHS, m_c_SMin(m_Specific(RHS), m_Value())))
8916 return true;
8917
8918 // Match A to (X +_{nsw} CA) and B to (X +_{nsw} CB)
8919 const Value *X;
8920 const APInt *CLHS, *CRHS;
8921 if (match(LHS, m_NSWAddLike(m_Value(X), m_APInt(CLHS))) &&
8922 match(RHS, m_NSWAddLike(m_Specific(X), m_APInt(CRHS))))
8923 return CLHS->sle(*CRHS);
8924
8925 return false;
8926 }
8927
8928 case CmpInst::ICMP_ULE: {
8929 // LHS u<= LHS +_{nuw} V for any V
8930 if (match(RHS, m_c_Add(m_Specific(LHS), m_Value())) &&
8931 cast<OverflowingBinaryOperator>(RHS)->hasNoUnsignedWrap())
8932 return true;
8933
8934 // LHS u<= LHS | V for any V
8935 if (match(RHS, m_c_Or(m_Specific(LHS), m_Value())))
8936 return true;
8937
8938 // LHS u<= umax(LHS, V) for any V
8939 if (match(RHS, m_c_UMax(m_Specific(LHS), m_Value())))
8940 return true;
8941
8942 // RHS >> V u<= RHS for any V
8943 if (match(LHS, m_LShr(m_Specific(RHS), m_Value())))
8944 return true;
8945
8946 // RHS u/ C_ugt_1 u<= RHS
8947 const APInt *C;
8948 if (match(LHS, m_UDiv(m_Specific(RHS), m_APInt(C))) && C->ugt(1))
8949 return true;
8950
8951 // RHS & V u<= RHS for any V
8952 if (match(LHS, m_c_And(m_Specific(RHS), m_Value())))
8953 return true;
8954
8955 // umin(RHS, V) u<= RHS for any V
8956 if (match(LHS, m_c_UMin(m_Specific(RHS), m_Value())))
8957 return true;
8958
8959 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
8960 const Value *X;
8961 const APInt *CLHS, *CRHS;
8962 if (match(LHS, m_NUWAddLike(m_Value(X), m_APInt(CLHS))) &&
8963 match(RHS, m_NUWAddLike(m_Specific(X), m_APInt(CRHS))))
8964 return CLHS->ule(*CRHS);
8965
8966 return false;
8967 }
8968 }
8969}
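// Illustrative cases (assumed operands): "X u<= (X | Y)" is always true
// because OR can only set bits, and "(add nsw X, 1) s<= (add nsw X, 3)" is
// always true because the constants satisfy 1 <= 3 and both adds share X.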
8970
8971/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
8972/// ALHS ARHS" is true. Otherwise, return std::nullopt.
8973static std::optional<bool>
8974isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
8975 const Value *ARHS, const Value *BLHS, const Value *BRHS) {
8976 switch (Pred) {
8977 default:
8978 return std::nullopt;
8979
8980 case CmpInst::ICMP_SLT:
8981 case CmpInst::ICMP_SLE:
8982 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS) &&
8983 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS))
8984 return true;
8985 return std::nullopt;
8986
8987 case CmpInst::ICMP_SGT:
8988 case CmpInst::ICMP_SGE:
8989 if (isTruePredicate(CmpInst::ICMP_SLE, ALHS, BLHS) &&
8990 isTruePredicate(CmpInst::ICMP_SLE, BRHS, ARHS))
8991 return true;
8992 return std::nullopt;
8993
8994 case CmpInst::ICMP_ULT:
8995 case CmpInst::ICMP_ULE:
8996 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS) &&
8997 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS))
8998 return true;
8999 return std::nullopt;
9000
9001 case CmpInst::ICMP_UGT:
9002 case CmpInst::ICMP_UGE:
9003 if (isTruePredicate(CmpInst::ICMP_ULE, ALHS, BLHS) &&
9004 isTruePredicate(CmpInst::ICMP_ULE, BRHS, ARHS))
9005 return true;
9006 return std::nullopt;
9007 }
9008}
9009
9010/// Return true if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is true.
9011/// Return false if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is false.
9012/// Otherwise, return std::nullopt if we can't infer anything.
9013static std::optional<bool>
9014isImpliedCondMatchingOperands(CmpInst::Predicate LPred,
9015 CmpInst::Predicate RPred) {
9016 if (CmpInst::isImpliedTrueByMatchingCmp(LPred, RPred))
9017 return true;
9018 if (CmpInst::isImpliedFalseByMatchingCmp(LPred, RPred))
9019 return false;
9020
9021 return std::nullopt;
9022}
9023
9024/// Return true if "icmp LPred X, LCR" implies "icmp RPred X, RCR" is true.
9025/// Return false if "icmp LPred X, LCR" implies "icmp RPred X, RCR" is false.
9026/// Otherwise, return std::nullopt if we can't infer anything.
9027static std::optional<bool> isImpliedCondCommonOperandWithCR(
9028 CmpInst::Predicate LPred, const ConstantRange &LCR,
9029 CmpInst::Predicate RPred, const ConstantRange &RCR) {
9030 ConstantRange DomCR = ConstantRange::makeAllowedICmpRegion(LPred, LCR);
9031 // If every value satisfying lhs also satisfies rhs, then lhs implies rhs.
9032 if (DomCR.icmp(RPred, RCR))
9033 return true;
9034
9035 // If there is no overlap, lhs implies not rhs
9036 if (DomCR.icmp(CmpInst::getInversePredicate(RPred), RCR))
9037 return false;
9038 return std::nullopt;
9039}
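// Worked example (assumed constants): for "X u> 5" the exact region is
// [6, 0), every element of which satisfies "X != 0", so true is returned;
// that region has no overlap with "X u< 3" ([0, 3)), so for that RHS the
// function returns false instead.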
9040
9041/// Return true if LHS implies RHS (expanded to its components as "R0 RPred R1")
9042/// is true. Return false if LHS implies RHS is false. Otherwise, return
9043/// std::nullopt if we can't infer anything.
9044static std::optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
9045 CmpInst::Predicate RPred,
9046 const Value *R0, const Value *R1,
9047 const DataLayout &DL,
9048 bool LHSIsTrue) {
9049 Value *L0 = LHS->getOperand(0);
9050 Value *L1 = LHS->getOperand(1);
9051
9052 // The rest of the logic assumes the LHS condition is true. If that's not the
9053 // case, invert the predicate to make it so.
9054 CmpInst::Predicate LPred =
9055 LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
9056
9057 // We can have non-canonical operands, so try to normalize any common operand
9058 // to L0/R0.
9059 if (L0 == R1) {
9060 std::swap(R0, R1);
9061 RPred = ICmpInst::getSwappedPredicate(RPred);
9062 }
9063 if (R0 == L1) {
9064 std::swap(L0, L1);
9065 LPred = ICmpInst::getSwappedPredicate(LPred);
9066 }
9067 if (L1 == R1) {
9068 // If we have L0 == R0 and L1 == R1, then make L1/R1 the constants.
9069 if (L0 != R0 || match(L0, m_ImmConstant())) {
9070 std::swap(L0, L1);
9071 LPred = ICmpInst::getSwappedPredicate(LPred);
9072 std::swap(R0, R1);
9073 RPred = ICmpInst::getSwappedPredicate(RPred);
9074 }
9075 }
9076
9077 // See if we can infer anything if operand-0 matches and we have at least one
9078 // constant.
9079 const APInt *Unused;
9080 if (L0 == R0 && (match(L1, m_APInt(Unused)) || match(R1, m_APInt(Unused)))) {
9081 // Potential TODO: We could also further use the constant range of L0/R0 to
9082 // further constrain the constant ranges. At the moment this leads to
9083 // several regressions related to not transforming `multi_use(A + C0) eq/ne
9084 // C1` (see discussion: D58633).
9085 ConstantRange LCR = computeConstantRange(
9086 L1, ICmpInst::isSigned(LPred), /* UseInstrInfo=*/true, /*AC=*/nullptr,
9087 /*CxtI=*/nullptr, /*DT=*/nullptr, MaxAnalysisRecursionDepth - 1);
9088 ConstantRange RCR = computeConstantRange(
9089 R1, ICmpInst::isSigned(RPred), /* UseInstrInfo=*/true, /*AC=*/nullptr,
9090 /*CxtI=*/nullptr, /*DT=*/nullptr, MaxAnalysisRecursionDepth - 1);
9091 // Even if L1/R1 are not both constant, we can still sometimes deduce
9092 // relationship from a single constant. For example X u> Y implies X != 0.
9093 if (auto R = isImpliedCondCommonOperandWithCR(LPred, LCR, RPred, RCR))
9094 return R;
9095 // If both L1/R1 were exact constant ranges and we didn't get anything
9096 // here, we won't be able to deduce this.
9097 if (match(L1, m_APInt(Unused)) && match(R1, m_APInt(Unused)))
9098 return std::nullopt;
9099 }
9100
9101 // Can we infer anything when the two compares have matching operands?
9102 if (L0 == R0 && L1 == R1)
9103 return isImpliedCondMatchingOperands(LPred, RPred);
9104
9105 // It only really makes sense in the context of signed comparison for "X - Y
9106 // must be positive if X >= Y and no overflow".
9107 // Take SGT as an example: L0:x > L1:y and C >= 0
9108 // ==> R0:(x -nsw y) < R1:(-C) is false
9109 if ((LPred == ICmpInst::ICMP_SGT || LPred == ICmpInst::ICMP_SGE) &&
9110 match(R0, m_NSWSub(m_Specific(L0), m_Specific(L1)))) {
9111 if (match(R1, m_NonPositive()) &&
9112 isImpliedCondMatchingOperands(LPred, RPred) == false)
9113 return false;
9114 }
9115
9116 // Take SLT as an example: L0:x < L1:y and C <= 0
9117 // ==> R0:(x -nsw y) < R1:(-C) is true
9118 if ((LPred == ICmpInst::ICMP_SLT || LPred == ICmpInst::ICMP_SLE) &&
9119 match(R0, m_NSWSub(m_Specific(L0), m_Specific(L1)))) {
9120 if (match(R1, m_NonNegative()) &&
9121 isImpliedCondMatchingOperands(LPred, RPred) == true)
9122 return true;
9123 }
9124
9125 // L0 = R0 = L1 + R1, L0 >=u L1 implies R0 >=u R1, L0 <u L1 implies R0 <u R1
9126 if (L0 == R0 &&
9127 (LPred == ICmpInst::ICMP_ULT || LPred == ICmpInst::ICMP_UGE) &&
9128 (RPred == ICmpInst::ICMP_ULT || RPred == ICmpInst::ICMP_UGE) &&
9129 match(L0, m_c_Add(m_Specific(L1), m_Specific(R1))))
9130 return LPred == RPred;
9131
9132 if (LPred == RPred)
9133 return isImpliedCondOperands(LPred, L0, L1, R0, R1);
9134
9135 return std::nullopt;
9136}
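// Illustrative consequence of the nsw-sub rules above (hypothetical IR):
// given "%d = sub nsw i32 %x, %y" and a dominating "%x s> %y", the compare
// "icmp slt i32 %d, 0" is implied to be false, i.e. the difference is known
// non-negative.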
9137
9138/// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
9139/// false. Otherwise, return std::nullopt if we can't infer anything. We
9140/// expect the RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select'
9141/// instruction.
9142static std::optional<bool>
9143isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
9144 const Value *RHSOp0, const Value *RHSOp1,
9145 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
9146 // The LHS must be an 'or', 'and', or a 'select' instruction.
9147 assert((LHS->getOpcode() == Instruction::And ||
9148 LHS->getOpcode() == Instruction::Or ||
9149 LHS->getOpcode() == Instruction::Select) &&
9150 "Expected LHS to be 'and', 'or', or 'select'.");
9151
9152 assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
9153
9154 // If the result of an 'or' is false, then we know both legs of the 'or' are
9155 // false. Similarly, if the result of an 'and' is true, then we know both
9156 // legs of the 'and' are true.
9157 const Value *ALHS, *ARHS;
9158 if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
9159 (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
9160 // FIXME: Make this non-recursive.
9161 if (std::optional<bool> Implication = isImpliedCondition(
9162 ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
9163 return Implication;
9164 if (std::optional<bool> Implication = isImpliedCondition(
9165 ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
9166 return Implication;
9167 return std::nullopt;
9168 }
9169 return std::nullopt;
9170}
9171
9172std::optional<bool>
9173llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
9174 const Value *RHSOp0, const Value *RHSOp1,
9175 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
9176 // Bail out when we hit the limit.
9177 if (Depth == MaxAnalysisRecursionDepth)
9178 return std::nullopt;
9179
9180 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
9181 // example.
9182 if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
9183 return std::nullopt;
9184
9185 assert(LHS->getType()->isIntOrIntVectorTy(1) &&
9186 "Expected integer type only!");
9187
9188 // Match not
9189 if (match(LHS, m_Not(m_Value(LHS))))
9190 LHSIsTrue = !LHSIsTrue;
9191
9192 // Both LHS and RHS are icmps.
9193 const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
9194 if (LHSCmp)
9195 return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue);
9196
9197 /// The LHS should be an 'or', 'and', or a 'select' instruction. We expect
9198 /// the RHS to be an icmp.
9199 /// FIXME: Add support for and/or/select on the RHS.
9200 if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
9201 if ((LHSI->getOpcode() == Instruction::And ||
9202 LHSI->getOpcode() == Instruction::Or ||
9203 LHSI->getOpcode() == Instruction::Select))
9204 return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
9205 Depth);
9206 }
9207 return std::nullopt;
9208}
9209
9210std::optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
9211 const DataLayout &DL,
9212 bool LHSIsTrue, unsigned Depth) {
9213 // LHS ==> RHS by definition
9214 if (LHS == RHS)
9215 return LHSIsTrue;
9216
9217 // Match not
9218 bool InvertRHS = false;
9219 if (match(RHS, m_Not(m_Value(RHS)))) {
9220 if (LHS == RHS)
9221 return !LHSIsTrue;
9222 InvertRHS = true;
9223 }
9224
9225 if (const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS)) {
9226 if (auto Implied = isImpliedCondition(
9227 LHS, RHSCmp->getPredicate(), RHSCmp->getOperand(0),
9228 RHSCmp->getOperand(1), DL, LHSIsTrue, Depth))
9229 return InvertRHS ? !*Implied : *Implied;
9230 return std::nullopt;
9231 }
9232
9233 if (Depth == MaxAnalysisRecursionDepth)
9234 return std::nullopt;
9235
9236 // LHS ==> (RHS1 || RHS2) if LHS ==> RHS1 or LHS ==> RHS2
9237 // LHS ==> !(RHS1 && RHS2) if LHS ==> !RHS1 or LHS ==> !RHS2
9238 const Value *RHS1, *RHS2;
9239 if (match(RHS, m_LogicalOr(m_Value(RHS1), m_Value(RHS2)))) {
9240 if (std::optional<bool> Imp =
9241 isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
9242 if (*Imp == true)
9243 return !InvertRHS;
9244 if (std::optional<bool> Imp =
9245 isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
9246 if (*Imp == true)
9247 return !InvertRHS;
9248 }
9249 if (match(RHS, m_LogicalAnd(m_Value(RHS1), m_Value(RHS2)))) {
9250 if (std::optional<bool> Imp =
9251 isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
9252 if (*Imp == false)
9253 return InvertRHS;
9254 if (std::optional<bool> Imp =
9255 isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
9256 if (*Imp == false)
9257 return InvertRHS;
9258 }
9259
9260 return std::nullopt;
9261}
9262
9263// Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
9264 // condition dominating ContextI, or nullptr if no condition is found.
9265static std::pair<Value *, bool>
9266getDomPredecessorCondition(const Instruction *ContextI) {
9267 if (!ContextI || !ContextI->getParent())
9268 return {nullptr, false};
9269
9270 // TODO: This is a poor/cheap way to determine dominance. Should we use a
9271 // dominator tree (eg, from a SimplifyQuery) instead?
9272 const BasicBlock *ContextBB = ContextI->getParent();
9273 const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
9274 if (!PredBB)
9275 return {nullptr, false};
9276
9277 // We need a conditional branch in the predecessor.
9278 Value *PredCond;
9279 BasicBlock *TrueBB, *FalseBB;
9280 if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
9281 return {nullptr, false};
9282
9283 // The branch should get simplified. Don't bother simplifying this condition.
9284 if (TrueBB == FalseBB)
9285 return {nullptr, false};
9286
9287 assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
9288 "Predecessor block does not point to successor?");
9289
9290 // Is this condition implied by the predecessor condition?
9291 return {PredCond, TrueBB == ContextBB};
9292}
9293
9294std::optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
9295 const Instruction *ContextI,
9296 const DataLayout &DL) {
9297 assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
9298 auto PredCond = getDomPredecessorCondition(ContextI);
9299 if (PredCond.first)
9300 return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
9301 return std::nullopt;
9302}
9303
9304std::optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
9305 const Value *LHS,
9306 const Value *RHS,
9307 const Instruction *ContextI,
9308 const DataLayout &DL) {
9309 auto PredCond = getDomPredecessorCondition(ContextI);
9310 if (PredCond.first)
9311 return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
9312 PredCond.second);
9313 return std::nullopt;
9314}
9315
9316static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
9317 APInt &Upper, const InstrInfoQuery &IIQ,
9318 bool PreferSignedRange) {
9319 unsigned Width = Lower.getBitWidth();
9320 const APInt *C;
9321 switch (BO.getOpcode()) {
9322 case Instruction::Add:
9323 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
9324 bool HasNSW = IIQ.hasNoSignedWrap(&BO);
9325 bool HasNUW = IIQ.hasNoUnsignedWrap(&BO);
9326
9327 // If the caller expects a signed compare, then try to use a signed range.
9328 // Otherwise if both no-wraps are set, use the unsigned range because it
9329 // is never larger than the signed range. Example:
9330 // "add nuw nsw i8 X, -2" is unsigned [254,255] vs. signed [-128, 125].
9331 if (PreferSignedRange && HasNSW && HasNUW)
9332 HasNUW = false;
9333
9334 if (HasNUW) {
9335 // 'add nuw x, C' produces [C, UINT_MAX].
9336 Lower = *C;
9337 } else if (HasNSW) {
9338 if (C->isNegative()) {
9339 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
9340 Lower = APInt::getSignedMinValue(Width);
9341 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
9342 } else {
9343 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
9344 Lower = APInt::getSignedMinValue(Width) + *C;
9345 Upper = APInt::getSignedMaxValue(Width) + 1;
9346 }
9347 }
9348 }
9349 break;
9350
9351 case Instruction::And:
9352 if (match(BO.getOperand(1), m_APInt(C)))
9353 // 'and x, C' produces [0, C].
9354 Upper = *C + 1;
9355 // X & -X is a power of two or zero. So we can cap the value at max power of
9356 // two.
9357 if (match(BO.getOperand(0), m_Neg(m_Specific(BO.getOperand(1)))) ||
9358 match(BO.getOperand(1), m_Neg(m_Specific(BO.getOperand(0)))))
9359 Upper = APInt::getSignedMinValue(Width) + 1;
9360 break;
9361
9362 case Instruction::Or:
9363 if (match(BO.getOperand(1), m_APInt(C)))
9364 // 'or x, C' produces [C, UINT_MAX].
9365 Lower = *C;
9366 break;
9367
9368 case Instruction::AShr:
9369 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
9370 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
9371 Lower = APInt::getSignedMinValue(Width).ashr(*C);
9372 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
9373 } else if (match(BO.getOperand(0), m_APInt(C))) {
9374 unsigned ShiftAmount = Width - 1;
9375 if (!C->isZero() && IIQ.isExact(&BO))
9376 ShiftAmount = C->countr_zero();
9377 if (C->isNegative()) {
9378 // 'ashr C, x' produces [C, C >> (Width-1)]
9379 Lower = *C;
9380 Upper = C->ashr(ShiftAmount) + 1;
9381 } else {
9382 // 'ashr C, x' produces [C >> (Width-1), C]
9383 Lower = C->ashr(ShiftAmount);
9384 Upper = *C + 1;
9385 }
9386 }
9387 break;
9388
9389 case Instruction::LShr:
9390 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
9391 // 'lshr x, C' produces [0, UINT_MAX >> C].
9392 Upper = APInt::getAllOnes(Width).lshr(*C) + 1;
9393 } else if (match(BO.getOperand(0), m_APInt(C))) {
9394 // 'lshr C, x' produces [C >> (Width-1), C].
9395 unsigned ShiftAmount = Width - 1;
9396 if (!C->isZero() && IIQ.isExact(&BO))
9397 ShiftAmount = C->countr_zero();
9398 Lower = C->lshr(ShiftAmount);
9399 Upper = *C + 1;
9400 }
9401 break;
9402
9403 case Instruction::Shl:
9404 if (match(BO.getOperand(0), m_APInt(C))) {
9405 if (IIQ.hasNoUnsignedWrap(&BO)) {
9406 // 'shl nuw C, x' produces [C, C << CLZ(C)]
9407 Lower = *C;
9408 Upper = Lower.shl(Lower.countl_zero()) + 1;
9409 } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
9410 if (C->isNegative()) {
9411 // 'shl nsw C, x' produces [C << CLO(C)-1, C]
9412 unsigned ShiftAmount = C->countl_one() - 1;
9413 Lower = C->shl(ShiftAmount);
9414 Upper = *C + 1;
9415 } else {
9416 // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
9417 unsigned ShiftAmount = C->countl_zero() - 1;
9418 Lower = *C;
9419 Upper = C->shl(ShiftAmount) + 1;
9420 }
9421 } else {
9422 // If lowbit is set, value can never be zero.
9423 if ((*C)[0])
9424 Lower = APInt::getOneBitSet(Width, 0);
9425 // If we are shifting a constant, the largest result occurs when the
9426 // longest sequence of consecutive ones is shifted into the high bits
9427 // (breaking ties in favor of the higher sequence). At the moment we take
9428 // a liberal upper bound on this by just popcounting the constant.
9429 // TODO: There may be a bitwise trick for finding the longest/highest
9430 // consecutive sequence of ones (the naive method is an O(Width) loop).
9431 Upper = APInt::getHighBitsSet(Width, C->popcount()) + 1;
9432 }
9433 } else if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
9434 Upper = APInt::getBitsSetFrom(Width, C->getZExtValue()) + 1;
9435 }
9436 break;
9437
9438 case Instruction::SDiv:
9439 if (match(BO.getOperand(1), m_APInt(C))) {
9440 APInt IntMin = APInt::getSignedMinValue(Width);
9441 APInt IntMax = APInt::getSignedMaxValue(Width);
9442 if (C->isAllOnes()) {
9443 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
9444 // where C != -1 and C != 0 and C != 1
9445 Lower = IntMin + 1;
9446 Upper = IntMax + 1;
9447 } else if (C->countl_zero() < Width - 1) {
9448 // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
9449 // where C != -1 and C != 0 and C != 1
9450 Lower = IntMin.sdiv(*C);
9451 Upper = IntMax.sdiv(*C);
9452 if (Lower.sgt(Upper))
9453 std::swap(Lower, Upper);
9454 Upper = Upper + 1;
9455 assert(Upper != Lower && "Upper part of range has wrapped!");
9456 }
9457 } else if (match(BO.getOperand(0), m_APInt(C))) {
9458 if (C->isMinSignedValue()) {
9459 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
9460 Lower = *C;
9461 Upper = Lower.lshr(1) + 1;
9462 } else {
9463 // 'sdiv C, x' produces [-|C|, |C|].
9464 Upper = C->abs() + 1;
9465 Lower = (-Upper) + 1;
9466 }
9467 }
9468 break;
9469
9470 case Instruction::UDiv:
9471 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
9472 // 'udiv x, C' produces [0, UINT_MAX / C].
9473 Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
9474 } else if (match(BO.getOperand(0), m_APInt(C))) {
9475 // 'udiv C, x' produces [0, C].
9476 Upper = *C + 1;
9477 }
9478 break;
9479
9480 case Instruction::SRem:
9481 if (match(BO.getOperand(1), m_APInt(C))) {
9482 // 'srem x, C' produces (-|C|, |C|).
9483 Upper = C->abs();
9484 Lower = (-Upper) + 1;
9485 } else if (match(BO.getOperand(0), m_APInt(C))) {
9486 if (C->isNegative()) {
9487 // 'srem -|C|, x' produces [-|C|, 0].
9488 Upper = 1;
9489 Lower = *C;
9490 } else {
9491 // 'srem |C|, x' produces [0, |C|].
9492 Upper = *C + 1;
9493 }
9494 }
9495 break;
9496
9497 case Instruction::URem:
9498 if (match(BO.getOperand(1), m_APInt(C)))
9499 // 'urem x, C' produces [0, C).
9500 Upper = *C;
9501 else if (match(BO.getOperand(0), m_APInt(C)))
9502 // 'urem C, x' produces [0, C].
9503 Upper = *C + 1;
9504 break;
9505
9506 default:
9507 break;
9508 }
9509}
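// Worked example (illustrative): for "shl nuw i8 13, %x" the code above sets
// Lower = 13 and Upper = (13 << countl_zero(13)) + 1 = 209, i.e. the result
// is known to lie in [13, 208].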
9510
9511static ConstantRange getRangeForIntrinsic(const IntrinsicInst &II) {
9512 unsigned Width = II.getType()->getScalarSizeInBits();
9513 const APInt *C;
9514 switch (II.getIntrinsicID()) {
9515 case Intrinsic::ctpop:
9516 case Intrinsic::ctlz:
9517 case Intrinsic::cttz:
9518 // Maximum of set/clear bits is the bit width.
9519 return ConstantRange::getNonEmpty(APInt::getZero(Width),
9520 APInt(Width, Width + 1));
9521 case Intrinsic::uadd_sat:
9522 // uadd.sat(x, C) produces [C, UINT_MAX].
9523 if (match(II.getOperand(0), m_APInt(C)) ||
9524 match(II.getOperand(1), m_APInt(C)))
9525 return ConstantRange::getNonEmpty(*C, APInt::getZero(Width));
9526 break;
9527 case Intrinsic::sadd_sat:
9528 if (match(II.getOperand(0), m_APInt(C)) ||
9529 match(II.getOperand(1), m_APInt(C))) {
9530 if (C->isNegative())
9531 // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
9532 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
9533 APInt::getSignedMaxValue(Width) + *C +
9534 1);
9535
9536 // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
9537 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width) + *C,
9538 APInt::getSignedMaxValue(Width) + 1);
9539 }
9540 break;
9541 case Intrinsic::usub_sat:
9542 // usub.sat(C, x) produces [0, C].
9543 if (match(II.getOperand(0), m_APInt(C)))
9544 return ConstantRange::getNonEmpty(APInt::getZero(Width), *C + 1);
9545
9546 // usub.sat(x, C) produces [0, UINT_MAX - C].
9547 if (match(II.getOperand(1), m_APInt(C)))
9548 return ConstantRange::getNonEmpty(APInt::getZero(Width),
9549 APInt::getMaxValue(Width) - *C + 1);
9550 break;
9551 case Intrinsic::ssub_sat:
9552 if (match(II.getOperand(0), m_APInt(C))) {
9553 if (C->isNegative())
9554 // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
9555 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
9556 *C - APInt::getSignedMinValue(Width) +
9557 1);
9558
9559 // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
9560 return ConstantRange::getNonEmpty(*C - APInt::getSignedMaxValue(Width),
9561 APInt::getSignedMaxValue(Width) + 1);
9562 } else if (match(II.getOperand(1), m_APInt(C))) {
9563 if (C->isNegative())
9564 // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
9565 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width) - *C,
9566 APInt::getSignedMaxValue(Width) + 1);
9567
9568 // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
9569 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
9570 APInt::getSignedMaxValue(Width) - *C +
9571 1);
9572 }
9573 break;
9574 case Intrinsic::umin:
9575 case Intrinsic::umax:
9576 case Intrinsic::smin:
9577 case Intrinsic::smax:
9578 if (!match(II.getOperand(0), m_APInt(C)) &&
9579 !match(II.getOperand(1), m_APInt(C)))
9580 break;
9581
9582 switch (II.getIntrinsicID()) {
9583 case Intrinsic::umin:
9584 return ConstantRange::getNonEmpty(APInt::getZero(Width), *C + 1);
9585 case Intrinsic::umax:
9586 return ConstantRange::getNonEmpty(*C, APInt::getZero(Width));
9587 case Intrinsic::smin:
9588 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width),
9589 *C + 1);
9590 case Intrinsic::smax:
9591 return ConstantRange::getNonEmpty(*C,
9592 APInt::getSignedMaxValue(Width) + 1);
9593 default:
9594 llvm_unreachable("Must be min/max intrinsic");
9595 }
9596 break;
9597 case Intrinsic::abs:
9598 // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
9599 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
9600 if (match(II.getOperand(1), m_One()))
9601 return ConstantRange::getNonEmpty(APInt::getZero(Width),
9602 APInt::getSignedMaxValue(Width) + 1);
9603
9604 return ConstantRange::getNonEmpty(APInt::getZero(Width),
9605 APInt::getSignedMinValue(Width) + 1);
9606 case Intrinsic::vscale:
9607 if (!II.getParent() || !II.getFunction())
9608 break;
9609 return getVScaleRange(II.getFunction(), Width);
9610 case Intrinsic::scmp:
9611 case Intrinsic::ucmp:
9612 return ConstantRange::getNonEmpty(APInt::getAllOnes(Width),
9613 APInt(Width, 2));
9614 default:
9615 break;
9616 }
9617
9618 return ConstantRange::getFull(Width);
9619}
9620
9621static ConstantRange getRangeForSelectPattern(const SelectInst &SI,
9622 const InstrInfoQuery &IIQ) {
9623 unsigned BitWidth = SI.getType()->getScalarSizeInBits();
9624 const Value *LHS = nullptr, *RHS = nullptr;
9625 auto R = matchSelectPattern(&SI, LHS, RHS);
9626 if (R.Flavor == SPF_UNKNOWN)
9627 return ConstantRange::getFull(BitWidth);
9628
9629 if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
9630 // If the negation part of the abs (in RHS) has the NSW flag,
9631 // then the result of abs(X) is [0..SIGNED_MAX],
9632 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
9633 if (match(RHS, m_Neg(m_Specific(LHS))) &&
9634 IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
9635 return ConstantRange::getNonEmpty(APInt::getZero(BitWidth),
9636 APInt::getSignedMaxValue(BitWidth) + 1);
9637
9638 return ConstantRange::getNonEmpty(APInt::getZero(BitWidth),
9639 APInt::getSignedMinValue(BitWidth) + 1);
9640 }
9641
9642 if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
9643 // The result of -abs(X) is <= 0.
9644 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(BitWidth),
9645 APInt(BitWidth, 1));
9646 }
9647
9648 const APInt *C;
9649 if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
9650 return ConstantRange::getFull(BitWidth);
9651
9652 switch (R.Flavor) {
9653 case SPF_UMIN:
9654 return ConstantRange::getNonEmpty(APInt::getZero(BitWidth), *C + 1);
9655 case SPF_UMAX:
9656 return ConstantRange::getNonEmpty(*C, APInt::getZero(BitWidth));
9657 case SPF_SMIN:
9658 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(BitWidth),
9659 *C + 1);
9660 case SPF_SMAX:
9661 return ConstantRange::getNonEmpty(*C,
9662 APInt::getSignedMaxValue(BitWidth) + 1);
9663 default:
9664 return ConstantRange::getFull(BitWidth);
9665 }
9666}
9667
9669 // The maximum representable value of a half is 65504. For floats the maximum
9670 // value is 3.4e38 which requires roughly 129 bits.
9671 unsigned BitWidth = I->getType()->getScalarSizeInBits();
9672 if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy())
9673 return;
9674 if (isa<FPToSIInst>(I) && BitWidth >= 17) {
9675 Lower = APInt(BitWidth, -65504, true);
9676 Upper = APInt(BitWidth, 65505);
9677 }
9678
9679 if (isa<FPToUIInst>(I) && BitWidth >= 16) {
9680 // For a fptoui the lower limit is left as 0.
9681 Upper = APInt(BitWidth, 65505);
9682 }
9683}
9684
9685ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned,
9686 bool UseInstrInfo, AssumptionCache *AC,
9687 const Instruction *CtxI,
9688 const DominatorTree *DT,
9689 unsigned Depth) {
9690 assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
9691
9692 if (Depth == MaxAnalysisRecursionDepth)
9693 return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
9694
9695 if (auto *C = dyn_cast<Constant>(V))
9696 return C->toConstantRange();
9697
9698 unsigned BitWidth = V->getType()->getScalarSizeInBits();
9699 InstrInfoQuery IIQ(UseInstrInfo);
9700 ConstantRange CR = ConstantRange::getFull(BitWidth);
9701 if (auto *BO = dyn_cast<BinaryOperator>(V)) {
9702 APInt Lower = APInt(BitWidth, 0);
9703 APInt Upper = APInt(BitWidth, 0);
9704 // TODO: Return ConstantRange.
9705 setLimitsForBinOp(*BO, Lower, Upper, IIQ, ForSigned);
9706 CR = ConstantRange::getNonEmpty(Lower, Upper);
9707 } else if (auto *II = dyn_cast<IntrinsicInst>(V))
9708 CR = getRangeForIntrinsic(*II);
9709 else if (auto *SI = dyn_cast<SelectInst>(V)) {
9710 ConstantRange CRTrue = computeConstantRange(
9711 SI->getTrueValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1);
9712 ConstantRange CRFalse = computeConstantRange(
9713 SI->getFalseValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1);
9714 CR = CRTrue.unionWith(CRFalse);
9715 CR = CR.intersectWith(getRangeForSelectPattern(*SI, IIQ));
9716 } else if (isa<FPToUIInst>(V) || isa<FPToSIInst>(V)) {
9717 APInt Lower = APInt(BitWidth, 0);
9718 APInt Upper = APInt(BitWidth, 0);
9719 // TODO: Return ConstantRange.
9720 setLimitForFPToI(cast<Instruction>(V), Lower, Upper);
9721 CR = ConstantRange::getNonEmpty(Lower, Upper);
9722 } else if (const auto *A = dyn_cast<Argument>(V))
9723 if (std::optional<ConstantRange> Range = A->getRange())
9724 CR = *Range;
9725
9726 if (auto *I = dyn_cast<Instruction>(V)) {
9727 if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
9728 CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
9729
9730 if (const auto *CB = dyn_cast<CallBase>(V))
9731 if (std::optional<ConstantRange> Range = CB->getRange())
9732 CR = CR.intersectWith(*Range);
9733 }
9734
9735 if (CtxI && AC) {
9736 // Try to restrict the range based on information from assumptions.
9737 for (auto &AssumeVH : AC->assumptionsFor(V)) {
9738 if (!AssumeVH)
9739 continue;
9740 CallInst *I = cast<CallInst>(AssumeVH);
9741 assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
9742 "Got assumption for the wrong function!");
9743 assert(I->getIntrinsicID() == Intrinsic::assume &&
9744 "must be an assume intrinsic");
9745
9746 if (!isValidAssumeForContext(I, CtxI, DT))
9747 continue;
9748 Value *Arg = I->getArgOperand(0);
9749 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
9750 // Currently we just use information from comparisons.
9751 if (!Cmp || Cmp->getOperand(0) != V)
9752 continue;
9753 // TODO: Set "ForSigned" parameter via Cmp->isSigned()?
9754 ConstantRange RHS =
9755 computeConstantRange(Cmp->getOperand(1), /* ForSigned */ false,
9756 UseInstrInfo, AC, I, DT, Depth + 1);
9757 CR = CR.intersectWith(
9758 ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS));
9759 }
9760 }
9761
9762 return CR;
9763}
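// Hedged usage sketch (illustrative, not part of this file): a client with an
// integer-typed Value *V can query its range and act on it, e.g.
//   ConstantRange CR = computeConstantRange(V, /*ForSigned=*/false,
//                                           /*UseInstrInfo=*/true, AC, CtxI, DT);
//   if (CR.isAllNonNegative()) {
//     // V is known to be non-negative here.
//   }
// AC, CtxI and DT may be null; they only add assumption- and dominance-based
// facts to the result.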
9764
9765static void
9766addValueAffectedByCondition(Value *V,
9767 function_ref<void(Value *)> InsertAffected) {
9768 assert(V != nullptr);
9769 if (isa<Argument>(V) || isa<GlobalValue>(V)) {
9770 InsertAffected(V);
9771 } else if (auto *I = dyn_cast<Instruction>(V)) {
9772 InsertAffected(V);
9773
9774 // Peek through unary operators to find the source of the condition.
9775 Value *Op;
9776 if (match(I, m_CombineOr(m_PtrToInt(m_Value(Op)), m_Trunc(m_Value(Op))))) {
9777 if (isa<Instruction>(Op) || isa<Argument>(Op))
9778 InsertAffected(Op);
9779 }
9780 }
9781}
9782
9783void llvm::findValuesAffectedByCondition(
9784 Value *Cond, bool IsAssume, function_ref<void(Value *)> InsertAffected) {
9785 auto AddAffected = [&InsertAffected](Value *V) {
9786 addValueAffectedByCondition(V, InsertAffected);
9787 };
9788
9789 auto AddCmpOperands = [&AddAffected, IsAssume](Value *LHS, Value *RHS) {
9790 if (IsAssume) {
9791 AddAffected(LHS);
9792 AddAffected(RHS);
9793 } else if (match(RHS, m_Constant()))
9794 AddAffected(LHS);
9795 };
9796
9797 SmallVector<Value *, 8> Worklist;
9798 SmallPtrSet<Value *, 8> Visited;
9799 Worklist.push_back(Cond);
9800 while (!Worklist.empty()) {
9801 Value *V = Worklist.pop_back_val();
9802 if (!Visited.insert(V).second)
9803 continue;
9804
9805 CmpInst::Predicate Pred;
9806 Value *A, *B, *X;
9807
9808 if (IsAssume) {
9809 AddAffected(V);
9810 if (match(V, m_Not(m_Value(X))))
9811 AddAffected(X);
9812 }
9813
9814 if (match(V, m_LogicalOp(m_Value(A), m_Value(B)))) {
9815 // assume(A && B) is split to -> assume(A); assume(B);
9816 // assume(!(A || B)) is split to -> assume(!A); assume(!B);
9817 // Finally, assume(A || B) / assume(!(A && B)) generally don't provide
9818 // enough information to be worth handling (intersection of information as
9819 // opposed to union).
9820 if (!IsAssume) {
9821 Worklist.push_back(A);
9822 Worklist.push_back(B);
9823 }
9824 } else if (match(V, m_ICmp(Pred, m_Value(A), m_Value(B)))) {
9825 AddCmpOperands(A, B);
9826
9827 if (ICmpInst::isEquality(Pred)) {
9828 if (match(B, m_ConstantInt())) {
9829 Value *Y;
9830 // (X & C) or (X | C) or (X ^ C).
9831 // (X << C) or (X >>_s C) or (X >>_u C).
9832 if (match(A, m_BitwiseLogic(m_Value(X), m_ConstantInt())) ||
9833 match(A, m_Shift(m_Value(X), m_ConstantInt())))
9834 AddAffected(X);
9835 else if (match(A, m_And(m_Value(X), m_Value(Y))) ||
9836 match(A, m_Or(m_Value(X), m_Value(Y)))) {
9837 AddAffected(X);
9838 AddAffected(Y);
9839 }
9840 }
9841 } else {
9842 if (match(B, m_ConstantInt())) {
9843 // Handle (A + C1) u< C2, which is the canonical form of
9844 // A > C3 && A < C4.
9845 if (match(A, m_AddLike(m_Value(X), m_ConstantInt())))
9846 AddAffected(X);
9847
9848 if (ICmpInst::isUnsigned(Pred)) {
9849 Value *Y;
9850 // X & Y u> C -> X >u C && Y >u C
9851 // X | Y u< C -> X u< C && Y u< C
9852 // X nuw+ Y u< C -> X u< C && Y u< C
9853 if (match(A, m_And(m_Value(X), m_Value(Y))) ||
9854 match(A, m_Or(m_Value(X), m_Value(Y))) ||
9855 match(A, m_NUWAdd(m_Value(X), m_Value(Y)))) {
9856 AddAffected(X);
9857 AddAffected(Y);
9858 }
9859 // X nuw- Y u> C -> X u> C
9860 if (match(A, m_NUWSub(m_Value(X), m_Value())))
9861 AddAffected(X);
9862 }
9863 }
9864
9865 // Handle icmp slt/sgt (bitcast X to int), 0/-1, which is supported
9866 // by computeKnownFPClass().
9867 if (match(A, m_ElementWiseBitCast(m_Value(X)))) {
9868 if (Pred == ICmpInst::ICMP_SLT && match(B, m_Zero()))
9869 InsertAffected(X);
9870 else if (Pred == ICmpInst::ICMP_SGT && match(B, m_AllOnes()))
9871 InsertAffected(X);
9872 }
9873 }
9874 } else if (match(Cond, m_FCmp(Pred, m_Value(A), m_Value(B)))) {
9875 AddCmpOperands(A, B);
9876
9877 // fcmp fneg(x), y
9878 // fcmp fabs(x), y
9879 // fcmp fneg(fabs(x)), y
9880 if (match(A, m_FNeg(m_Value(A))))
9881 AddAffected(A);
9882 if (match(A, m_FAbs(m_Value(A))))
9883 AddAffected(A);
9884
9885 } else if (match(V, m_Intrinsic<Intrinsic::is_fpclass>(m_Value(A),
9886 m_Value()))) {
9887 // Handle patterns that computeKnownFPClass() support.
9888 AddAffected(A);
9889 }
9890 }
9891}
amdgpu AMDGPU Register Bank Select
Rewrite undef for PHI
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
This file contains the simple types necessary to represent the attributes associated with functions a...
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static MaybeAlign getAlign(Value *Ptr)
Definition: IRBuilder.cpp:531
static const unsigned MaxDepth
static bool hasNoUnsignedWrap(BinaryOperator &I)
This file contains the declarations for metadata subclasses.
Module.h This file contains the declarations for the Module class.
This file contains some templates that are useful if you are working with the STL at all.
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS)
static std::optional< bool > isImpliedCondICmps(const ICmpInst *LHS, CmpInst::Predicate RPred, const Value *R0, const Value *R1, const DataLayout &DL, bool LHSIsTrue)
Return true if LHS implies RHS (expanded to its components as "R0 RPred R1") is true.
static cl::opt< unsigned > DomConditionsMaxUses("dom-conditions-max-uses", cl::Hidden, cl::init(20))
static unsigned computeNumSignBitsVectorConstant(const Value *V, const APInt &DemandedElts, unsigned TyBits)
For vector constants, loop over the elements and find the constant with the minimum number of sign bi...
static bool isKnownNonZeroFromOperator(const Operator *I, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS, const Value *RHS)
Return true if "icmp Pred LHS RHS" is always true.
static bool isNonZeroMul(const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q, unsigned BitWidth, Value *X, Value *Y, bool NSW, bool NUW)
static bool isKnownNonNullFromDominatingCondition(const Value *V, const Instruction *CtxI, const DominatorTree *DT)
static const Value * getUnderlyingObjectFromInt(const Value *V)
This is the function that does the work of looking through basic ptrtoint+arithmetic+inttoptr sequenc...
static bool isNonZeroShift(const Operator *I, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q, const KnownBits &KnownVal)
static bool rangeMetadataExcludesValue(const MDNode *Ranges, const APInt &Value)
Does the 'Range' metadata (which must be a valid MD_range operand list) ensure that the value it's at...
static bool outputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty)
static bool inputDenormalIsIEEE(const Function &F, const Type *Ty)
Return true if it's possible to assume IEEE treatment of input denormals in F for Val.
static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR)
Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
static bool isNonEqualShl(const Value *V1, const Value *V2, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and the shift is nuw or nsw.
static void addValueAffectedByCondition(Value *V, function_ref< void(Value *)> InsertAffected)
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
static std::tuple< Value *, FPClassTest, FPClassTest > exactClass(Value *V, FPClassTest M)
Return the return value for fcmpImpliesClass for a compare that produces an exact class test.
static bool haveNoCommonBitsSetSpecialCases(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
static std::optional< bool > isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred, const Value *RHSOp0, const Value *RHSOp1, const DataLayout &DL, bool LHSIsTrue, unsigned Depth)
Return true if LHS implies RHS is true.
static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower, APInt &Upper, const InstrInfoQuery &IIQ, bool PreferSignedRange)
static Value * lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2, Instruction::CastOps *CastOp)
Helps to match a select pattern in case of a type mismatch.
static std::pair< Value *, bool > getDomPredecessorCondition(const Instruction *ContextI)
static bool isKnownNonEqual(const Value *V1, const Value *V2, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
Return true if it is known that V1 != V2.
static bool isKnownNonZero(const Value *V, const APInt &DemandedElts, const SimplifyQuery &Q, unsigned Depth)
Return true if the given value is known to be non-zero when defined.
static bool isNonEqualSelect(const Value *V1, const Value *V2, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
UndefPoisonKind
static bool includesPoison(UndefPoisonKind Kind)
static bool isNonEqualMul(const Value *V1, const Value *V2, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and the multiplication is nuw o...
static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS)
Match clamp pattern for float types without care about NaNs or signed zeros.
static bool includesUndef(UndefPoisonKind Kind)
static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero, unsigned Depth, SimplifyQuery &Q)
Try to detect a recurrence that the value of the induction variable is always a power of two (or zero...
static bool isModifyingBinopOfNonZero(const Value *V1, const Value *V2, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
Return true if V1 == (binop V2, X), where X is known non-zero.
static ConstantRange getRangeForSelectPattern(const SelectInst &SI, const InstrInfoQuery &IIQ)
static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred, FastMathFlags FMF, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, unsigned Depth)
static uint64_t GetStringLengthH(const Value *V, SmallPtrSetImpl< const PHINode * > &PHIs, unsigned CharSize)
If we can compute the length of the string pointed to by the specified pointer, return 'len+1'.
static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(const Value *V, bool AllowLifetime, bool AllowDroppable)
static bool isSignedMinMaxClamp(const Value *Select, const Value *&In, const APInt *&CLow, const APInt *&CHigh)
static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1, bool NSW, bool NUW, const APInt &DemandedElts, KnownBits &KnownOut, KnownBits &Known2, unsigned Depth, const SimplifyQuery &Q)
static void computeKnownBitsFromOperator(const Operator *I, const APInt &DemandedElts, KnownBits &Known, unsigned Depth, const SimplifyQuery &Q)
static bool directlyImpliesPoison(const Value *ValAssumedPoison, const Value *V, unsigned Depth)
static void computeKnownBitsFromCmp(const Value *V, CmpInst::Predicate Pred, Value *LHS, Value *RHS, KnownBits &Known, const SimplifyQuery &Q)
static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TVal, Value *FVal, unsigned Depth)
Recognize variations of: a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
static void computeKnownFPClassFromCond(const Value *V, Value *Cond, bool CondIsTrue, const Instruction *CxtI, KnownFPClass &KnownFromContext)
static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper)
static bool isSameUnderlyingObjectInLoop(const PHINode *PN, const LoopInfo *LI)
PN defines a loop-variant pointer to an object.
static bool isNonEqualPointersWithRecursiveGEP(const Value *A, const Value *B, const SimplifyQuery &Q)
static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II, const APInt *&CLow, const APInt *&CHigh)
static std::optional< bool > isImpliedCondCommonOperandWithCR(CmpInst::Predicate LPred, const ConstantRange &LCR, CmpInst::Predicate RPred, const ConstantRange &RCR)
Return true if "icmp LPred X, LCR" implies "icmp RPred X, RCR" is true.
static void computeKnownFPClassForFPTrunc(const Operator *Op, const APInt &DemandedElts, FPClassTest InterestedClasses, KnownFPClass &Known, unsigned Depth, const SimplifyQuery &Q)
static bool handleGuaranteedWellDefinedOps(const Instruction *I, const CallableT &Handle)
Enumerates all operands of I that are guaranteed to not be undef or poison.
static void computeKnownBits(const Value *V, const APInt &DemandedElts, KnownBits &Known, unsigned Depth, const SimplifyQuery &Q)
Determine which bits of V are known to be either zero or one and return them in the Known bit set.
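As an illustration of what "known bits" means in practice, here is a small standalone sketch. It assumes the public convenience overload of computeKnownBits in llvm/Analysis/ValueTracking.h that takes a Value and a DataLayout and returns a KnownBits; the module and function shape are made up for the example.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("known-bits-demo", Ctx);
  IRBuilder<> IRB(Ctx);

  auto *FTy = FunctionType::get(IRB.getInt32Ty(), {IRB.getInt32Ty()}, false);
  Function *Fn = Function::Create(FTy, Function::ExternalLinkage, "h", M);
  IRB.SetInsertPoint(BasicBlock::Create(Ctx, "entry", Fn));

  // %m = and i32 %x, 15 -- whatever %x is, the top 28 bits of %m must be zero.
  Value *Masked = IRB.CreateAnd(Fn->getArg(0), IRB.getInt32(15), "m");
  IRB.CreateRet(Masked);

  KnownBits Known = computeKnownBits(Masked, M.getDataLayout());
  errs() << "min leading zeros: " << Known.countMinLeadingZeros() << "\n"; // expect 28
}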
static KnownFPClass computeKnownFPClassFromContext(const Value *V, const SimplifyQuery &Q)
static Value * getNotValue(Value *V)
If the input value is the result of a 'not' op, constant integer, or vector splat of a constant integ...
static void computeKnownBitsFromCond(const Value *V, Value *Cond, KnownBits &Known, unsigned Depth, const SimplifyQuery &SQ, bool Invert)
static void computeKnownBitsFromICmpCond(const Value *V, ICmpInst *Cmp, KnownBits &Known, const SimplifyQuery &SQ, bool Invert)
static ConstantRange getRangeForIntrinsic(const IntrinsicInst &II)
static KnownBits computeKnownBitsForHorizontalOperation(const Operator *I, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q, const function_ref< KnownBits(const KnownBits &, const KnownBits &)> KnownBitsFunc)
static bool matchOpWithOpEqZero(Value *Op0, Value *Op1)
static bool isNonZeroRecurrence(const PHINode *PN)
Try to detect a recurrence that monotonically increases/decreases from a non-zero starting value.
static SelectPatternResult matchClamp(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal)
Recognize variations of: CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
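Clamp recognition builds on ordinary min/max matching. As a smaller standalone illustration of that layer, the sketch below assumes the public matchSelectPattern(Value *, Value *&, Value *&) overload and the SPF_SMAX flavor declared in llvm/Analysis/ValueTracking.h; the IR and names are invented for the example.

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("select-pattern-demo", Ctx);
  IRBuilder<> IRB(Ctx);

  auto *FTy = FunctionType::get(IRB.getInt32Ty(), {IRB.getInt32Ty()}, false);
  Function *Fn = Function::Create(FTy, Function::ExternalLinkage, "g", M);
  IRB.SetInsertPoint(BasicBlock::Create(Ctx, "entry", Fn));

  // %s = select (icmp sgt %x, 7), %x, 7   ; i.e. smax(%x, 7)
  Value *X = Fn->getArg(0);
  Value *Cmp = IRB.CreateICmpSGT(X, IRB.getInt32(7));
  Value *Sel = IRB.CreateSelect(Cmp, X, IRB.getInt32(7), "s");
  IRB.CreateRet(Sel);

  Value *LHS, *RHS;
  SelectPatternResult Res = matchSelectPattern(Sel, LHS, RHS);
  errs() << "is smax: " << (Res.Flavor == SPF_SMAX) << "\n"; // expect 1
}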
static bool shiftAmountKnownInRange(const Value *ShiftAmount)
Shifts return poison if shiftwidth is larger than the bitwidth.
static bool isEphemeralValueOf(const Instruction *I, const Value *E)
static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, Value *CmpLHS, Value *CmpRHS, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, unsigned Depth)
Match non-obvious integer minimum and maximum sequences.
static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth, const SimplifyQuery &Q)
Test whether a GEP's result is known to be non-null.
static bool handleGuaranteedNonPoisonOps(const Instruction *I, const CallableT &Handle)
Enumerates all operands of I that are guaranteed to not be poison.
static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q, unsigned BitWidth, Value *X, Value *Y)
static std::optional< std::pair< Value *, Value * > > getInvertibleOperands(const Operator *Op1, const Operator *Op2)
If the pair of operators are the same invertible function, return the operands of the function co...
static void computeKnownBitsFromShiftOperator(const Operator *I, const APInt &DemandedElts, KnownBits &Known, KnownBits &Known2, unsigned Depth, const SimplifyQuery &Q, function_ref< KnownBits(const KnownBits &, const KnownBits &, bool)> KF)
Compute known bits from a shift operator, including those with a non-constant shift amount.
static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS)
static bool inputDenormalIsIEEEOrPosZero(const Function &F, const Type *Ty)
static KnownBits getKnownBitsFromAndXorOr(const Operator *I, const APInt &DemandedElts, const KnownBits &KnownLHS, const KnownBits &KnownRHS, unsigned Depth, const SimplifyQuery &Q)
static bool isKnownNonZeroFromAssume(const Value *V, const SimplifyQuery &Q)
static std::optional< bool > isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, const Value *ARHS, const Value *BLHS, const Value *BRHS)
Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred ALHS ARHS" is true.
static unsigned ComputeNumSignBitsImpl(const Value *V, const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q)
Return the number of times the sign bit of the register is replicated into the other bits.
static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth, const SimplifyQuery &Q, unsigned BitWidth, Value *X, Value *Y, bool NSW, bool NUW)
static const Instruction * safeCxtI(const Value *V, const Instruction *CxtI)
static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth, const SimplifyQuery &Q)
Return true if the given value is known to have exactly one bit set when defined.
static bool isKnownNonNaN(const Value *V, FastMathFlags FMF)
static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW, const APInt &DemandedElts, KnownBits &Known, KnownBits &Known2, unsigned Depth, const SimplifyQuery &Q)
static std::optional< bool > isImpliedCondMatchingOperands(CmpInst::Predicate LPred, CmpInst::Predicate RPred)
Return true if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is true.
static Value * BuildSubAggregate(Value *From, Value *To, Type *IndexedType, SmallVectorImpl< unsigned > &Idxs, unsigned IdxSkip, BasicBlock::iterator InsertBefore)
void computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, KnownFPClass &Known, unsigned Depth, const SimplifyQuery &Q)
Value * RHS
Value * LHS
bool isNegative() const
Definition: APFloat.h:1351
bool isFinite() const
Definition: APFloat.h:1356
APInt bitcastToAPInt() const
Definition: APFloat.h:1262
static APFloat getLargest(const fltSemantics &Sem, bool Negative=false)
Returns the largest finite number in the given semantics.
Definition: APFloat.h:1046
static APFloat getInf(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Infinity.
Definition: APFloat.h:1006
FPClassTest classify() const
Return the FPClassTest which will return true for the value.
Definition: APFloat.cpp:5315
bool isSmallestNormalized() const
Definition: APFloat.h:1371
Class for arbitrary precision integers.
Definition: APInt.h:77
APInt udiv(const APInt &RHS) const
Unsigned division operation.
Definition: APInt.cpp:1543
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:211
void clearBit(unsigned BitPosition)
Set a given bit to 0.
Definition: APInt.h:1384
bool isMinSignedValue() const
Determine if this is the smallest signed value.
Definition: APInt.h:400
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1497
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
Definition: APInt.h:1369
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
Definition: APInt.h:1363
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition: APInt.cpp:1002
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
Definition: APInt.h:183
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition: APInt.h:1307
unsigned ceilLogBase2() const
Definition: APInt.h:1712
bool sgt(const APInt &RHS) const
Signed greater than comparison.
Definition: APInt.h:1178
bool isAllOnes() const
Determine if all bits are set. This is true for zero-width values.
Definition: APInt.h:348
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
Definition: APInt.h:1159
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:357
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition: APInt.cpp:1636
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1445
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition: APInt.h:1088
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
Definition: APInt.h:186
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
Definition: APInt.h:193
bool isNegative() const
Determine sign of this APInt.
Definition: APInt.h:306
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
Definition: APInt.h:1226
APInt sdiv(const APInt &RHS) const
Signed division function for APInt.
Definition: APInt.cpp:1614
void clearAllBits()
Set every bit to 0.
Definition: APInt.h:1374
APInt reverseBits() const
Definition: APInt.cpp:737
bool sle(const APInt &RHS) const
Signed less or equal comparison.
Definition: APInt.h:1143
unsigned getNumSignBits() const
Computes the number of leading bits of this APInt that are equal to its sign bit.
Definition: APInt.h:1584
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
Definition: APInt.h:196
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:1010
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
Definition: APInt.h:333
unsigned logBase2() const
Definition: APInt.h:1709
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
Definition: APInt.h:804
void setAllBits()
Set every bit to 1.
Definition: APInt.h:1296
bool getBoolValue() const
Convert APInt to a boolean value.
Definition: APInt.h:448
bool isMaxSignedValue() const
Determine if this is the largest signed value.
Definition: APInt.h:382
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition: APInt.h:1127
APInt shl(unsigned shiftAmt) const
Left-shift function.
Definition: APInt.h:850
APInt byteSwap() const
Definition: APInt.cpp:715
bool slt(const APInt &RHS) const
Signed less than comparison.
Definition: APInt.h:1107
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition: APInt.h:273
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition: APInt.h:177
void setLowBits(unsigned loBits)
Set the bottom loBits bits.
Definition: APInt.h:1366
bool sge(const APInt &RHS) const
Signed greater or equal comparison.
Definition: APInt.h:1214
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
Definition: APInt.h:263
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition: APInt.h:216
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
Definition: APInt.h:835
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
Definition: APInt.h:828
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition: APInt.h:1198
void clearSignBit()
Set the sign bit to 0.
Definition: APInt.h:1408
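The APInt entries above read more naturally with a concrete value in hand. The following standalone sketch is illustrative only (nothing in it comes from ValueTracking.cpp); it relies solely on the methods listed above.

#include "llvm/ADT/APInt.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  // 8-bit value 0x96 = 150 unsigned, -106 signed.
  APInt V(8, 0x96);
  errs() << "isNegative:     " << V.isNegative() << "\n";            // 1
  errs() << "logBase2:       " << V.logBase2() << "\n";              // 7
  errs() << "getNumSignBits: " << V.getNumSignBits() << "\n";        // 1
  errs() << "lshr(4):        " << V.lshr(4).getZExtValue() << "\n";  // 9
  errs() << "ashr(4):        " << V.ashr(4).getZExtValue() << "\n";  // 249 (0xF9, sign bits shifted in)
  errs() << "ult(200):       " << V.ult(APInt(8, 200)) << "\n";      // 1
  errs() << "slt(0):         " << V.slt(APInt(8, 0)) << "\n";        // 1
}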
an instruction to allocate memory on the stack
Definition: Instructions.h:61
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:154
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
iterator begin() const
Definition: ArrayRef.h:153
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition: ArrayRef.h:195
Class to represent array types.
Definition: DerivedTypes.h:371
Type * getElementType() const
Definition: DerivedTypes.h:384
This represents the llvm.assume intrinsic.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
std::optional< unsigned > getVScaleRangeMax() const
Returns the maximum value for the vscale_range attribute or std::nullopt when unknown.
Definition: Attributes.cpp:466
unsigned getVScaleRangeMin() const
Returns the minimum value for the vscale_range attribute.
Definition: Attributes.cpp:460
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:203
bool isSingleEdge() const
Check if this is the only edge between Start and End.
Definition: Dominators.cpp:51
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
iterator end()
Definition: BasicBlock.h:461
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:448
InstListType::const_iterator const_iterator
Definition: BasicBlock.h:178
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
Definition: BasicBlock.cpp:367
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
Definition: BasicBlock.cpp:459
const BasicBlock * getSingleSuccessor() const
Return the successor of this block if it has a single successor.
Definition: BasicBlock.cpp:489
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:219
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:177
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:239
Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
Definition: InstrTypes.h:442
Conditional or Unconditional Branch instruction.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1236
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1465
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
bool onlyReadsMemory(unsigned OpNo) const
Definition: InstrTypes.h:1816
Value * getCalledOperand() const
Definition: InstrTypes.h:1458
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1410
unsigned arg_size() const
Definition: InstrTypes.h:1408
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:530
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:747
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:757
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:760
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:774
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:786
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:787
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:763
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:772
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:761
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:762
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:781
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:780
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:784
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:771
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:765
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:768
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:782
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:769
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:764
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:766
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:785
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:773
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:783
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:770
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:759
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:767
bool isSigned() const
Definition: InstrTypes.h:1007
static bool isEquality(Predicate pred)
Determine if this is an equals/not equals predicate.
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:909
bool isTrueWhenEqual() const
This is just a convenience.
Definition: InstrTypes.h:1056
bool isFPPredicate() const
Definition: InstrTypes.h:864
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:871
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:847
static bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2)
Determine if Pred1 implies Pred2 is true when two compares have matching operands.
bool isIntPredicate() const
Definition: InstrTypes.h:865
static bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2)
Determine if Pred1 implies Pred2 is false when two compares have matching operands.
bool isUnsigned() const
Definition: InstrTypes.h:1013
An array constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
Definition: Constants.h:693
ConstantDataSequential - A vector or array constant whose element type is a simple 1/2/4/8-byte integ...
Definition: Constants.h:584
StringRef getAsString() const
If this array is isString(), then this method returns the array as a StringRef.
Definition: Constants.h:659
uint64_t getElementAsInteger(unsigned i) const
If this is a sequential container of integers (of any size), return the specified element in the low ...
Definition: Constants.cpp:3074
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
Definition: Constants.h:767
static Constant * getBitCast(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2295
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:2253
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:269
This is the shared class of boolean and integer constants.
Definition: Constants.h:81
static ConstantInt * getTrue(LLVMContext &Context)
Definition: Constants.cpp:850
bool isZero() const
This is just a convenience method to make client code smaller for a common case.
Definition: Constants.h:206
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
Definition: Constants.h:155
This class represents a range of values.
Definition: ConstantRange.h:47
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
static ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned)
Initialize a range based on a known bits constraint.
OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const
Return whether unsigned sub of the two ranges always/never overflows.
bool isAllNegative() const
Return true if all values in this range are negative.
OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const
Return whether unsigned add of the two ranges always/never overflows.
KnownBits toKnownBits() const
Return known bits for values in this range.
APInt getUnsignedMin() const
Return the smallest unsigned value contained in the ConstantRange.
bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other? NOTE: false does not mean that inverse pr...
APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
OverflowResult unsignedMulMayOverflow(const ConstantRange &Other) const
Return whether unsigned mul of the two ranges always/never overflows.
bool isAllNonNegative() const
Return true if all values in this range are non-negative.
static ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other)
Produce the smallest range such that all values that may satisfy the given predicate with any value c...
ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
OverflowResult signedAddMayOverflow(const ConstantRange &Other) const
Return whether signed add of the two ranges always/never overflows.
APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
OverflowResult
Represents whether an operation on the given constant range is known to always or never overflow.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
Definition: ConstantRange.h:84
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
OverflowResult signedSubMayOverflow(const ConstantRange &Other) const
Return whether signed sub of the two ranges always/never overflows.
ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
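A brief standalone sketch tying a few of the ConstantRange operations above together (illustrative only; it assumes nothing beyond the APIs listed in these entries):

#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  // Exact range of X for "icmp ult X, 42" on i8: [0, 42).
  ConstantRange R =
      ConstantRange::makeExactICmpRegion(CmpInst::ICMP_ULT, APInt(8, 42));
  errs() << "contains 41: " << R.contains(APInt(8, 41)) << "\n"; // 1
  errs() << "contains 42: " << R.contains(APInt(8, 42)) << "\n"; // 0

  // Intersect with the signed non-negative range "icmp sge X, 0", i.e. [0, 128).
  ConstantRange NonNeg =
      ConstantRange::makeExactICmpRegion(CmpInst::ICMP_SGE, APInt(8, 0));
  ConstantRange Both = R.intersectWith(NonNeg);
  errs() << "umax: " << Both.getUnsignedMax().getZExtValue() << "\n"; // 41
}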
This is an important base class in LLVM.
Definition: Constant.h:42
Constant * getSplatValue(bool AllowPoison=false) const
If all elements of the vector constant have the same value, return that value.
Definition: Constants.cpp:1686
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
Definition: Constants.cpp:432
bool isZeroValue() const
Return true if the value is negative zero or null value.
Definition: Constants.cpp:76
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition: Constants.cpp:90
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
bool isLittleEndian() const
Layout endianness...
Definition: DataLayout.h:195
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:695
unsigned getIndexTypeSizeInBits(Type *Ty) const
Layout size of the index used in GEP calculation.
Definition: DataLayout.cpp:749
unsigned getPointerTypeSizeInBits(Type *) const
Layout pointer size, in bits, based on the type.
Definition: DataLayout.cpp:738
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition: DataLayout.h:621
ArrayRef< BranchInst * > conditionsFor(const Value *V) const
Access the list of branches which affect this value.
DomTreeNodeBase * getIDom() const
NodeT * getBlock() const
DomTreeNodeBase< NodeT > * getNode(const NodeT *BB) const
getNode - return the (Post)DominatorTree node for the specified basic block.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
This instruction extracts a struct member or array element value from an aggregate value.
ArrayRef< unsigned > getIndices() const
unsigned getNumIndices() const
static Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
Utility class for floating point operations which can have information about relaxed accuracy require...
Definition: Operator.h:202
Convenience struct for specifying and reasoning about fast-math flags.
Definition: FMF.h:20
bool noSignedZeros() const
Definition: FMF.h:68
void setNoSignedZeros(bool B=true)
Definition: FMF.h:85
bool noNaNs() const
Definition: FMF.h:66
const BasicBlock & getEntryBlock() const
Definition: Function.h:807
DenormalMode getDenormalMode(const fltSemantics &FPType) const
Returns the denormal handling type for the default rounding mode of the function.
Definition: Function.cpp:810
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:915
const DataLayout & getDataLayout() const
Get the data layout of the module this global belongs to.
Definition: Globals.cpp:124
Type * getValueType() const
Definition: GlobalValue.h:296
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
bool isConstant() const
If the value is a global constant, its value is immutable throughout the runtime execution of the pro...
bool hasDefinitiveInitializer() const
hasDefinitiveInitializer - Whether the global variable has an initializer, and any other instances of...
This instruction compares its operands according to the predicate given to the constructor.
bool isEquality() const
Return true if this predicate is either EQ or NE.
This instruction inserts a struct field of array element value into an aggregate value.
Value * getAggregateOperand()
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
bool isBinaryOp() const
Definition: Instruction.h:279
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:70
bool comesBefore(const Instruction *Other) const
Given an instruction Other in the same basic block as this instruction, return true if this instructi...
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:274
bool isUnaryOp() const
Definition: Instruction.h:278
const DataLayout & getDataLayout() const
Get the data layout of the module this instruction belongs to.
Definition: Instruction.cpp:74
A wrapper class for inspecting calls to intrinsic functions.
Definition: IntrinsicInst.h:48
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
An instruction for reading from memory.
Definition: Instructions.h:174
Value * getPointerOperand()
Definition: Instructions.h:253
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:209
bool isLoopHeader(const BlockT *BB) const
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:39
Metadata node.
Definition: Metadata.h:1069
This is a utility class that provides an abstraction for the common functionality between Instruction...
Definition: Operator.h:32
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Definition: Operator.h:42
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition: Operator.h:77
iterator_range< const_block_iterator > blocks() const
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1852
A udiv or sdiv instruction, which can be marked as "exact", indicating that no bits are destroyed.
Definition: Operator.h:152
bool isExact() const
Test whether this division is known to be exact, with zero remainder.
Definition: Operator.h:171
This class represents the LLVM 'select' instruction.
const Value * getFalseValue() const
const Value * getCondition() const
const Value * getTrueValue() const
This instruction constructs a fixed permutation of two input vectors.
VectorType * getType() const
Overload to return most specific vector type.
static void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
size_type size() const
Definition: SmallPtrSet.h:95
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:346
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:435
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:367
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:502
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void reserve(size_type N)
Definition: SmallVector.h:676
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:696
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
Definition: DataLayout.h:571
TypeSize getElementOffset(unsigned Idx) const
Definition: DataLayout.h:600
Class to represent struct types.
Definition: DerivedTypes.h:216
unsigned getNumElements() const
Random access to the elements.
Definition: DerivedTypes.h:341
Type * getElementType(unsigned N) const
Definition: DerivedTypes.h:342
Provides information about what library functions are available for the current target.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
unsigned getIntegerBitWidth() const
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:261
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:230
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:251
uint64_t getArrayNumElements() const
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:298
static IntegerType * getInt16Ty(LLVMContext &C)
static IntegerType * getInt8Ty(LLVMContext &C)
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:258
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
Definition: Type.h:239
static IntegerType * getInt32Ty(LLVMContext &C)
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:224
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:343
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1833
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
User * getUser() const
Returns the User that contains this Use.
Definition: Use.h:72
unsigned getOperandNo() const
Return the operand # of this use in its User.
Definition: Use.cpp:31
op_range operands()
Definition: User.h:242
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition: Value.h:736
iterator_range< user_iterator > users()
Definition: Value.h:421
const KnownBits & getKnownBits(const SimplifyQuery &Q) const
Definition: WithCache.h:58
PointerType getValue() const
Definition: WithCache.h:56
Represents an op.with.overflow intrinsic.
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:202
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition: TypeSize.h:171
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
TypeSize getSequentialElementStride(const DataLayout &DL) const
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
A range adaptor for a pair of iterators.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define UINT64_MAX
Definition: DataTypes.h:77
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
Definition: PatternMatch.h:524
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
MaxMin_match< FCmpInst, LHS, RHS, ufmin_pred_ty > m_UnordFMin(const LHS &L, const RHS &R)
Match an 'unordered' floating point minimum function.
PtrToIntSameSize_match< OpTy > m_PtrToIntSameSize(const DataLayout &DL, const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
Definition: PatternMatch.h:664
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWAdd(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
Definition: PatternMatch.h:619
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
auto m_LogicalOp()
Matches either L && R or L || R where L and R are arbitrary values.
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
Definition: PatternMatch.h:165
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
cst_pred_ty< is_power2_or_zero > m_Power2OrZero()
Match an integer or vector of 0 or power-of-2 values.
Definition: PatternMatch.h:652
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoSignedWrap > m_NSWSub(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
Definition: PatternMatch.h:764
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:875
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap, true > m_c_NUWAdd(const LHS &L, const RHS &R)
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
Definition: PatternMatch.h:560
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
Definition: PatternMatch.h:168
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
Definition: PatternMatch.h:592
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
ExtractValue_match< Ind, Val_t > m_ExtractValue(const Val_t &V)
Match a single index ExtractValue instruction.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, FCmpInst, FCmpInst::Predicate > m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R)
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
Definition: PatternMatch.h:822
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition: PatternMatch.h:893
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
Definition: PatternMatch.h:599
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
OneUse_match< T > m_OneUse(const T &SubPattern)
Definition: PatternMatch.h:67
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > m_c_SMin(const LHS &L, const RHS &R)
Matches an SMin with LHS and RHS in either order.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
Definition: PatternMatch.h:854
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true > m_c_UMax(const LHS &L, const RHS &R)
Matches a UMax with LHS and RHS in either order.
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate, true > m_c_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > m_c_UMin(const LHS &L, const RHS &R)
Matches a UMin with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
apfloat_match m_APFloatAllowPoison(const APFloat *&Res)
Match APFloat while allowing poison in splat vector constants.
Definition: PatternMatch.h:322
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
SpecificCmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_SpecificICmp(ICmpInst::Predicate MatchPred, const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true > m_c_SMax(const LHS &L, const RHS &R)
Matches an SMax with LHS and RHS in either order.
MaxMin_match< FCmpInst, LHS, RHS, ufmax_pred_ty > m_UnordFMax(const LHS &L, const RHS &R)
Match an 'unordered' floating point maximum function.
VScaleVal_match m_VScale()
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:299
MaxMin_match< FCmpInst, LHS, RHS, ofmax_pred_ty > m_OrdFMax(const LHS &L, const RHS &R)
Match an 'ordered' floating point maximum function.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap >, DisjointOr_match< LHS, RHS > > m_NSWAddLike(const LHS &L, const RHS &R)
Match either "add nsw" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
BinOpPred_match< LHS, RHS, is_irem_op > m_IRem(const LHS &L, const RHS &R)
Matches integer remainder operations.
apfloat_match m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
Definition: PatternMatch.h:316
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
MaxMin_match< FCmpInst, LHS, RHS, ofmin_pred_ty > m_OrdFMin(const LHS &L, const RHS &R)
Match an 'ordered' floating point minimum function.
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
Definition: PatternMatch.h:189
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
cst_pred_ty< is_nonpositive > m_NonPositive()
Match an integer or vector of non-positive values.
Definition: PatternMatch.h:582
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:612
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
match_combine_or< OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoUnsignedWrap >, DisjointOr_match< LHS, RHS > > m_NUWAddLike(const LHS &L, const RHS &R)
Match either "add nuw" or "or disjoint".
BinOpPred_match< LHS, RHS, is_bitwiselogic_op > m_BitwiseLogic(const LHS &L, const RHS &R)
Matches bitwise logic operations.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
Definition: PatternMatch.h:239
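The m_* combinators indexed above are used roughly as in this standalone sketch. It is an illustration only: the IR and names are invented, and it relies on nothing beyond the PatternMatch entries listed here.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::PatternMatch;

int main() {
  LLVMContext Ctx;
  Module M("patterns", Ctx);
  IRBuilder<> IRB(Ctx);

  auto *FTy = FunctionType::get(IRB.getVoidTy(), {IRB.getInt32Ty()}, false);
  Function *Fn = Function::Create(FTy, Function::ExternalLinkage, "g", M);
  IRB.SetInsertPoint(BasicBlock::Create(Ctx, "entry", Fn));

  // %v = shl i32 %x, 3
  Value *V = IRB.CreateShl(Fn->getArg(0), IRB.getInt32(3), "v");
  IRB.CreateRetVoid();

  Value *X;
  const APInt *C;
  // m_Shl + m_Value + m_APInt bind the shifted value and the constant amount.
  if (match(V, m_Shl(m_Value(X), m_APInt(C))))
    errs() << "shl of " << *X << " by " << C->getZExtValue() << "\n";
}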
static unsigned decodeVSEW(unsigned VSEW)
unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul)
static constexpr unsigned RVVBitsPerBlock
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
bool haveNoCommonBitsSet(const WithCache< const Value * > &LHSCache, const WithCache< const Value * > &RHSCache, const SimplifyQuery &SQ)
Return true if LHS and RHS have no common bits set.
bool mustExecuteUBIfPoisonOnPathTo(Instruction *Root, Instruction *OnPathTo, DominatorTree *DT)
Return true if undefined behavior would provable be executed on the path to OnPathTo if Root produced...
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
OverflowResult
@ NeverOverflows
Never overflows.
@ AlwaysOverflowsHigh
Always overflows in the direction of signed/unsigned max value.
@ AlwaysOverflowsLow
Always overflows in the direction of signed/unsigned min value.
@ MayOverflow
May or may not overflow.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1680
bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &DL, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
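A minimal sketch (hypothetical helper, not from this file), assuming V is an integer-typed value and DL is the module's DataLayout: checks whether V is provably a multiple of 16 by asking whether its low four bits are known zero.
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/ValueTracking.h"
static bool isMultipleOf16(const llvm::Value *V, const llvm::DataLayout &DL) {
  unsigned BW = V->getType()->getScalarSizeInBits();
  llvm::APInt LowFour = llvm::APInt::getLowBitsSet(BW, 4);
  return llvm::MaskedValueIsZero(V, LowFour, llvm::SimplifyQuery(DL));
}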
bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
Definition: ScopeExit.h:59
bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI)
bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
const Value * getArgumentAliasingToReturnedPointer(const CallBase *Call, bool MustPreserveNullness)
This function returns the call pointer argument that is considered the same as the returned pointer by aliasing rules.
bool isAssumeLikeIntrinsic(const Instruction *I)
Return true if it is an intrinsic that cannot be speculated but also cannot trap.
AllocaInst * findAllocaForValue(Value *V, bool OffsetZero=false)
Returns unique alloca where the value comes from, or nullptr.
APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth)
Return the minimum or maximum constant value for the specified integer min/max flavor and type.
void getGuaranteedNonPoisonOps(const Instruction *I, SmallVectorImpl< const Value * > &Ops)
Insert operands of I into Ops such that I will trigger undefined behavior if I is executed and that o...
bool isOnlyUsedInZeroComparison(const Instruction *CxtI)
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
bool getConstantStringInfo(const Value *V, StringRef &Str, bool TrimAtNul=true)
This function computes the length of a null-terminated C string pointed to by V.
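A minimal sketch (hypothetical helper, not from this file), assuming V points at a constant C string such as a global string literal; on success Str references the contents without the trailing NUL.
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/ValueTracking.h"
static bool stringIsHello(const llvm::Value *V) {
  llvm::StringRef Str;
  return llvm::getConstantStringInfo(V, Str) && Str == "hello";
}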
bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Returns true if V is always a dereferenceable pointer with an alignment greater than or equal to that requested.
Definition: Loads.cpp:201
bool onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V)
Return true if the only users of this pointer are lifetime markers or droppable instructions.
Constant * ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset)
bool getUnderlyingObjectsForCodeGen(const Value *V, SmallVectorImpl< Value * > &Objects)
This is a wrapper around getUnderlyingObjects and adds support for basic ptrtoint+arithmetic+inttoptr...
std::pair< Intrinsic::ID, bool > canConvertToMinOrMaxIntrinsic(ArrayRef< Value * > VL)
Check if the values in VL are select instructions that can be converted to a min or max (vector) intr...
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice, unsigned ElementSize, uint64_t Offset=0)
Returns true if the value V is a pointer into a ConstantDataArray.
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
Definition: bit.h:317
bool isGuaranteedToExecuteForEveryIteration(const Instruction *I, const Loop *L)
Return true if this function can prove that the instruction I is executed for every iteration of the ...
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition: STLExtras.h:2098
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
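A minimal sketch (hypothetical helper, not from this file): strips GEPs and casts off a pointer and checks whether the base object is a local alloca.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"
static bool basedOnAlloca(const llvm::Value *Ptr) {
  const llvm::Value *Base = llvm::getUnderlyingObject(Ptr);
  return llvm::isa<llvm::AllocaInst>(Base);
}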
bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given value is known to have exactly one bit set when defined.
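A minimal sketch (hypothetical helper, not from this file), assuming Divisor and DL come from the caller: a udiv by a value known to be a power of two could be rewritten as a right shift.
#include "llvm/Analysis/ValueTracking.h"
static bool udivIsShiftable(const llvm::Value *Divisor,
                            const llvm::DataLayout &DL) {
  return llvm::isKnownToBeAPowerOfTwo(Divisor, DL, /*OrZero=*/false);
}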
bool mustSuppressSpeculation(const LoadInst &LI)
Return true if speculation of the given load must be suppressed to avoid ordering or interfering with...
Definition: Loads.cpp:357
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64-bit edition).
Definition: MathExtras.h:296
gep_type_iterator gep_type_end(const User *GEP)
CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered=false)
Return the canonical comparison predicate for the specified minimum/maximum flavor.
void computeKnownBitsFromContext(const Value *V, KnownBits &Known, unsigned Depth, const SimplifyQuery &Q)
Merge bits known from context-dependent facts into Known.
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:346
bool isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be undef, but may be poison.
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
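A minimal sketch (hypothetical helper, not from this file): asks whether the unsigned value of V is provably below 256.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
static bool fitsInByte(const llvm::Value *V) {
  llvm::ConstantRange CR = llvm::computeConstantRange(V, /*ForSigned=*/false);
  return CR.getUnsignedMax().ult(256);
}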
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition: bit.h:215
bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of WO's result is used only along the paths control dependen...
RetainedKnowledge getKnowledgeValidInContext(const Value *V, ArrayRef< Attribute::AttrKind > AttrKinds, const Instruction *CtxI, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr)
Return a valid Knowledge associated to the Value V if its Attribute kind is in AttrKinds and the know...
RetainedKnowledge getKnowledgeFromBundle(AssumeInst &Assume, const CallBase::BundleOpInfo &BOI)
This extracts the Knowledge from an element of an operand bundle.
bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
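A minimal sketch (hypothetical helper, not from this file): recognizes PN as an add-recurrence of the form "iv = phi [Start, ...], [iv + Step, ...]".
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"
static bool isAddRecurrencePhi(const llvm::PHINode *PN) {
  llvm::BinaryOperator *BO = nullptr;
  llvm::Value *Start = nullptr, *Step = nullptr;
  return llvm::matchSimpleRecurrence(PN, BO, Start, Step) &&
         BO->getOpcode() == llvm::Instruction::Add;
}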
bool isSafeToSpeculativelyExecuteWithOpcode(unsigned Opcode, const Instruction *Inst, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true)
This returns the same result as isSafeToSpeculativelyExecute if Opcode is the actual opcode of Inst.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
KnownBits analyzeKnownBitsFromAndXorOr(const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS, unsigned Depth, const SimplifyQuery &SQ)
Using KnownBits LHS/RHS produce the known bits for logic op (and/xor/or).
OverflowResult computeOverflowForUnsignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ, bool IsNSW=false)
bool getShuffleDemandedElts(int SrcWidth, ArrayRef< int > Mask, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS, bool AllowUndefElts=false)
Transform a shuffle mask's output demanded element mask into demanded element masks for the 2 operand...
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:340
bool isGuard(const User *U)
Returns true iff U has semantics of a guard expressed in a form of call of llvm.experimental....
Definition: GuardUtils.cpp:18
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
Definition: bit.h:281
SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF)
Return the inverse minimum/maximum flavor of the specified flavor.
constexpr unsigned MaxAnalysisRecursionDepth
Definition: ValueTracking.h:44
void getGuaranteedWellDefinedOps(const Instruction *I, SmallVectorImpl< const Value * > &Ops)
Insert operands of I into Ops such that I will trigger undefined behavior if I is executed and that o...
OverflowResult computeOverflowForSignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
std::tuple< Value *, FPClassTest, FPClassTest > fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
Compute the possible floating-point classes that LHS could be, based on fcmp Pred LHS,...
SelectPatternFlavor
Specific patterns of select instructions we can match.
@ SPF_ABS
Absolute value.
@ SPF_NABS
Negated absolute value.
@ SPF_FMAXNUM
Floating point maxnum.
@ SPF_UMIN
Unsigned minimum.
@ SPF_UMAX
Unsigned maximum.
@ SPF_SMAX
Signed maximum.
@ SPF_UNKNOWN
@ SPF_FMINNUM
Floating point minnum.
bool isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(const CallBase *Call, bool MustPreserveNullness)
{launder,strip}.invariant.group returns a pointer that aliases its argument, and it only captures point...
void adjustKnownBitsForSelectArm(KnownBits &Known, Value *Cond, Value *Arm, bool Invert, unsigned Depth, const SimplifyQuery &Q)
Adjust Known for the given select Arm to include information from the select Cond.
bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
void getHorizDemandedEltsForFirstOperand(unsigned VectorBitWidth, const APInt &DemandedElts, APInt &DemandedLHS, APInt &DemandedRHS)
Compute the demanded elements mask of horizontal binary operations.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool programUndefinedIfPoison(const Instruction *Inst)
SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind and providing the out param...
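A minimal sketch (hypothetical helper, not from this file): checks whether Sel, typically a select instruction, computes a signed maximum of two values.
#include "llvm/Analysis/ValueTracking.h"
static bool isSMaxIdiom(llvm::Value *Sel) {
  llvm::Value *LHS = nullptr, *RHS = nullptr;
  llvm::SelectPatternResult SPR = llvm::matchSelectPattern(Sel, LHS, RHS);
  return SPR.Flavor == llvm::SPF_SMAX;
}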
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:2132
bool programUndefinedIfUndefOrPoison(const Instruction *Inst)
Return true if this function can prove that if Inst is executed and yields a poison value or undef bi...
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr, bool UseVariableInfo=true)
Return true if the instruction does not have any effects besides calculating the result and does not ...
FPClassTest inverse_fabs(FPClassTest Mask)
Return the test mask which returns true after fabs is applied to the value.
uint64_t GetStringLength(const Value *V, unsigned CharSize=8)
If we can compute the length of the string pointed to by the specified pointer, return 'len+1'.
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
bool isKnownInversion(const Value *X, const Value *Y)
Return true iff:
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
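A minimal sketch (hypothetical helper, not from this file), assuming Divisor and DL come from the caller: a division whose divisor is known non-zero cannot trap on divide-by-zero.
#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/ValueTracking.h"
static bool divisorNonZero(const llvm::Value *Divisor,
                           const llvm::DataLayout &DL) {
  return llvm::isKnownNonZero(Divisor, llvm::SimplifyQuery(DL));
}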
constexpr int PoisonMaskElem
bool onlyUsedByLifetimeMarkers(const Value *V)
Return true if the only users of this pointer are lifetime markers.
Intrinsic::ID getIntrinsicForCallSite(const CallBase &CB, const TargetLibraryInfo *TLI)
Map a call instruction to an intrinsic ID.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
const Value * getUnderlyingObjectAggressive(const Value *V)
Like getUnderlyingObject(), but will try harder to find a single underlying object.
OverflowResult computeOverflowForSignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
bool isKnownNegative(const Value *V, const SimplifyQuery &DL, unsigned Depth=0)
Returns true if the given value is known to be negative (i.e. non-positive and non-zero).
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return true if the given values are known to be non-equal when defined.
@ Add
Sum of integers.
ConstantRange computeConstantRangeIncludingKnownBits(const WithCache< const Value * > &V, bool ForSigned, const SimplifyQuery &SQ)
Combine constant ranges from computeConstantRange() and computeKnownBits().
SelectPatternNaNBehavior
Behavior when a floating point min/max is given one NaN and one non-NaN as input.
@ SPNB_RETURNS_NAN
Given one NaN input, returns the NaN.
@ SPNB_RETURNS_OTHER
Given one NaN input, returns the non-NaN.
@ SPNB_RETURNS_ANY
Given one NaN input, can return either (or both operands are known non-NaN).
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
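A minimal sketch (hypothetical helper, not from this file), assuming V is an integer-typed value: returns true when its low eight bits are all proven zero.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Support/KnownBits.h"
static bool lowByteKnownZero(const llvm::Value *V, const llvm::DataLayout &DL) {
  llvm::KnownBits Known(V->getType()->getScalarSizeInBits());
  llvm::computeKnownBits(V, Known, DL);
  return Known.countMinTrailingZeros() >= 8;
}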
DWARFExpression::Operation Op
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
SelectPatternResult matchDecomposedSelectPattern(CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Determine the pattern that a select with the given compare as its predicate and given values as its t...
OverflowResult computeOverflowForUnsignedSub(const Value *LHS, const Value *RHS, const SimplifyQuery &SQ)
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
gep_type_iterator gep_type_begin(const User *GEP)
std::pair< Value *, FPClassTest > fcmpToClassTest(CmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
Returns a pair of values, which if passed to llvm.is.fpclass, returns the same result as an fcmp with...
Value * isBytewiseValue(Value *V, const DataLayout &DL)
If the specified value can be set by repeating the same byte in memory, return the i8 value that it i...
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1886
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return the number of times the sign bit of the register is replicated into the other bits.
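A minimal sketch (hypothetical helper, not from this file), assuming V64 is an i64 value: with at least 33 sign bits the value lies in [-2^31, 2^31), so a trunc-to-i32 followed by sext would round-trip it.
#include "llvm/Analysis/ValueTracking.h"
static bool fitsInI32(const llvm::Value *V64, const llvm::DataLayout &DL) {
  return llvm::ComputeNumSignBits(V64, DL) >= 33;
}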
OverflowResult computeOverflowForUnsignedAdd(const WithCache< const Value * > &LHS, const WithCache< const Value * > &RHS, const SimplifyQuery &SQ)
unsigned Log2(Align A)
Returns the log2 of the alignment.
Definition: Alignment.h:208
std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on do...
bool isGEPBasedOnPointerToString(const GEPOperator *GEP, unsigned CharSize=8)
Returns true if the GEP is based on a pointer to a string (array of.
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, unsigned Depth, const SimplifyQuery &SQ)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
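A minimal sketch (hypothetical helper, not from this file), assuming F is a scalar floating-point value (hence a single demanded element) and DL is the module's DataLayout.
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/ValueTracking.h"
static bool neverNaN(const llvm::Value *F, const llvm::DataLayout &DL) {
  llvm::KnownFPClass KFC = llvm::computeKnownFPClass(
      F, llvm::APInt(1, 1), llvm::fcAllFlags, /*Depth=*/0,
      llvm::SimplifyQuery(DL));
  return KFC.isKnownNeverNaN();
}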
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges, KnownBits &Known)
Compute known bits from the range metadata.
Value * FindInsertedValue(Value *V, ArrayRef< unsigned > idx_range, std::optional< BasicBlock::iterator > InsertBefore=std::nullopt)
Given an aggregate and a sequence of indices, see if the scalar value indexed is already around as a...
bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negation.
bool isKnownPositive(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be positive (i.e. non-negative and non-zero).
Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
unsigned ComputeMaxSignificantBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Get the upper bound on bit size for this Value Op as a signed integer.
bool mayHaveNonDefUseDependency(const Instruction &I)
Returns true if the result or effects of the given instruction I depend on values not reachable through...
bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
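A minimal sketch (hypothetical helper, not from this file), assuming CondA and CondB are i1 values such as two icmps: asks whether CondA being true forces CondB to be true (std::nullopt means "unknown").
#include <optional>
#include "llvm/Analysis/ValueTracking.h"
static bool knownImplies(const llvm::Value *CondA, const llvm::Value *CondB,
                         const llvm::DataLayout &DL) {
  std::optional<bool> Implied =
      llvm::isImpliedCondition(CondA, CondB, DL, /*LHSIsTrue=*/true);
  return Implied.has_value() && *Implied;
}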
void findValuesAffectedByCondition(Value *Cond, bool IsAssume, function_ref< void(Value *)> InsertAffected)
Call InsertAffected on all Values whose known bits / value may be affected by the condition Cond.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
static unsigned int semanticsPrecision(const fltSemantics &)
Definition: APFloat.cpp:329
static bool isRepresentableAsNormalIn(const fltSemantics &Src, const fltSemantics &Dst)
Definition: APFloat.cpp:354
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
SmallPtrSet< Value *, 4 > AffectedValues
Definition: SimplifyQuery.h:65
Represents offset+length into a ConstantDataArray.
uint64_t Length
Length of the slice.
uint64_t Offset
Slice starts at this Offset.
const ConstantDataArray * Array
ConstantDataArray pointer.
Represent subnormal handling kind for floating point instruction inputs and outputs.
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
constexpr bool outputsAreZero() const
Return true if output denormals should be flushed to 0.
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ PositiveZero
Denormals are flushed to positive zero.
@ Dynamic
Denormals have unknown treatment.
@ IEEE
IEEE-754 denormal numbers preserved.
static constexpr DenormalMode getPositiveZero()
constexpr bool inputsAreZero() const
Return true if input denormals must be implicitly treated as 0.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
static constexpr DenormalMode getIEEE()
InstrInfoQuery provides an interface to query additional information for instructions like metadata o...
Definition: SimplifyQuery.h:25
bool isExact(const BinaryOperator *Op) const
Definition: SimplifyQuery.h:48
MDNode * getMetadata(const Instruction *I, unsigned KindID) const
Definition: SimplifyQuery.h:30
bool hasNoSignedZeros(const InstT *Op) const
Definition: SimplifyQuery.h:54
bool hasNoSignedWrap(const InstT *Op) const
Definition: SimplifyQuery.h:42
bool hasNoUnsignedWrap(const InstT *Op) const
Definition: SimplifyQuery.h:36
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
Definition: KnownBits.h:290
static KnownBits sadd_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.sadd.sat(LHS, RHS)
Definition: KnownBits.cpp:753
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
Definition: KnownBits.h:175
static KnownBits mulhu(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from zero-extended multiply-hi.
Definition: KnownBits.cpp:902
unsigned countMinSignBits() const
Returns the number of times the sign bit is replicated into the other bits.
Definition: KnownBits.h:244
static KnownBits smax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smax(LHS, RHS).
Definition: KnownBits.cpp:202
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition: KnownBits.h:97
KnownBits blsi() const
Compute known bits for X & -X, which has only the lowest bit set of X set.
Definition: KnownBits.cpp:1109
void makeNonNegative()
Make this value non-negative.
Definition: KnownBits.h:113
static KnownBits usub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.usub.sat(LHS, RHS)
Definition: KnownBits.cpp:762
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
Definition: KnownBits.h:240
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition: KnownBits.h:231
static KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
Definition: KnownBits.cpp:428
static KnownBits ssub_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.ssub.sat(LHS, RHS)
Definition: KnownBits.cpp:756
static KnownBits urem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for urem(LHS, RHS).
Definition: KnownBits.cpp:1042
bool isUnknown() const
Returns true if we don't know any bits.
Definition: KnownBits.h:62
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
Definition: KnownBits.h:263
KnownBits blsmsk() const
Compute known bits for X ^ (X - 1), which has all bits up to and including the lowest set bit of X se...
Definition: KnownBits.cpp:1120
void makeNegative()
Make this value negative.
Definition: KnownBits.h:108
KnownBits trunc(unsigned BitWidth) const
Return known bits for a truncation of the value we're tracking.
Definition: KnownBits.h:150
bool hasConflict() const
Returns true if there is conflicting information.
Definition: KnownBits.h:47
unsigned countMaxPopulation() const
Returns the maximum number of bits that could be one.
Definition: KnownBits.h:278
void setAllZero()
Make all bits known to be zero and discard any previous information.
Definition: KnownBits.h:82
unsigned getBitWidth() const
Get the bit width of this value.
Definition: KnownBits.h:40
static KnownBits umax(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umax(LHS, RHS).
Definition: KnownBits.cpp:178
bool isConstant() const
Returns true if we know the value of all bits.
Definition: KnownBits.h:50
void resetAll()
Resets the known state of all bits.
Definition: KnownBits.h:70
KnownBits unionWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for either this or RHS or both.
Definition: KnownBits.h:310
static KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
Definition: KnownBits.cpp:370
bool isNonZero() const
Returns true if this value is known to be non-zero.
Definition: KnownBits.h:100
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
Definition: KnownBits.h:300
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
Definition: KnownBits.h:169
unsigned countMinTrailingOnes() const
Returns the minimum number of trailing one bits.
Definition: KnownBits.h:234
static KnownBits add(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from addition of LHS and RHS.
Definition: KnownBits.h:333
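A minimal, self-contained sketch (not from this file) of combining KnownBits: adding two values whose low bit is known zero yields a sum whose low bit is also known zero.
#include "llvm/Support/KnownBits.h"
static bool evenPlusEvenIsEven() {
  llvm::KnownBits A(8), B(8);
  A.Zero.setBit(0); // A is known even
  B.Zero.setBit(0); // B is known even
  llvm::KnownBits Sum = llvm::KnownBits::add(A, B);
  return Sum.Zero[0]; // no carry into bit 0, so the sum is known even
}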
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
Definition: KnownBits.h:185
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition: KnownBits.h:237
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
Definition: KnownBits.h:134
static KnownBits smin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for smin(LHS, RHS).
Definition: KnownBits.cpp:215
static KnownBits mulhs(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits from sign-extended multiply-hi.
Definition: KnownBits.cpp:894
static KnownBits srem(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for srem(LHS, RHS).
Definition: KnownBits.cpp:1059
static KnownBits udiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for udiv(LHS, RHS).
Definition: KnownBits.cpp:1002
static KnownBits computeForAddSub(bool Add, bool NSW, bool NUW, const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from adding LHS and RHS.
Definition: KnownBits.cpp:51
static KnownBits sdiv(const KnownBits &LHS, const KnownBits &RHS, bool Exact=false)
Compute known bits for sdiv(LHS, RHS).
Definition: KnownBits.cpp:946
static bool haveNoCommonBitsSet(const KnownBits &LHS, const KnownBits &RHS)
Return true if LHS and RHS have no common bits set.
Definition: KnownBits.h:315
bool isNegative() const
Returns true if this value is known to be negative.
Definition: KnownBits.h:94
static KnownBits sub(const KnownBits &LHS, const KnownBits &RHS, bool NSW=false, bool NUW=false)
Compute knownbits resulting from subtraction of LHS and RHS.
Definition: KnownBits.h:339
unsigned countMaxLeadingZeros() const
Returns the maximum number of leading zero bits possible.
Definition: KnownBits.h:269
void setAllOnes()
Make all bits known to be one and discard any previous information.
Definition: KnownBits.h:88
void insertBits(const KnownBits &SubBits, unsigned BitPosition)
Insert the bits from a smaller known bits starting at bitPosition.
Definition: KnownBits.h:208
static KnownBits uadd_sat(const KnownBits &LHS, const KnownBits &RHS)
Compute knownbits resulting from llvm.uadd.sat(LHS, RHS)
Definition: KnownBits.cpp:759
static KnownBits mul(const KnownBits &LHS, const KnownBits &RHS, bool NoUndefSelfMultiply=false)
Compute known bits resulting from multiplying LHS and RHS.
Definition: KnownBits.cpp:797
KnownBits anyext(unsigned BitWidth) const
Return known bits for an "any" extension of the value we're tracking, where we don't know anything ab...
Definition: KnownBits.h:156
KnownBits abs(bool IntMinIsPoison=false) const
Compute known bits for the absolute value.
Definition: KnownBits.cpp:550
static std::optional< bool > uge(const KnownBits &LHS, const KnownBits &RHS)
Determine if these known bits always give the same ICMP_UGE result.
Definition: KnownBits.cpp:512
static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
Definition: KnownBits.cpp:285
static KnownBits umin(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits for umin(LHS, RHS).
Definition: KnownBits.cpp:196
KnownBits sextOrTrunc(unsigned BitWidth) const
Return known bits for a sign extension or truncation of the value we're tracking.
Definition: KnownBits.h:195
const APInt & getConstant() const
Returns the value when all bits have a known value.
Definition: KnownBits.h:56
FPClassTest KnownFPClasses
Floating-point classes the value could be one of.
bool isKnownNeverInfinity() const
Return true if it's known this can never be an infinity.
bool cannotBeOrderedGreaterThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never greater tha...
static constexpr FPClassTest OrderedGreaterThanZeroMask
static constexpr FPClassTest OrderedLessThanZeroMask
void knownNot(FPClassTest RuleOut)
bool isKnownNeverZero() const
Return true if it's known this can never be a zero.
void copysign(const KnownFPClass &Sign)
bool isKnownNeverSubnormal() const
Return true if it's known this can never be a subnormal.
bool isKnownNeverLogicalNegZero(const Function &F, Type *Ty) const
Return true if it's known this can never be interpreted as a negative zero.
bool isKnownNeverLogicalPosZero(const Function &F, Type *Ty) const
Return true if it's known this can never be interpreted as a positive zero.
void propagateCanonicalizingSrc(const KnownFPClass &Src, const Function &F, Type *Ty)
Report known classes if Src is evaluated through a potentially canonicalizing operation.
void propagateDenormal(const KnownFPClass &Src, const Function &F, Type *Ty)
Propagate knowledge from a source value that could be a denormal or zero.
bool isUnknown() const
bool isKnownNeverNegInfinity() const
Return true if it's known this can never be -infinity.
bool isKnownNeverNegSubnormal() const
Return true if it's known this can never be a negative subnormal.
bool isKnownNeverPosZero() const
Return true if it's known this can never be a literal positive zero.
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set or false if the sign ...
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool isKnownNeverNegZero() const
Return true if it's known this can never be a negative zero.
bool isKnownNeverLogicalZero(const Function &F, Type *Ty) const
Return true if it's known this can never be interpreted as a zero.
void propagateNaN(const KnownFPClass &Src, bool PreserveSign=false)
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -...
void signBitMustBeOne()
Assume the sign bit is one.
void signBitMustBeZero()
Assume the sign bit is zero.
bool isKnownNeverPosInfinity() const
Return true if it's known this can never be +infinity.
bool isKnownNeverPosSubnormal() const
Return true if it's known this can never be a positive subnormal.
Represent one information held inside an operand bundle of an llvm.assume.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
Return true if SPF is one of the min/max flavors (i.e. not SPF_UNKNOWN, SPF_ABS, or SPF_NABS).
const DataLayout & DL
Definition: SimplifyQuery.h:71
SimplifyQuery getWithoutCondContext() const
const Instruction * CxtI
Definition: SimplifyQuery.h:75
const DominatorTree * DT
Definition: SimplifyQuery.h:73
SimplifyQuery getWithInstruction(const Instruction *I) const
AssumptionCache * AC
Definition: SimplifyQuery.h:74
const DomConditionCache * DC
Definition: SimplifyQuery.h:76
const InstrInfoQuery IIQ
Definition: SimplifyQuery.h:82
const CondContext * CC
Definition: SimplifyQuery.h:77