1//===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interface for lazy computation of value constraint
10// information.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/DenseSet.h"
16#include "llvm/ADT/STLExtras.h"
24#include "llvm/IR/CFG.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/Dominators.h"
29#include "llvm/IR/InstrTypes.h"
32#include "llvm/IR/Intrinsics.h"
33#include "llvm/IR/LLVMContext.h"
34#include "llvm/IR/Module.h"
36#include "llvm/IR/ValueHandle.h"
38#include "llvm/Support/Debug.h"
42#include <optional>
43using namespace llvm;
44using namespace PatternMatch;
45
46#define DEBUG_TYPE "lazy-value-info"
47
48// This is the number of worklist items we will process to try to discover an
49// answer for a given value.
50static const unsigned MaxProcessedPerValue = 500;
51
52char LazyValueInfoWrapperPass::ID = 0;
53LazyValueInfoWrapperPass::LazyValueInfoWrapperPass() : FunctionPass(ID) {
54 initializeLazyValueInfoWrapperPassPass(*PassRegistry::getPassRegistry());
55}
56INITIALIZE_PASS_BEGIN(LazyValueInfoWrapperPass, "lazy-value-info",
57 "Lazy Value Information Analysis", false, true)
58INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
59INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
60INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info",
61 "Lazy Value Information Analysis", false, true)
62
63namespace llvm {
64FunctionPass *createLazyValueInfoPass() {
65 return new LazyValueInfoWrapperPass();
66}
67} // namespace llvm
68
69AnalysisKey LazyValueAnalysis::Key;
70
71/// Returns true if this lattice value represents at most one possible value.
72/// This is as precise as any lattice value can get while still representing
73/// reachable code.
74static bool hasSingleValue(const ValueLatticeElement &Val) {
75 if (Val.isConstantRange() &&
76 Val.getConstantRange().isSingleElement())
77 // Integer constants are single element ranges
78 return true;
79 if (Val.isConstant())
80 // Non integer constants
81 return true;
82 return false;
83}
84
85/// Combine two sets of facts about the same value into a single set of
86/// facts. Note that this method is not suitable for merging facts along
87/// different paths in a CFG; that's what the mergeIn function is for. This
88/// is for merging facts gathered about the same value at the same location
89/// through two independent means.
90/// Notes:
91/// * This method does not promise to return the most precise possible lattice
92/// value implied by A and B. It is allowed to return any lattice element
93/// which is at least as strong as *either* A or B (unless our facts
94/// conflict, see below).
95/// * Due to unreachable code, the intersection of two lattice values could be
96/// contradictory. If this happens, we return some valid lattice value so as
97/// not to confuse the rest of LVI. Ideally, we'd always return Undefined, but
98/// we do not make this guarantee. TODO: This would be a useful enhancement.
99static ValueLatticeElement intersect(const ValueLatticeElement &A,
100 const ValueLatticeElement &B) {
101 // Undefined is the strongest state. It means the value is known to be along
102 // an unreachable path.
103 if (A.isUnknown())
104 return A;
105 if (B.isUnknown())
106 return B;
107
108 // If we gave up for one, but got a useable fact from the other, use it.
109 if (A.isOverdefined())
110 return B;
111 if (B.isOverdefined())
112 return A;
113
114 // Can't get any more precise than constants.
115 if (hasSingleValue(A))
116 return A;
117 if (hasSingleValue(B))
118 return B;
119
120 // Could be either constant range or not constant here.
121 if (!A.isConstantRange() || !B.isConstantRange()) {
122 // TODO: Arbitrary choice, could be improved
123 return A;
124 }
125
126 // Intersect two constant ranges
127 ConstantRange Range =
128 A.getConstantRange().intersectWith(B.getConstantRange());
129 // Note: An empty range is implicitly converted to unknown or undef depending
130 // on MayIncludeUndef internally.
131 return ValueLatticeElement::getRange(
132 std::move(Range), /*MayIncludeUndef=*/A.isConstantRangeIncludingUndef() ||
133 B.isConstantRangeIncludingUndef());
134}
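// For illustration (hypothetical facts about the same i8 value): if A is the
// constant range [0, 100) (say, from !range metadata) and B is [50, 200)
// (say, from a dominating assume), intersect(A, B) may return [50, 100).
// If either input were overdefined, the other input would be returned
// unchanged, as described above.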
135
136//===----------------------------------------------------------------------===//
137// LazyValueInfoCache Decl
138//===----------------------------------------------------------------------===//
139
140namespace {
141 /// A callback value handle updates the cache when values are erased.
142 class LazyValueInfoCache;
143 struct LVIValueHandle final : public CallbackVH {
144 LazyValueInfoCache *Parent;
145
146 LVIValueHandle(Value *V, LazyValueInfoCache *P = nullptr)
147 : CallbackVH(V), Parent(P) { }
148
149 void deleted() override;
150 void allUsesReplacedWith(Value *V) override {
151 deleted();
152 }
153 };
154} // end anonymous namespace
155
156namespace {
157using NonNullPointerSet = SmallDenseSet<AssertingVH<Value>, 2>;
158
159/// This is the cache kept by LazyValueInfo which
160/// maintains information about queries across the clients' queries.
161class LazyValueInfoCache {
162 /// This is all of the cached information for one basic block. It contains
163 /// the per-value lattice elements, as well as a separate set for
164 /// overdefined values to reduce memory usage. Additionally pointers
165 /// dereferenced in the block are cached for nullability queries.
166 struct BlockCacheEntry {
167 SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
168 SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
169 // std::nullopt indicates that the nonnull pointers for this basic block
170 // have not been computed yet.
171 std::optional<NonNullPointerSet> NonNullPointers;
172 };
173
174 /// Cached information per basic block.
175 DenseMap<PoisoningVH<BasicBlock>, std::unique_ptr<BlockCacheEntry>>
176 BlockCache;
177 /// Set of value handles used to erase values from the cache on deletion.
179 DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;
180 const BlockCacheEntry *getBlockEntry(BasicBlock *BB) const {
181 auto It = BlockCache.find_as(BB);
182 if (It == BlockCache.end())
183 return nullptr;
184 return It->second.get();
185 }
186
187 BlockCacheEntry *getOrCreateBlockEntry(BasicBlock *BB) {
188 auto It = BlockCache.find_as(BB);
189 if (It == BlockCache.end())
190 It = BlockCache.insert({BB, std::make_unique<BlockCacheEntry>()}).first;
191
192 return It->second.get();
193 }
194
195 void addValueHandle(Value *Val) {
196 auto HandleIt = ValueHandles.find_as(Val);
197 if (HandleIt == ValueHandles.end())
198 ValueHandles.insert({Val, this});
199 }
200
201public:
202 void insertResult(Value *Val, BasicBlock *BB,
203 const ValueLatticeElement &Result) {
204 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
205
206 // Insert over-defined values into their own cache to reduce memory
207 // overhead.
208 if (Result.isOverdefined())
209 Entry->OverDefined.insert(Val);
210 else
211 Entry->LatticeElements.insert({Val, Result});
212
213 addValueHandle(Val);
214 }
215
216 std::optional<ValueLatticeElement> getCachedValueInfo(Value *V,
217 BasicBlock *BB) const {
218 const BlockCacheEntry *Entry = getBlockEntry(BB);
219 if (!Entry)
220 return std::nullopt;
221
222 if (Entry->OverDefined.count(V))
223 return ValueLatticeElement::getOverdefined();
224
225 auto LatticeIt = Entry->LatticeElements.find_as(V);
226 if (LatticeIt == Entry->LatticeElements.end())
227 return std::nullopt;
228
229 return LatticeIt->second;
230 }
231
232 bool
233 isNonNullAtEndOfBlock(Value *V, BasicBlock *BB,
234 function_ref<NonNullPointerSet(BasicBlock *)> InitFn) {
235 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
236 if (!Entry->NonNullPointers) {
237 Entry->NonNullPointers = InitFn(BB);
238 for (Value *V : *Entry->NonNullPointers)
239 addValueHandle(V);
240 }
241
242 return Entry->NonNullPointers->count(V);
243 }
244
245 /// clear - Empty the cache.
246 void clear() {
247 BlockCache.clear();
248 ValueHandles.clear();
249 }
250
251 /// Inform the cache that a given value has been deleted.
252 void eraseValue(Value *V);
253
254 /// This is part of the update interface to inform the cache
255 /// that a block has been deleted.
256 void eraseBlock(BasicBlock *BB);
257
258 /// Updates the cache to remove any influence an overdefined value in
259 /// OldSucc might have (unless also overdefined in NewSucc). This just
260 /// flushes elements from the cache and does not add any.
261 void threadEdgeImpl(BasicBlock *OldSucc, BasicBlock *NewSucc);
262};
263} // namespace
264
265void LazyValueInfoCache::eraseValue(Value *V) {
266 for (auto &Pair : BlockCache) {
267 Pair.second->LatticeElements.erase(V);
268 Pair.second->OverDefined.erase(V);
269 if (Pair.second->NonNullPointers)
270 Pair.second->NonNullPointers->erase(V);
271 }
272
273 auto HandleIt = ValueHandles.find_as(V);
274 if (HandleIt != ValueHandles.end())
275 ValueHandles.erase(HandleIt);
276}
277
278void LVIValueHandle::deleted() {
279 // This erasure deallocates *this, so it MUST happen after we're done
280 // using any and all members of *this.
281 Parent->eraseValue(*this);
282}
283
284void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
285 BlockCache.erase(BB);
286}
287
288void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
289 BasicBlock *NewSucc) {
290 // When an edge in the graph has been threaded, values that we could not
291 // determine a value for before (i.e. were marked overdefined) may be
292 // possible to solve now. We do NOT try to proactively update these values.
293 // Instead, we clear their entries from the cache, and allow lazy updating to
294 // recompute them when needed.
295
296 // The updating process is fairly simple: we need to drop cached info
297 // for all values that were marked overdefined in OldSucc, and for those same
298 // values in any successor of OldSucc (except NewSucc) in which they were
299 // also marked overdefined.
300 std::vector<BasicBlock*> worklist;
301 worklist.push_back(OldSucc);
302
303 const BlockCacheEntry *Entry = getBlockEntry(OldSucc);
304 if (!Entry || Entry->OverDefined.empty())
305 return; // Nothing to process here.
306 SmallVector<Value *, 4> ValsToClear(Entry->OverDefined.begin(),
307 Entry->OverDefined.end());
308
309 // Use a worklist to perform a depth-first search of OldSucc's successors.
310 // NOTE: We do not need a visited list since any blocks we have already
311 // visited will have had their overdefined markers cleared already, and we
312 // thus won't loop to their successors.
313 while (!worklist.empty()) {
314 BasicBlock *ToUpdate = worklist.back();
315 worklist.pop_back();
316
317 // Skip blocks only accessible through NewSucc.
318 if (ToUpdate == NewSucc) continue;
319
320 // If a value was marked overdefined in OldSucc, and is here too...
321 auto OI = BlockCache.find_as(ToUpdate);
322 if (OI == BlockCache.end() || OI->second->OverDefined.empty())
323 continue;
324 auto &ValueSet = OI->second->OverDefined;
325
326 bool changed = false;
327 for (Value *V : ValsToClear) {
328 if (!ValueSet.erase(V))
329 continue;
330
331 // If we removed anything, then we potentially need to update
332 // blocks successors too.
333 changed = true;
334 }
335
336 if (!changed) continue;
337
338 llvm::append_range(worklist, successors(ToUpdate));
339 }
340}
341
342namespace llvm {
343namespace {
344/// An assembly annotator class to print LazyValueCache information in
345/// comments.
346class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter {
347 LazyValueInfoImpl *LVIImpl;
348 // While analyzing which blocks we can solve values for, we need the dominator
349 // information.
350 DominatorTree &DT;
351
352public:
353 LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree)
354 : LVIImpl(L), DT(DTree) {}
355
356 void emitBasicBlockStartAnnot(const BasicBlock *BB,
357 formatted_raw_ostream &OS) override;
358
359 void emitInstructionAnnot(const Instruction *I,
360 formatted_raw_ostream &OS) override;
361};
362} // namespace
363// The actual implementation of the lazy analysis and update. Note that the
364// inheritance from LazyValueInfoCache is intended to be temporary while
365// splitting the code and then transitioning to a has-a relationship.
366class LazyValueInfoImpl {
367
368 /// Cached results from previous queries
369 LazyValueInfoCache TheCache;
370
371 /// This stack holds the state of the value solver during a query.
372 /// It basically emulates the callstack of the naive
373 /// recursive value lookup process.
374 SmallVector<std::pair<BasicBlock *, Value *>, 8> BlockValueStack;
375
376 /// Keeps track of which block-value pairs are in BlockValueStack.
377 DenseSet<std::pair<BasicBlock *, Value *>> BlockValueSet;
378
379 /// Push BV onto BlockValueStack unless it's already in there.
380 /// Returns true on success.
381 bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
382 if (!BlockValueSet.insert(BV).second)
383 return false; // It's already in the stack.
384
385 LLVM_DEBUG(dbgs() << "PUSH: " << *BV.second << " in "
386 << BV.first->getName() << "\n");
387 BlockValueStack.push_back(BV);
388 return true;
389 }
390
391 AssumptionCache *AC; ///< A pointer to the cache of @llvm.assume calls.
392 const DataLayout &DL; ///< A mandatory DataLayout
393
394 /// Declaration of the llvm.experimental.guard() intrinsic,
395 /// if it exists in the module.
396 Function *GuardDecl;
397
398 std::optional<ValueLatticeElement> getBlockValue(Value *Val, BasicBlock *BB,
399 Instruction *CxtI);
400 std::optional<ValueLatticeElement> getEdgeValue(Value *V, BasicBlock *F,
401 BasicBlock *T,
402 Instruction *CxtI = nullptr);
403
404 // These methods process one work item and may add more. A false value
405 // returned means that the work item was not completely processed and must
406 // be revisited after going through the new items.
407 bool solveBlockValue(Value *Val, BasicBlock *BB);
408 std::optional<ValueLatticeElement> solveBlockValueImpl(Value *Val,
409 BasicBlock *BB);
410 std::optional<ValueLatticeElement> solveBlockValueNonLocal(Value *Val,
411 BasicBlock *BB);
412 std::optional<ValueLatticeElement> solveBlockValuePHINode(PHINode *PN,
413 BasicBlock *BB);
414 std::optional<ValueLatticeElement> solveBlockValueSelect(SelectInst *S,
415 BasicBlock *BB);
416 std::optional<ConstantRange> getRangeFor(Value *V, Instruction *CxtI,
417 BasicBlock *BB);
418 std::optional<ValueLatticeElement> solveBlockValueBinaryOpImpl(
419 Instruction *I, BasicBlock *BB,
420 std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
421 OpFn);
422 std::optional<ValueLatticeElement>
423 solveBlockValueBinaryOp(BinaryOperator *BBI, BasicBlock *BB);
424 std::optional<ValueLatticeElement> solveBlockValueCast(CastInst *CI,
425 BasicBlock *BB);
426 std::optional<ValueLatticeElement>
427 solveBlockValueOverflowIntrinsic(WithOverflowInst *WO, BasicBlock *BB);
428 std::optional<ValueLatticeElement> solveBlockValueIntrinsic(IntrinsicInst *II,
429 BasicBlock *BB);
430 std::optional<ValueLatticeElement>
431 solveBlockValueExtractValue(ExtractValueInst *EVI, BasicBlock *BB);
432 bool isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB);
433 void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
434 ValueLatticeElement &BBLV,
435 Instruction *BBI);
436
437 void solve();
438
439 // For the following methods, if UseBlockValue is true, the function may
440 // push additional values to the worklist and return nullopt. If
441 // UseBlockValue is false, it will never return nullopt.
442
443 std::optional<ValueLatticeElement>
444 getValueFromSimpleICmpCondition(CmpInst::Predicate Pred, Value *RHS,
445 const APInt &Offset, Instruction *CxtI,
446 bool UseBlockValue);
447
448 std::optional<ValueLatticeElement>
449 getValueFromICmpCondition(Value *Val, ICmpInst *ICI, bool isTrueDest,
450 bool UseBlockValue);
451
452 std::optional<ValueLatticeElement>
453 getValueFromCondition(Value *Val, Value *Cond, bool IsTrueDest,
454 bool UseBlockValue, unsigned Depth = 0);
455
456 std::optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
457 BasicBlock *BBFrom,
458 BasicBlock *BBTo,
459 bool UseBlockValue);
460
461public:
462 /// This is the query interface to determine the lattice value for the
463 /// specified Value* at the context instruction (if specified) or at the
464 /// start of the block.
465 ValueLatticeElement getValueInBlock(Value *V, BasicBlock *BB,
466 Instruction *CxtI = nullptr);
467
468 /// This is the query interface to determine the lattice value for the
469 /// specified Value* at the specified instruction using only information
470 /// from assumes/guards and range metadata. Unlike getValueInBlock(), no
471 /// recursive query is performed.
472 ValueLatticeElement getValueAt(Value *V, Instruction *CxtI);
473
474 /// This is the query interface to determine the lattice
475 /// value for the specified Value* that is true on the specified edge.
476 ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB,
477 BasicBlock *ToBB,
478 Instruction *CxtI = nullptr);
479
480 ValueLatticeElement getValueAtUse(const Use &U);
481
482 /// Complete flush all previously computed values
483 void clear() {
484 TheCache.clear();
485 }
486
487 /// Printing the LazyValueInfo Analysis.
488 void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
489 LazyValueInfoAnnotatedWriter Writer(this, DTree);
490 F.print(OS, &Writer);
491 }
492
493 /// This is part of the update interface to remove information related to this
494 /// value from the cache.
495 void forgetValue(Value *V) { TheCache.eraseValue(V); }
496
497 /// This is part of the update interface to inform the cache
498 /// that a block has been deleted.
499 void eraseBlock(BasicBlock *BB) {
500 TheCache.eraseBlock(BB);
501 }
502
503 /// This is the update interface to inform the cache that an edge from
504 /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
505 void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);
506
507 LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
508 Function *GuardDecl)
509 : AC(AC), DL(DL), GuardDecl(GuardDecl) {}
510};
511} // namespace llvm
512
513void LazyValueInfoImpl::solve() {
514 SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack(
515 BlockValueStack.begin(), BlockValueStack.end());
516
517 unsigned processedCount = 0;
518 while (!BlockValueStack.empty()) {
519 processedCount++;
520 // Abort if we have to process too many values to get a result for this one.
521 // Because of the design of the overdefined cache currently being per-block
522 // to avoid naming-related issues (IE it wants to try to give different
523 // results for the same name in different blocks), overdefined results don't
524 // get cached globally, which in turn means we will often try to rediscover
525 // the same overdefined result again and again. Once something like
526 // PredicateInfo is used in LVI or CVP, we should be able to make the
527 // overdefined cache global, and remove this throttle.
528 if (processedCount > MaxProcessedPerValue) {
529 LLVM_DEBUG(
530 dbgs() << "Giving up on stack because we are getting too deep\n");
531 // Fill in the original values
532 while (!StartingStack.empty()) {
533 std::pair<BasicBlock *, Value *> &e = StartingStack.back();
534 TheCache.insertResult(e.second, e.first,
535 ValueLatticeElement::getOverdefined());
536 StartingStack.pop_back();
537 }
538 BlockValueSet.clear();
539 BlockValueStack.clear();
540 return;
541 }
542 std::pair<BasicBlock *, Value *> e = BlockValueStack.back();
543 assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");
544 unsigned StackSize = BlockValueStack.size();
545 (void) StackSize;
546
547 if (solveBlockValue(e.second, e.first)) {
548 // The work item was completely processed.
549 assert(BlockValueStack.size() == StackSize &&
550 BlockValueStack.back() == e && "Nothing should have been pushed!");
551#ifndef NDEBUG
552 std::optional<ValueLatticeElement> BBLV =
553 TheCache.getCachedValueInfo(e.second, e.first);
554 assert(BBLV && "Result should be in cache!");
555 LLVM_DEBUG(
556 dbgs() << "POP " << *e.second << " in " << e.first->getName() << " = "
557 << *BBLV << "\n");
558#endif
559
560 BlockValueStack.pop_back();
561 BlockValueSet.erase(e);
562 } else {
563 // More work needs to be done before revisiting.
564 assert(BlockValueStack.size() == StackSize + 1 &&
565 "Exactly one element should have been pushed!");
566 }
567 }
568}
569
570std::optional<ValueLatticeElement>
571LazyValueInfoImpl::getBlockValue(Value *Val, BasicBlock *BB,
572 Instruction *CxtI) {
573 // If already a constant, there is nothing to compute.
574 if (Constant *VC = dyn_cast<Constant>(Val))
575 return ValueLatticeElement::get(VC);
576
577 if (std::optional<ValueLatticeElement> OptLatticeVal =
578 TheCache.getCachedValueInfo(Val, BB)) {
579 intersectAssumeOrGuardBlockValueConstantRange(Val, *OptLatticeVal, CxtI);
580 return OptLatticeVal;
581 }
582
583 // We have hit a cycle, assume overdefined.
584 if (!pushBlockValue({ BB, Val }))
585 return ValueLatticeElement::getOverdefined();
586
587 // Yet to be resolved.
588 return std::nullopt;
589}
590
591static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) {
592 switch (BBI->getOpcode()) {
593 default:
594 break;
595 case Instruction::Call:
596 case Instruction::Invoke:
597 if (std::optional<ConstantRange> Range = cast<CallBase>(BBI)->getRange())
598 return ValueLatticeElement::getRange(*Range);
599 [[fallthrough]];
600 case Instruction::Load:
601 if (MDNode *Ranges = BBI->getMetadata(LLVMContext::MD_range))
602 if (isa<IntegerType>(BBI->getType())) {
603 return ValueLatticeElement::getRange(
604 getConstantRangeFromMetadata(*Ranges));
605 }
606 break;
607 };
608 // Nothing known - will be intersected with other facts
609 return ValueLatticeElement::getOverdefined();
610}
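// For illustration (hypothetical IR, not taken from a real module):
//   %v = load i32, ptr %p, !range !0
//   !0 = !{i32 0, i32 10}
// getFromRangeMetadata() maps the !range metadata on %v to the constant range
// [0, 10); instructions without usable range information come back as
// overdefined.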
611
612bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) {
613 assert(!isa<Constant>(Val) && "Value should not be constant");
614 assert(!TheCache.getCachedValueInfo(Val, BB) &&
615 "Value should not be in cache");
616
617 // Hold off inserting this value into the Cache in case we have to return
618 // false and come back later.
619 std::optional<ValueLatticeElement> Res = solveBlockValueImpl(Val, BB);
620 if (!Res)
621 // Work pushed, will revisit
622 return false;
623
624 TheCache.insertResult(Val, BB, *Res);
625 return true;
626}
627
628std::optional<ValueLatticeElement>
629LazyValueInfoImpl::solveBlockValueImpl(Value *Val, BasicBlock *BB) {
630 Instruction *BBI = dyn_cast<Instruction>(Val);
631 if (!BBI || BBI->getParent() != BB)
632 return solveBlockValueNonLocal(Val, BB);
633
634 if (PHINode *PN = dyn_cast<PHINode>(BBI))
635 return solveBlockValuePHINode(PN, BB);
636
637 if (auto *SI = dyn_cast<SelectInst>(BBI))
638 return solveBlockValueSelect(SI, BB);
639
640 // If this value is a nonnull pointer, record its range and bail out. Note
641 // that for all other pointer typed values, we terminate the search at the
642 // definition. We could easily extend this to look through geps, bitcasts,
643 // and the like to prove non-nullness, but it's not clear that's worth it
644 // compile time wise. The context-insensitive value walk done inside
645 // isKnownNonZero gets most of the profitable cases at much less expense.
646 // This does mean that we have a sensitivity to where the defining
647 // instruction is placed, even if it could legally be hoisted much higher.
648 // That is unfortunate.
649 PointerType *PT = dyn_cast<PointerType>(BBI->getType());
650 if (PT && isKnownNonZero(BBI, DL))
651 return ValueLatticeElement::getNot(ConstantPointerNull::get(PT));
652
653 if (BBI->getType()->isIntegerTy()) {
654 if (auto *CI = dyn_cast<CastInst>(BBI))
655 return solveBlockValueCast(CI, BB);
656
657 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI))
658 return solveBlockValueBinaryOp(BO, BB);
659
660 if (auto *EVI = dyn_cast<ExtractValueInst>(BBI))
661 return solveBlockValueExtractValue(EVI, BB);
662
663 if (auto *II = dyn_cast<IntrinsicInst>(BBI))
664 return solveBlockValueIntrinsic(II, BB);
665 }
666
667 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
668 << "' - unknown inst def found.\n");
669 return getFromRangeMetadata(BBI);
670}
671
672static void AddNonNullPointer(Value *Ptr, NonNullPointerSet &PtrSet) {
673 // TODO: Use NullPointerIsDefined instead.
674 if (Ptr->getType()->getPointerAddressSpace() == 0)
675 PtrSet.insert(getUnderlyingObject(Ptr));
676}
677
678static void AddNonNullPointersByInstruction(
679 Instruction *I, NonNullPointerSet &PtrSet) {
680 if (LoadInst *L = dyn_cast<LoadInst>(I)) {
681 AddNonNullPointer(L->getPointerOperand(), PtrSet);
682 } else if (StoreInst *S = dyn_cast<StoreInst>(I)) {
683 AddNonNullPointer(S->getPointerOperand(), PtrSet);
684 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
685 if (MI->isVolatile()) return;
686
687 // FIXME: check whether it has a valuerange that excludes zero?
688 ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
689 if (!Len || Len->isZero()) return;
690
691 AddNonNullPointer(MI->getRawDest(), PtrSet);
692 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
693 AddNonNullPointer(MTI->getRawSource(), PtrSet);
694 }
695}
696
697bool LazyValueInfoImpl::isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB) {
698 if (NullPointerIsDefined(BB->getParent(),
699 Val->getType()->getPointerAddressSpace()))
700 return false;
701
702 Val = Val->stripInBoundsOffsets();
703 return TheCache.isNonNullAtEndOfBlock(Val, BB, [](BasicBlock *BB) {
704 NonNullPointerSet NonNullPointers;
705 for (Instruction &I : *BB)
706 AddNonNullPointersByInstruction(&I, NonNullPointers);
707 return NonNullPointers;
708 });
709}
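// For illustration (hypothetical IR in address space 0):
//   store i32 0, ptr %p
// Because the store dereferences %p, AddNonNullPointersByInstruction() records
// %p, and isNonNullAtEndOfBlock(%p, BB) returns true for the containing block.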
710
711std::optional<ValueLatticeElement>
712LazyValueInfoImpl::solveBlockValueNonLocal(Value *Val, BasicBlock *BB) {
713 ValueLatticeElement Result; // Start Undefined.
714
715 // If this is the entry block, we must be asking about an argument.
716 if (BB->isEntryBlock()) {
717 assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
718 if (std::optional<ConstantRange> Range = cast<Argument>(Val)->getRange())
719 return ValueLatticeElement::getRange(*Range);
720 return ValueLatticeElement::getOverdefined();
721 }
722
723 // Loop over all of our predecessors, merging what we know from them into
724 // result. If we encounter an unexplored predecessor, we eagerly explore it
725 // in a depth first manner. In practice, this has the effect of discovering
726 // paths we can't analyze eagerly without spending compile times analyzing
727 // other paths. This heuristic benefits from the fact that predecessors are
728 // frequently arranged such that dominating ones come first and we quickly
729 // find a path to function entry. TODO: We should consider explicitly
730 // canonicalizing to make this true rather than relying on this happy
731 // accident.
732 for (BasicBlock *Pred : predecessors(BB)) {
733 std::optional<ValueLatticeElement> EdgeResult = getEdgeValue(Val, Pred, BB);
734 if (!EdgeResult)
735 // Explore that input, then return here
736 return std::nullopt;
737
738 Result.mergeIn(*EdgeResult);
739
740 // If we hit overdefined, exit early. The BlockVals entry is already set
741 // to overdefined.
742 if (Result.isOverdefined()) {
743 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
744 << "' - overdefined because of pred '"
745 << Pred->getName() << "' (non local).\n");
746 return Result;
747 }
748 }
749
750 // Return the merged value, which is more precise than 'overdefined'.
751 assert(!Result.isOverdefined());
752 return Result;
753}
754
755std::optional<ValueLatticeElement>
756LazyValueInfoImpl::solveBlockValuePHINode(PHINode *PN, BasicBlock *BB) {
757 ValueLatticeElement Result; // Start Undefined.
758
759 // Loop over all of our predecessors, merging what we know from them into
760 // result. See the comment about the chosen traversal order in
761 // solveBlockValueNonLocal; the same reasoning applies here.
762 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
763 BasicBlock *PhiBB = PN->getIncomingBlock(i);
764 Value *PhiVal = PN->getIncomingValue(i);
765 // Note that we can provide PN as the context value to getEdgeValue, even
766 // though the results will be cached, because PN is the value being used as
767 // the cache key in the caller.
768 std::optional<ValueLatticeElement> EdgeResult =
769 getEdgeValue(PhiVal, PhiBB, BB, PN);
770 if (!EdgeResult)
771 // Explore that input, then return here
772 return std::nullopt;
773
774 Result.mergeIn(*EdgeResult);
775
776 // If we hit overdefined, exit early. The BlockVals entry is already set
777 // to overdefined.
778 if (Result.isOverdefined()) {
779 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
780 << "' - overdefined because of pred (local).\n");
781
782 return Result;
783 }
784 }
785
786 // Return the merged value, which is more precise than 'overdefined'.
787 assert(!Result.isOverdefined() && "Possible PHI in entry block?");
788 return Result;
789}
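// For illustration (hypothetical IR):
//   %p = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
// If %a is known to be in [0, 10) on the edge from %bb1 and %b is known to be
// in [20, 30) on the edge from %bb2, the merged lattice value for %p is the
// enclosing range [0, 30).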
790
791// If we can determine a constraint on the value given conditions assumed by
792// the program, intersect those constraints with BBLV
793void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
794 Value *Val, ValueLatticeElement &BBLV, Instruction *BBI) {
795 BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
796 if (!BBI)
797 return;
798
799 BasicBlock *BB = BBI->getParent();
800 for (auto &AssumeVH : AC->assumptionsFor(Val)) {
801 if (!AssumeVH)
802 continue;
803
804 // Only check assumes in the block of the context instruction. Other
805 // assumes will have already been taken into account when the value was
806 // propagated from predecessor blocks.
807 auto *I = cast<CallInst>(AssumeVH);
808 if (I->getParent() != BB || !isValidAssumeForContext(I, BBI))
809 continue;
810
811 BBLV = intersect(BBLV, *getValueFromCondition(Val, I->getArgOperand(0),
812 /*IsTrueDest*/ true,
813 /*UseBlockValue*/ false));
814 }
815
816 // If guards are not used in the module, don't spend time looking for them
817 if (GuardDecl && !GuardDecl->use_empty() &&
818 BBI->getIterator() != BB->begin()) {
819 for (Instruction &I :
820 make_range(std::next(BBI->getIterator().getReverse()), BB->rend())) {
821 Value *Cond = nullptr;
822 if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(Cond))))
823 BBLV = intersect(BBLV,
824 *getValueFromCondition(Val, Cond, /*IsTrueDest*/ true,
825 /*UseBlockValue*/ false));
826 }
827 }
828
829 if (BBLV.isOverdefined()) {
830 // Check whether we're checking at the terminator, and the pointer has
831 // been dereferenced in this block.
832 PointerType *PTy = dyn_cast<PointerType>(Val->getType());
833 if (PTy && BB->getTerminator() == BBI &&
834 isNonNullAtEndOfBlock(Val, BB))
835 BBLV = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));
836 }
837}
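// For illustration (hypothetical IR):
//   %c = icmp ult i32 %x, 100
//   call void @llvm.assume(i1 %c)
// For a context instruction in the same block as the assume, the block value
// of %x is intersected with the range [0, 100) implied by the condition.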
838
839static ConstantRange toConstantRange(const ValueLatticeElement &Val,
840 Type *Ty, bool UndefAllowed = false) {
841 assert(Ty->isIntOrIntVectorTy() && "Must be integer type");
842 if (Val.isConstantRange(UndefAllowed))
843 return Val.getConstantRange();
844 unsigned BW = Ty->getScalarSizeInBits();
845 if (Val.isUnknown())
846 return ConstantRange::getEmpty(BW);
847 return ConstantRange::getFull(BW);
848}
849
850std::optional<ValueLatticeElement>
851LazyValueInfoImpl::solveBlockValueSelect(SelectInst *SI, BasicBlock *BB) {
852 // Recurse on our inputs if needed
853 std::optional<ValueLatticeElement> OptTrueVal =
854 getBlockValue(SI->getTrueValue(), BB, SI);
855 if (!OptTrueVal)
856 return std::nullopt;
857 ValueLatticeElement &TrueVal = *OptTrueVal;
858
859 std::optional<ValueLatticeElement> OptFalseVal =
860 getBlockValue(SI->getFalseValue(), BB, SI);
861 if (!OptFalseVal)
862 return std::nullopt;
863 ValueLatticeElement &FalseVal = *OptFalseVal;
864
865 if (TrueVal.isConstantRange() || FalseVal.isConstantRange()) {
866 const ConstantRange &TrueCR = toConstantRange(TrueVal, SI->getType());
867 const ConstantRange &FalseCR = toConstantRange(FalseVal, SI->getType());
868 Value *LHS = nullptr;
869 Value *RHS = nullptr;
870 SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
871 // Is this a min specifically of our two inputs? (Avoid the risk of
872 // ValueTracking getting smarter looking back past our immediate inputs.)
873 if (SelectPatternResult::isMinOrMax(SPR.Flavor) &&
874 ((LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) ||
875 (RHS == SI->getTrueValue() && LHS == SI->getFalseValue()))) {
876 ConstantRange ResultCR = [&]() {
877 switch (SPR.Flavor) {
878 default:
879 llvm_unreachable("unexpected minmax type!");
880 case SPF_SMIN: /// Signed minimum
881 return TrueCR.smin(FalseCR);
882 case SPF_UMIN: /// Unsigned minimum
883 return TrueCR.umin(FalseCR);
884 case SPF_SMAX: /// Signed maximum
885 return TrueCR.smax(FalseCR);
886 case SPF_UMAX: /// Unsigned maximum
887 return TrueCR.umax(FalseCR);
888 };
889 }();
890 return ValueLatticeElement::getRange(
891 ResultCR, TrueVal.isConstantRangeIncludingUndef() ||
892 FalseVal.isConstantRangeIncludingUndef());
893 }
894
895 if (SPR.Flavor == SPF_ABS) {
896 if (LHS == SI->getTrueValue())
897 return ValueLatticeElement::getRange(
898 TrueCR.abs(), TrueVal.isConstantRangeIncludingUndef());
899 if (LHS == SI->getFalseValue())
900 return ValueLatticeElement::getRange(
901 FalseCR.abs(), FalseVal.isConstantRangeIncludingUndef());
902 }
903
904 if (SPR.Flavor == SPF_NABS) {
905 ConstantRange Zero(APInt::getZero(TrueCR.getBitWidth()));
906 if (LHS == SI->getTrueValue())
907 return ValueLatticeElement::getRange(
908 Zero.sub(TrueCR.abs()), FalseVal.isConstantRangeIncludingUndef());
909 if (LHS == SI->getFalseValue())
910 return ValueLatticeElement::getRange(
911 Zero.sub(FalseCR.abs()), FalseVal.isConstantRangeIncludingUndef());
912 }
913 }
914
915 // Can we constrain the facts about the true and false values by using the
916 // condition itself? This shows up with idioms like e.g. select(a > 5, a, 5).
917 // TODO: We could potentially refine an overdefined true value above.
918 Value *Cond = SI->getCondition();
919 // If the value is undef, a different value may be chosen in
920 // the select condition.
921 if (isGuaranteedNotToBeUndef(Cond, AC)) {
922 TrueVal =
923 intersect(TrueVal, *getValueFromCondition(SI->getTrueValue(), Cond,
924 /*IsTrueDest*/ true,
925 /*UseBlockValue*/ false));
926 FalseVal =
927 intersect(FalseVal, *getValueFromCondition(SI->getFalseValue(), Cond,
928 /*IsTrueDest*/ false,
929 /*UseBlockValue*/ false));
930 }
932 ValueLatticeElement Result = TrueVal;
933 Result.mergeIn(FalseVal);
934 return Result;
935}
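// For illustration (the select(a > 5, a, 5) idiom mentioned above,
// hypothetical IR):
//   %c = icmp sgt i32 %a, 5
//   %s = select i1 %c, i32 %a, i32 5
// The true arm is constrained by the condition to [6, INT_MAX], the false arm
// is the constant 5, so the merged result for %s is the range [5, INT_MAX].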
936
937std::optional<ConstantRange>
938LazyValueInfoImpl::getRangeFor(Value *V, Instruction *CxtI, BasicBlock *BB) {
939 std::optional<ValueLatticeElement> OptVal = getBlockValue(V, BB, CxtI);
940 if (!OptVal)
941 return std::nullopt;
942 return toConstantRange(*OptVal, V->getType());
943}
944
945std::optional<ValueLatticeElement>
946LazyValueInfoImpl::solveBlockValueCast(CastInst *CI, BasicBlock *BB) {
947 // Filter out casts we don't know how to reason about before attempting to
948 // recurse on our operand. This can cut a long search short if we know we're
949 // not going to be able to get any useful information anyway.
950 switch (CI->getOpcode()) {
951 case Instruction::Trunc:
952 case Instruction::SExt:
953 case Instruction::ZExt:
954 break;
955 default:
956 // Unhandled instructions are overdefined.
957 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
958 << "' - overdefined (unknown cast).\n");
959 return ValueLatticeElement::getOverdefined();
960 }
961
962 // Figure out the range of the LHS. If that fails, we still apply the
963 // transfer rule on the full set since we may be able to locally infer
964 // interesting facts.
965 std::optional<ConstantRange> LHSRes = getRangeFor(CI->getOperand(0), CI, BB);
966 if (!LHSRes)
967 // More work to do before applying this transfer rule.
968 return std::nullopt;
969 const ConstantRange &LHSRange = *LHSRes;
970
971 const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth();
972
973 // NOTE: We're currently limited by the set of operations that ConstantRange
974 // can evaluate symbolically. Enhancing that set will allow us to analyze
975 // more definitions.
976 return ValueLatticeElement::getRange(LHSRange.castOp(CI->getOpcode(),
977 ResultBitWidth));
978}
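// For illustration (hypothetical values): if the operand of "zext i8 %x to
// i32" is known to be in [100, 200), LHSRange.castOp(ZExt, 32) yields the i32
// range [100, 200); a trunc whose operand range covers more values than the
// result type can hold comes back as the full set.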
979
980std::optional<ValueLatticeElement>
981LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
983 std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
984 OpFn) {
985 // Figure out the ranges of the operands. If that fails, use a
986 // conservative range, but apply the transfer rule anyways. This
987 // lets us pick up facts from expressions like "and i32 (call i32
988 // @foo()), 32"
989 std::optional<ConstantRange> LHSRes = getRangeFor(I->getOperand(0), I, BB);
990 if (!LHSRes)
991 return std::nullopt;
992
993 std::optional<ConstantRange> RHSRes = getRangeFor(I->getOperand(1), I, BB);
994 if (!RHSRes)
995 return std::nullopt;
996
997 const ConstantRange &LHSRange = *LHSRes;
998 const ConstantRange &RHSRange = *RHSRes;
999 return ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange));
1000}
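// For illustration (matching the "and i32 (call i32 @foo()), 32" example in
// the comment above): even if the call's range is the full set, the transfer
// rule OpFn(full, [32, 33)) for an 'and' produces a range contained in
// [0, 33), so the result is still known to be at most 32.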
1001
1002std::optional<ValueLatticeElement>
1003LazyValueInfoImpl::solveBlockValueBinaryOp(BinaryOperator *BO, BasicBlock *BB) {
1004 assert(BO->getOperand(0)->getType()->isSized() &&
1005 "all operands to binary operators are sized");
1006 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO)) {
1007 unsigned NoWrapKind = OBO->getNoWrapKind();
1008 return solveBlockValueBinaryOpImpl(
1009 BO, BB,
1010 [BO, NoWrapKind](const ConstantRange &CR1, const ConstantRange &CR2) {
1011 return CR1.overflowingBinaryOp(BO->getOpcode(), CR2, NoWrapKind);
1012 });
1013 }
1014
1015 return solveBlockValueBinaryOpImpl(
1016 BO, BB, [BO](const ConstantRange &CR1, const ConstantRange &CR2) {
1017 return CR1.binaryOp(BO->getOpcode(), CR2);
1018 });
1019}
1020
1021std::optional<ValueLatticeElement>
1022LazyValueInfoImpl::solveBlockValueOverflowIntrinsic(WithOverflowInst *WO,
1023 BasicBlock *BB) {
1024 return solveBlockValueBinaryOpImpl(
1025 WO, BB, [WO](const ConstantRange &CR1, const ConstantRange &CR2) {
1026 return CR1.binaryOp(WO->getBinaryOp(), CR2);
1027 });
1028}
1029
1030std::optional<ValueLatticeElement>
1031LazyValueInfoImpl::solveBlockValueIntrinsic(IntrinsicInst *II, BasicBlock *BB) {
1032 ValueLatticeElement MetadataVal = getFromRangeMetadata(II);
1033 if (!ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
1034 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1035 << "' - unknown intrinsic.\n");
1036 return MetadataVal;
1037 }
1038
1039 SmallVector<ConstantRange, 2> OpRanges;
1040 for (Value *Op : II->args()) {
1041 std::optional<ConstantRange> Range = getRangeFor(Op, II, BB);
1042 if (!Range)
1043 return std::nullopt;
1044 OpRanges.push_back(*Range);
1045 }
1046
1047 return intersect(ValueLatticeElement::getRange(ConstantRange::intrinsic(
1048 II->getIntrinsicID(), OpRanges)),
1049 MetadataVal);
1050}
1051
1052std::optional<ValueLatticeElement>
1053LazyValueInfoImpl::solveBlockValueExtractValue(ExtractValueInst *EVI,
1054 BasicBlock *BB) {
1055 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1056 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 0)
1057 return solveBlockValueOverflowIntrinsic(WO, BB);
1058
1059 // Handle extractvalue of insertvalue to allow further simplification
1060 // based on replaced with.overflow intrinsics.
1061 if (Value *V = simplifyExtractValueInst(
1062 EVI->getAggregateOperand(), EVI->getIndices(),
1063 EVI->getDataLayout()))
1064 return getBlockValue(V, BB, EVI);
1065
1066 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1067 << "' - overdefined (unknown extractvalue).\n");
1068 return ValueLatticeElement::getOverdefined();
1069}
1070
1071static bool matchICmpOperand(APInt &Offset, Value *LHS, Value *Val,
1072 ICmpInst::Predicate Pred) {
1073 if (LHS == Val)
1074 return true;
1075
1076 // Handle range checking idiom produced by InstCombine. We will subtract the
1077 // offset from the allowed range for RHS in this case.
1078 const APInt *C;
1079 if (match(LHS, m_AddLike(m_Specific(Val), m_APInt(C)))) {
1080 Offset = *C;
1081 return true;
1082 }
1083
1084 // Handle the symmetric case. This appears in saturation patterns like
1085 // (x == 16) ? 16 : (x + 1).
1086 if (match(Val, m_AddLike(m_Specific(LHS), m_APInt(C)))) {
1087 Offset = -*C;
1088 return true;
1089 }
1090
1091 // If (x | y) < C, then (x < C) && (y < C).
1092 if (match(LHS, m_c_Or(m_Specific(Val), m_Value())) &&
1093 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE))
1094 return true;
1095
1096 // If (x & y) > C, then (x > C) && (y > C).
1097 if (match(LHS, m_c_And(m_Specific(Val), m_Value())) &&
1098 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE))
1099 return true;
1100
1101 return false;
1102}
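// For illustration (the InstCombine range-check idiom above, hypothetical IR):
//   %add = add i32 %x, -5
//   %cmp = icmp ult i32 %add, 10
// matchICmpOperand() matches %add against %x with Offset = -5; the caller then
// computes the allowed region [0, 10) for %add on the true edge and subtracts
// the offset, giving %x in [5, 15).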
1103
1104/// Get value range for a "(Val + Offset) Pred RHS" condition.
1105std::optional<ValueLatticeElement>
1106LazyValueInfoImpl::getValueFromSimpleICmpCondition(CmpInst::Predicate Pred,
1107 Value *RHS,
1108 const APInt &Offset,
1109 Instruction *CxtI,
1110 bool UseBlockValue) {
1111 ConstantRange RHSRange(RHS->getType()->getScalarSizeInBits(),
1112 /*isFullSet=*/true);
1113 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
1114 RHSRange = ConstantRange(CI->getValue());
1115 } else if (UseBlockValue) {
1116 std::optional<ValueLatticeElement> R =
1117 getBlockValue(RHS, CxtI->getParent(), CxtI);
1118 if (!R)
1119 return std::nullopt;
1120 RHSRange = toConstantRange(*R, RHS->getType());
1121 }
1122
1123 ConstantRange TrueValues =
1124 ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
1125 return ValueLatticeElement::getRange(TrueValues.subtract(Offset));
1126}
1127
1128static std::optional<ConstantRange>
1129getRangeViaSLT(CmpInst::Predicate Pred, APInt RHS,
1130 function_ref<std::optional<ConstantRange>(const APInt &)> Fn) {
1131 bool Invert = false;
1132 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
1133 Pred = ICmpInst::getInversePredicate(Pred);
1134 Invert = true;
1135 }
1136 if (Pred == ICmpInst::ICMP_SLE) {
1137 Pred = ICmpInst::ICMP_SLT;
1138 if (RHS.isMaxSignedValue())
1139 return std::nullopt; // Could also return full/empty here, if we wanted.
1140 ++RHS;
1141 }
1142 assert(Pred == ICmpInst::ICMP_SLT && "Must be signed predicate");
1143 if (auto CR = Fn(RHS))
1144 return Invert ? CR->inverse() : CR;
1145 return std::nullopt;
1146}
1147
1148std::optional<ValueLatticeElement> LazyValueInfoImpl::getValueFromICmpCondition(
1149 Value *Val, ICmpInst *ICI, bool isTrueDest, bool UseBlockValue) {
1150 Value *LHS = ICI->getOperand(0);
1151 Value *RHS = ICI->getOperand(1);
1152
1153 // Get the predicate that must hold along the considered edge.
1154 CmpInst::Predicate EdgePred =
1155 isTrueDest ? ICI->getPredicate() : ICI->getInversePredicate();
1156
1157 if (isa<Constant>(RHS)) {
1158 if (ICI->isEquality() && LHS == Val) {
1159 if (EdgePred == ICmpInst::ICMP_EQ)
1160 return ValueLatticeElement::get(cast<Constant>(RHS));
1161 else if (!isa<UndefValue>(RHS))
1162 return ValueLatticeElement::getNot(cast<Constant>(RHS));
1163 }
1164 }
1165
1166 Type *Ty = Val->getType();
1167 if (!Ty->isIntegerTy())
1168 return ValueLatticeElement::getOverdefined();
1169
1170 unsigned BitWidth = Ty->getScalarSizeInBits();
1171 APInt Offset(BitWidth, 0);
1172 if (matchICmpOperand(Offset, LHS, Val, EdgePred))
1173 return getValueFromSimpleICmpCondition(EdgePred, RHS, Offset, ICI,
1174 UseBlockValue);
1175
1176 CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(EdgePred);
1177 if (matchICmpOperand(Offset, RHS, Val, SwappedPred))
1178 return getValueFromSimpleICmpCondition(SwappedPred, LHS, Offset, ICI,
1179 UseBlockValue);
1180
1181 const APInt *Mask, *C;
1182 if (match(LHS, m_And(m_Specific(Val), m_APInt(Mask))) &&
1183 match(RHS, m_APInt(C))) {
1184 // If (Val & Mask) == C then all the masked bits are known and we can
1185 // compute a value range based on that.
1186 if (EdgePred == ICmpInst::ICMP_EQ) {
1187 KnownBits Known;
1188 Known.Zero = ~*C & *Mask;
1189 Known.One = *C & *Mask;
1190 return ValueLatticeElement::getRange(
1191 ConstantRange::fromKnownBits(Known, /*IsSigned*/ false));
1192 }
1193
1194 if (EdgePred == ICmpInst::ICMP_NE)
1197 }
1198
1199 // If (X urem Modulus) >= C, then X >= C.
1200 // If trunc X >= C, then X >= C.
1201 // TODO: An upper bound could be computed as well.
1202 if (match(LHS, m_CombineOr(m_URem(m_Specific(Val), m_Value()),
1203 m_Trunc(m_Specific(Val)))) &&
1204 match(RHS, m_APInt(C))) {
1205 // Use the icmp region so we don't have to deal with different predicates.
1206 ConstantRange CR = ConstantRange::makeExactICmpRegion(EdgePred, *C);
1207 if (!CR.isEmptySet())
1208 return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
1209 CR.getUnsignedMin(), APInt(BitWidth, 0)));
1210 }
1211
1212 // Recognize:
1213 // icmp slt (ashr X, ShAmtC), C --> icmp slt X, C << ShAmtC
1214 // Preconditions: (C << ShAmtC) >> ShAmtC == C
1215 const APInt *ShAmtC;
1216 if (CmpInst::isSigned(EdgePred) &&
1217 match(LHS, m_AShr(m_Specific(Val), m_APInt(ShAmtC))) &&
1218 match(RHS, m_APInt(C))) {
1219 auto CR = getRangeViaSLT(
1220 EdgePred, *C, [&](const APInt &RHS) -> std::optional<ConstantRange> {
1221 APInt New = RHS << *ShAmtC;
1222 if ((New.ashr(*ShAmtC)) != RHS)
1223 return std::nullopt;
1224 return ConstantRange::getNonEmpty(
1225 APInt::getSignedMinValue(New.getBitWidth()), New);
1226 });
1227 if (CR)
1228 return ValueLatticeElement::getRange(*CR);
1229 }
1230
1231 return ValueLatticeElement::getOverdefined();
1232}
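// For illustration (the known-bits case above, hypothetical IR):
//   %m = and i8 %x, -16
//   %c = icmp eq i8 %m, 32
// On the edge where %c is true the top four bits of %x are known to be 0010,
// so %x is constrained to the range [32, 48).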
1233
1234// Handle conditions of the form
1235// extractvalue(op.with.overflow(%x, C), 1).
1236static ValueLatticeElement getValueFromOverflowCondition(
1237 Value *Val, WithOverflowInst *WO, bool IsTrueDest) {
1238 // TODO: This only works with a constant RHS for now. We could also compute
1239 // the range of the RHS, but this doesn't fit into the current structure of
1240 // the edge value calculation.
1241 const APInt *C;
1242 if (WO->getLHS() != Val || !match(WO->getRHS(), m_APInt(C)))
1243 return ValueLatticeElement::getOverdefined();
1244
1245 // Calculate the possible values of %x for which no overflow occurs.
1246 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
1247 WO->getBinaryOp(), *C, WO->getNoWrapKind());
1248
1249 // If overflow is false, %x is constrained to NWR. If overflow is true, %x is
1250 // constrained to it's inverse (all values that might cause overflow).
1251 if (IsTrueDest)
1252 NWR = NWR.inverse();
1253 return ValueLatticeElement::getRange(NWR);
1254}
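// For illustration (hypothetical IR):
//   %wo = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %x, i8 100)
//   %ov = extractvalue { i8, i1 } %wo, 1
// The no-wrap region for adding 100 to an i8 is [0, 156), so on an edge where
// %ov is known false %x is in [0, 156), and where it is known true %x is in
// the inverse range [156, 256).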
1255
1256std::optional<ValueLatticeElement>
1257LazyValueInfoImpl::getValueFromCondition(Value *Val, Value *Cond,
1258 bool IsTrueDest, bool UseBlockValue,
1259 unsigned Depth) {
1260 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond))
1261 return getValueFromICmpCondition(Val, ICI, IsTrueDest, UseBlockValue);
1262
1263 if (auto *EVI = dyn_cast<ExtractValueInst>(Cond))
1264 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1265 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1)
1266 return getValueFromOverflowCondition(Val, WO, IsTrueDest);
1267
1267
1268 if (++Depth == MaxAnalysisRecursionDepth)
1269 return ValueLatticeElement::getOverdefined();
1270
1271 Value *N;
1272 if (match(Cond, m_Not(m_Value(N))))
1273 return getValueFromCondition(Val, N, !IsTrueDest, UseBlockValue, Depth);
1274
1275 Value *L, *R;
1276 bool IsAnd;
1277 if (match(Cond, m_LogicalAnd(m_Value(L), m_Value(R))))
1278 IsAnd = true;
1279 else if (match(Cond, m_LogicalOr(m_Value(L), m_Value(R))))
1280 IsAnd = false;
1281 else
1282 return ValueLatticeElement::getOverdefined();
1283
1284 std::optional<ValueLatticeElement> LV =
1285 getValueFromCondition(Val, L, IsTrueDest, UseBlockValue, Depth);
1286 if (!LV)
1287 return std::nullopt;
1288 std::optional<ValueLatticeElement> RV =
1289 getValueFromCondition(Val, R, IsTrueDest, UseBlockValue, Depth);
1290 if (!RV)
1291 return std::nullopt;
1292
1293 // if (L && R) -> intersect L and R
1294 // if (!(L || R)) -> intersect !L and !R
1295 // if (L || R) -> union L and R
1296 // if (!(L && R)) -> union !L and !R
1297 if (IsTrueDest ^ IsAnd) {
1298 LV->mergeIn(*RV);
1299 return *LV;
1300 }
1301
1302 return intersect(*LV, *RV);
1303}
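// For illustration (hypothetical IR):
//   %a = icmp ugt i32 %x, 10
//   %b = icmp ult i32 %x, 20
//   %c = and i1 %a, %b
// On the true edge both conditions hold, so the two facts are intersected and
// %x is known to be in [11, 20); on a false edge the negated facts are merged
// (unioned) instead, as the comment above describes.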
1304
1305// Return true if Usr has Op as an operand, otherwise false.
1306static bool usesOperand(User *Usr, Value *Op) {
1307 return is_contained(Usr->operands(), Op);
1308}
1309
1310// Return true if the instruction type of Val is supported by
1311// constantFoldUser(). Currently CastInst, BinaryOperator and FreezeInst only.
1312// Call this before calling constantFoldUser() to find out if it's even worth
1313// attempting to call it.
1314static bool isOperationFoldable(User *Usr) {
1315 return isa<CastInst>(Usr) || isa<BinaryOperator>(Usr) || isa<FreezeInst>(Usr);
1316}
1317
1318// Check if Usr can be simplified to an integer constant when the value of one
1319// of its operands Op is an integer constant OpConstVal. If so, return it as an
1320// lattice value range with a single element or otherwise return an overdefined
1321// lattice value.
1322static ValueLatticeElement constantFoldUser(User *Usr, Value *Op,
1323 const APInt &OpConstVal,
1324 const DataLayout &DL) {
1325 assert(isOperationFoldable(Usr) && "Precondition");
1326 Constant* OpConst = Constant::getIntegerValue(Op->getType(), OpConstVal);
1327 // Check if Usr can be simplified to a constant.
1328 if (auto *CI = dyn_cast<CastInst>(Usr)) {
1329 assert(CI->getOperand(0) == Op && "Operand 0 isn't Op");
1330 if (auto *C = dyn_cast_or_null<ConstantInt>(
1331 simplifyCastInst(CI->getOpcode(), OpConst,
1332 CI->getDestTy(), DL))) {
1333 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1334 }
1335 } else if (auto *BO = dyn_cast<BinaryOperator>(Usr)) {
1336 bool Op0Match = BO->getOperand(0) == Op;
1337 bool Op1Match = BO->getOperand(1) == Op;
1338 assert((Op0Match || Op1Match) &&
1339 "Operand 0 nor Operand 1 isn't a match");
1340 Value *LHS = Op0Match ? OpConst : BO->getOperand(0);
1341 Value *RHS = Op1Match ? OpConst : BO->getOperand(1);
1342 if (auto *C = dyn_cast_or_null<ConstantInt>(
1343 simplifyBinOp(BO->getOpcode(), LHS, RHS, DL))) {
1344 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1345 }
1346 } else if (isa<FreezeInst>(Usr)) {
1347 assert(cast<FreezeInst>(Usr)->getOperand(0) == Op && "Operand 0 isn't Op");
1348 return ValueLatticeElement::getRange(ConstantRange(OpConstVal));
1349 }
1350 return ValueLatticeElement::getOverdefined();
1351}
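// For illustration (hypothetical IR):
//   %Val = zext i1 %Condition to i32
// With %Condition known to be true on an edge, constantFoldUser(%Val,
// %Condition, 1, DL) folds the cast and returns the singleton range [1, 2).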
1352
1353/// Compute the value of Val on the edge BBFrom -> BBTo.
1354std::optional<ValueLatticeElement>
1355LazyValueInfoImpl::getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
1356 BasicBlock *BBTo, bool UseBlockValue) {
1357 // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
1358 // know that v != 0.
1359 if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
1360 // If this is a conditional branch and only one successor goes to BBTo, then
1361 // we may be able to infer something from the condition.
1362 if (BI->isConditional() &&
1363 BI->getSuccessor(0) != BI->getSuccessor(1)) {
1364 bool isTrueDest = BI->getSuccessor(0) == BBTo;
1365 assert(BI->getSuccessor(!isTrueDest) == BBTo &&
1366 "BBTo isn't a successor of BBFrom");
1367 Value *Condition = BI->getCondition();
1368
1369 // If V is the condition of the branch itself, then we know exactly what
1370 // it is.
1371 if (Condition == Val)
1372 return ValueLatticeElement::get(ConstantInt::get(
1373 Type::getInt1Ty(Val->getContext()), isTrueDest));
1374
1375 // If the condition of the branch is an equality comparison, we may be
1376 // able to infer the value.
1377 std::optional<ValueLatticeElement> Result =
1378 getValueFromCondition(Val, Condition, isTrueDest, UseBlockValue);
1379 if (!Result)
1380 return std::nullopt;
1381
1382 if (!Result->isOverdefined())
1383 return Result;
1384
1385 if (User *Usr = dyn_cast<User>(Val)) {
1386 assert(Result->isOverdefined() && "Result isn't overdefined");
1387 // Check with isOperationFoldable() first to avoid linearly iterating
1388 // over the operands unnecessarily which can be expensive for
1389 // instructions with many operands.
1390 if (isa<IntegerType>(Usr->getType()) && isOperationFoldable(Usr)) {
1391 const DataLayout &DL = BBTo->getDataLayout();
1392 if (usesOperand(Usr, Condition)) {
1393 // If Val has Condition as an operand and Val can be folded into a
1394 // constant with either Condition == true or Condition == false,
1395 // propagate the constant.
1396 // eg.
1397 // ; %Val is true on the edge to %then.
1398 // %Val = and i1 %Condition, true.
1399 // br %Condition, label %then, label %else
1400 APInt ConditionVal(1, isTrueDest ? 1 : 0);
1401 Result = constantFoldUser(Usr, Condition, ConditionVal, DL);
1402 } else {
1403 // If one of Val's operand has an inferred value, we may be able to
1404 // infer the value of Val.
1405 // eg.
1406 // ; %Val is 94 on the edge to %then.
1407 // %Val = add i8 %Op, 1
1408 // %Condition = icmp eq i8 %Op, 93
1409 // br i1 %Condition, label %then, label %else
1410 for (unsigned i = 0; i < Usr->getNumOperands(); ++i) {
1411 Value *Op = Usr->getOperand(i);
1412 ValueLatticeElement OpLatticeVal = *getValueFromCondition(
1413 Op, Condition, isTrueDest, /*UseBlockValue*/ false);
1414 if (std::optional<APInt> OpConst =
1415 OpLatticeVal.asConstantInteger()) {
1416 Result = constantFoldUser(Usr, Op, *OpConst, DL);
1417 break;
1418 }
1419 }
1420 }
1421 }
1422 }
1423 if (!Result->isOverdefined())
1424 return Result;
1425 }
1426 }
1427
1428 // If the edge was formed by a switch on the value, then we may know exactly
1429 // what it is.
1430 if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
1431 Value *Condition = SI->getCondition();
1432 if (!isa<IntegerType>(Val->getType()))
1433 return ValueLatticeElement::getOverdefined();
1434 bool ValUsesConditionAndMayBeFoldable = false;
1435 if (Condition != Val) {
1436 // Check if Val has Condition as an operand.
1437 if (User *Usr = dyn_cast<User>(Val))
1438 ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) &&
1439 usesOperand(Usr, Condition);
1440 if (!ValUsesConditionAndMayBeFoldable)
1441 return ValueLatticeElement::getOverdefined();
1442 }
1443 assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
1444 "Condition != Val nor Val doesn't use Condition");
1445
1446 bool DefaultCase = SI->getDefaultDest() == BBTo;
1447 unsigned BitWidth = Val->getType()->getIntegerBitWidth();
1448 ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);
1449
1450 for (auto Case : SI->cases()) {
1451 APInt CaseValue = Case.getCaseValue()->getValue();
1452 ConstantRange EdgeVal(CaseValue);
1453 if (ValUsesConditionAndMayBeFoldable) {
1454 User *Usr = cast<User>(Val);
1455 const DataLayout &DL = BBTo->getDataLayout();
1456 ValueLatticeElement EdgeLatticeVal =
1457 constantFoldUser(Usr, Condition, CaseValue, DL);
1458 if (EdgeLatticeVal.isOverdefined())
1459 return ValueLatticeElement::getOverdefined();
1460 EdgeVal = EdgeLatticeVal.getConstantRange();
1461 }
1462 if (DefaultCase) {
1463 // It is possible that the default destination is the destination of
1464 // some cases. We cannot perform difference for those cases.
1465 // We know Condition != CaseValue in BBTo. In some cases we can use
1466 // this to infer Val == f(Condition) is != f(CaseValue). For now, we
1467 // only do this when f is identity (i.e. Val == Condition), but we
1468 // should be able to do this for any injective f.
1469 if (Case.getCaseSuccessor() != BBTo && Condition == Val)
1470 EdgesVals = EdgesVals.difference(EdgeVal);
1471 } else if (Case.getCaseSuccessor() == BBTo)
1472 EdgesVals = EdgesVals.unionWith(EdgeVal);
1473 }
1474 return ValueLatticeElement::getRange(std::move(EdgesVals));
1475 }
1476 return ValueLatticeElement::getOverdefined();
1477}
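// For illustration (hypothetical IR):
//   switch i8 %x, label %default [ i8 1, label %bb
//                                  i8 7, label %bb ]
// On the edge to %bb the case values are unioned, so %x is known to be in
// [1, 8); on the edge to %default the matching case values are conservatively
// carved out of the full range instead.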
1478
1479/// Compute the value of Val on the edge BBFrom -> BBTo or the value at
1480/// the basic block if the edge does not constrain Val.
1481std::optional<ValueLatticeElement>
1482LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom,
1483 BasicBlock *BBTo, Instruction *CxtI) {
1484 // If already a constant, there is nothing to compute.
1485 if (Constant *VC = dyn_cast<Constant>(Val))
1486 return ValueLatticeElement::get(VC);
1487
1488 std::optional<ValueLatticeElement> LocalResult =
1489 getEdgeValueLocal(Val, BBFrom, BBTo, /*UseBlockValue*/ true);
1490 if (!LocalResult)
1491 return std::nullopt;
1492
1493 if (hasSingleValue(*LocalResult))
1494 // Can't get any more precise here
1495 return LocalResult;
1496
1497 std::optional<ValueLatticeElement> OptInBlock =
1498 getBlockValue(Val, BBFrom, BBFrom->getTerminator());
1499 if (!OptInBlock)
1500 return std::nullopt;
1501 ValueLatticeElement &InBlock = *OptInBlock;
1502
1503 // We can use the context instruction (generically the ultimate instruction
1504 // the calling pass is trying to simplify) here, even though the result of
1505 // this function is generally cached when called from the solve* functions
1506 // (and that cached result might be used with queries using a different
1507 // context instruction), because when this function is called from the solve*
1508 // functions, the context instruction is not provided. When called from
1509 // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided,
1510 // but then the result is not cached.
1511 intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock, CxtI);
1512
1513 return intersect(*LocalResult, InBlock);
1514}
1515
1516ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
1517 Instruction *CxtI) {
1518 LLVM_DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
1519 << BB->getName() << "'\n");
1520
1521 assert(BlockValueStack.empty() && BlockValueSet.empty());
1522 std::optional<ValueLatticeElement> OptResult = getBlockValue(V, BB, CxtI);
1523 if (!OptResult) {
1524 solve();
1525 OptResult = getBlockValue(V, BB, CxtI);
1526 assert(OptResult && "Value not available after solving");
1527 }
1528
1529 ValueLatticeElement Result = *OptResult;
1530 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1531 return Result;
1532}
1533
1534ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
1535 LLVM_DEBUG(dbgs() << "LVI Getting value " << *V << " at '" << CxtI->getName()
1536 << "'\n");
1537
1538 if (auto *C = dyn_cast<Constant>(V))
1539 return ValueLatticeElement::get(C);
1540
1541 ValueLatticeElement Result = ValueLatticeElement::getOverdefined();
1542 if (auto *I = dyn_cast<Instruction>(V))
1543 Result = getFromRangeMetadata(I);
1544 intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
1545
1546 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1547 return Result;
1548}
1549
1550ValueLatticeElement LazyValueInfoImpl::
1551getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
1552 Instruction *CxtI) {
1553 LLVM_DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
1554 << FromBB->getName() << "' to '" << ToBB->getName()
1555 << "'\n");
1556
1557 std::optional<ValueLatticeElement> Result =
1558 getEdgeValue(V, FromBB, ToBB, CxtI);
1559 while (!Result) {
1560 // As the worklist only explicitly tracks block values (but not edge values)
1561 // we may have to call solve() multiple times, as the edge value calculation
1562 // may request additional block values.
1563 solve();
1564 Result = getEdgeValue(V, FromBB, ToBB, CxtI);
1565 }
1566
1567 LLVM_DEBUG(dbgs() << " Result = " << *Result << "\n");
1568 return *Result;
1569}
1570
1571ValueLatticeElement LazyValueInfoImpl::getValueAtUse(const Use &U) {
1572 Value *V = U.get();
1573 auto *CxtI = cast<Instruction>(U.getUser());
1574 ValueLatticeElement VL = getValueInBlock(V, CxtI->getParent(), CxtI);
1575
1576 // Check whether the only (possibly transitive) use of the value is in a
1577 // position where V can be constrained by a select or branch condition.
1578 const Use *CurrU = &U;
1579 // TODO: Increase limit?
1580 const unsigned MaxUsesToInspect = 3;
1581 for (unsigned I = 0; I < MaxUsesToInspect; ++I) {
1582 std::optional<ValueLatticeElement> CondVal;
1583 auto *CurrI = cast<Instruction>(CurrU->getUser());
1584 if (auto *SI = dyn_cast<SelectInst>(CurrI)) {
1585 // If the value is undef, a different value may be chosen in
1586 // the select condition and at use.
1587 if (!isGuaranteedNotToBeUndef(SI->getCondition(), AC))
1588 break;
1589 if (CurrU->getOperandNo() == 1)
1590 CondVal =
1591 *getValueFromCondition(V, SI->getCondition(), /*IsTrueDest*/ true,
1592 /*UseBlockValue*/ false);
1593 else if (CurrU->getOperandNo() == 2)
1594 CondVal =
1595 *getValueFromCondition(V, SI->getCondition(), /*IsTrueDest*/ false,
1596 /*UseBlockValue*/ false);
1597 } else if (auto *PHI = dyn_cast<PHINode>(CurrI)) {
1598 // TODO: Use non-local query?
1599 CondVal = *getEdgeValueLocal(V, PHI->getIncomingBlock(*CurrU),
1600 PHI->getParent(), /*UseBlockValue*/ false);
1601 }
1602 if (CondVal)
1603 VL = intersect(VL, *CondVal);
1604
1605 // Only follow one-use chain, to allow direct intersection of conditions.
1606 // If there are multiple uses, we would have to intersect with the union of
1607 // all conditions at different uses.
1608 // Stop walking if we hit a non-speculatable instruction. Even if the
1609 // result is only used under a specific condition, executing the
1610 // instruction itself may cause side effects or UB already.
1611 // This also disallows looking through phi nodes: If the phi node is part
1612 // of a cycle, we might end up reasoning about values from different cycle
1613 // iterations (PR60629).
1614 if (!CurrI->hasOneUse() || !isSafeToSpeculativelyExecute(CurrI))
1615 break;
1616 CurrU = &*CurrI->use_begin();
1617 }
1618 return VL;
1619}
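// Illustrative sketch (not part of the upstream source; the IR below is
// assumed). For:
//   %c = icmp ult i32 %x, 10
//   %s = select i1 %c, i32 %x, i32 7
// a query at the use of %x as the select's true operand can intersect the
// block value of %x with the range implied by "%x u< 10", giving
// constantrange<0, 10> at that use even if the block value alone is
// overdefined. The walk stops at multi-use or non-speculatable instructions,
// as explained in the comments above.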
1620
1621void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1622 BasicBlock *NewSucc) {
1623 TheCache.threadEdgeImpl(OldSucc, NewSucc);
1624}
1625
1626//===----------------------------------------------------------------------===//
1627// LazyValueInfo Impl
1628//===----------------------------------------------------------------------===//
1629
1630bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
1631 Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1632
1633 if (auto *Impl = Info.getImpl())
1634 Impl->clear();
1635
1636 // Fully lazy.
1637 return false;
1638}
1639
1640void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1641 AU.setPreservesAll();
1642 AU.addRequired<AssumptionCacheTracker>();
1643 AU.addRequired<TargetLibraryInfoWrapperPass>();
1644}
1645
1646LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }
1647
1648/// This lazily constructs the LazyValueInfoImpl.
1649LazyValueInfoImpl &LazyValueInfo::getOrCreateImpl(const Module *M) {
1650 if (!PImpl) {
1651 assert(M && "getCache() called with a null Module");
1652 const DataLayout &DL = M->getDataLayout();
1653 Function *GuardDecl =
1654 M->getFunction(Intrinsic::getName(Intrinsic::experimental_guard));
1655 PImpl = new LazyValueInfoImpl(AC, DL, GuardDecl);
1656 }
1657 return *static_cast<LazyValueInfoImpl *>(PImpl);
1658}
1659
1660LazyValueInfoImpl *LazyValueInfo::getImpl() {
1661 if (!PImpl)
1662 return nullptr;
1663 return static_cast<LazyValueInfoImpl *>(PImpl);
1664}
1665
1666LazyValueInfo::~LazyValueInfo() { releaseMemory(); }
1667
1668void LazyValueInfo::releaseMemory() {
1669 // If the cache was allocated, free it.
1670 if (auto *Impl = getImpl()) {
1671 delete &*Impl;
1672 PImpl = nullptr;
1673 }
1674}
1675
1676bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA,
1677 FunctionAnalysisManager::Invalidator &Inv) {
1678 // We need to invalidate if we have either failed to preserve this analysis's
1679 // result directly or if any of its dependencies have been invalidated.
1680 auto PAC = PA.getChecker<LazyValueAnalysis>();
1681 if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()))
1682 return true;
1683
1684 return false;
1685}
1686
1687void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }
1688
1689LazyValueInfo LazyValueAnalysis::run(Function &F,
1690 FunctionAnalysisManager &FAM) {
1691 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
1692
1693 return LazyValueInfo(&AC, &F.getDataLayout());
1694}
1695
1696/// Returns true if we can statically tell that this value will never be a
1697/// "useful" constant. In practice, this means we've got something like an
1698/// alloca or a malloc call for which a comparison against a constant can
1699/// only be guarding dead code. Note that we are potentially giving up some
1700/// precision in dead code (a constant result) in favour of avoiding an
1701/// expensive search for an easily answered common query.
1702static bool isKnownNonConstant(Value *V) {
1703 V = V->stripPointerCasts();
1704 // The return value of an alloca cannot be a Constant.
1705 if (isa<AllocaInst>(V))
1706 return true;
1707 return false;
1708}
1709
1710Constant *LazyValueInfo::getConstant(Value *V, Instruction *CxtI) {
1711 // Bail out early if V is known not to be a Constant.
1712 if (isKnownNonConstant(V))
1713 return nullptr;
1714
1715 BasicBlock *BB = CxtI->getParent();
1716 ValueLatticeElement Result =
1717 getOrCreateImpl(BB->getModule()).getValueInBlock(V, BB, CxtI);
1718
1719 if (Result.isConstant())
1720 return Result.getConstant();
1721 if (Result.isConstantRange()) {
1722 const ConstantRange &CR = Result.getConstantRange();
1723 if (const APInt *SingleVal = CR.getSingleElement())
1724 return ConstantInt::get(V->getContext(), *SingleVal);
1725 }
1726 return nullptr;
1727}
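// Hedged usage sketch (illustrative only; the client code below is assumed,
// not taken from this file). A transform holding a LazyValueInfo reference
// can fold a value that LVI proves constant at a given instruction:
//   if (Constant *C = LVI->getConstant(V, &I))
//     I.replaceUsesOfWith(V, C);
// Consumers such as CorrelatedValuePropagation use this query interface in a
// similar way.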
1728
1729ConstantRange LazyValueInfo::getConstantRange(Value *V, Instruction *CxtI,
1730 bool UndefAllowed) {
1731 assert(V->getType()->isIntegerTy());
1732 BasicBlock *BB = CxtI->getParent();
1733 ValueLatticeElement Result =
1734 getOrCreateImpl(BB->getModule()).getValueInBlock(V, BB, CxtI);
1735 return toConstantRange(Result, V->getType(), UndefAllowed);
1736}
1737
1738ConstantRange LazyValueInfo::getConstantRangeAtUse(const Use &U,
1739 bool UndefAllowed) {
1740 auto *Inst = cast<Instruction>(U.getUser());
1741 ValueLatticeElement Result =
1742 getOrCreateImpl(Inst->getModule()).getValueAtUse(U);
1743 return toConstantRange(Result, U->getType(), UndefAllowed);
1744}
1745
1746/// Determine whether the specified value is known to be a
1747/// constant on the specified edge. Return null if not.
1748Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
1749 BasicBlock *ToBB,
1750 Instruction *CxtI) {
1751 Module *M = FromBB->getModule();
1752 ValueLatticeElement Result =
1753 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1754
1755 if (Result.isConstant())
1756 return Result.getConstant();
1757 if (Result.isConstantRange()) {
1758 const ConstantRange &CR = Result.getConstantRange();
1759 if (const APInt *SingleVal = CR.getSingleElement())
1760 return ConstantInt::get(V->getContext(), *SingleVal);
1761 }
1762 return nullptr;
1763}
1764
1765ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
1766 BasicBlock *FromBB,
1767 BasicBlock *ToBB,
1768 Instruction *CxtI) {
1769 Module *M = FromBB->getModule();
1770 ValueLatticeElement Result =
1771 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1772 // TODO: Should undef be allowed here?
1773 return toConstantRange(Result, V->getType(), /*UndefAllowed*/ true);
1774}
1775
1776static LazyValueInfo::Tristate
1777getPredicateResult(unsigned Pred, Constant *C, const ValueLatticeElement &Val,
1778 const DataLayout &DL) {
1779 // If we know the value is a constant, evaluate the conditional.
1780 Constant *Res = nullptr;
1781 if (Val.isConstant()) {
1782 Res = ConstantFoldCompareInstOperands(Pred, Val.getConstant(), C, DL);
1783 if (ConstantInt *ResCI = dyn_cast_or_null<ConstantInt>(Res))
1784 return ResCI->isZero() ? LazyValueInfo::False : LazyValueInfo::True;
1785 return LazyValueInfo::Unknown;
1786 }
1787
1788 if (Val.isConstantRange()) {
1789 ConstantInt *CI = dyn_cast<ConstantInt>(C);
1790 if (!CI) return LazyValueInfo::Unknown;
1791
1792 const ConstantRange &CR = Val.getConstantRange();
1793 if (Pred == ICmpInst::ICMP_EQ) {
1794 if (!CR.contains(CI->getValue()))
1795 return LazyValueInfo::False;
1796
1797 if (CR.isSingleElement())
1798 return LazyValueInfo::True;
1799 } else if (Pred == ICmpInst::ICMP_NE) {
1800 if (!CR.contains(CI->getValue()))
1801 return LazyValueInfo::True;
1802
1803 if (CR.isSingleElement())
1804 return LazyValueInfo::False;
1805 } else {
1806 // Handle more complex predicates.
1807 ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(
1808 (ICmpInst::Predicate)Pred, CI->getValue());
1809 if (TrueValues.contains(CR))
1810 return LazyValueInfo::True;
1811 if (TrueValues.inverse().contains(CR))
1812 return LazyValueInfo::False;
1813 }
1814 return LazyValueInfo::Unknown;
1815 }
1816
1817 if (Val.isNotConstant()) {
1818 // If this is an equality comparison, we can try to fold it knowing that
1819 // "V != C1".
1820 if (Pred == ICmpInst::ICMP_EQ) {
1821 // !C1 == C -> false iff C1 == C.
1822 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_EQ,
1823 Val.getNotConstant(), C, DL);
1824 if (Res && Res->isNullValue())
1825 return LazyValueInfo::False;
1826 } else if (Pred == ICmpInst::ICMP_NE) {
1827 // !C1 != C -> true iff C1 == C.
1828 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_EQ,
1829 Val.getNotConstant(), C, DL);
1830 if (Res && Res->isNullValue())
1831 return LazyValueInfo::True;
1832 }
1833 return LazyValueInfo::Unknown;
1834 }
1835
1836 return LazyValueInfo::Unknown;
1837}
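// Worked example (illustrative, not from the upstream source). If Val is
// constantrange<1, 5>, then "icmp eq %v, 8" folds to False because 8 is not
// contained in the range, while "icmp ult %v, 10" folds to True because the
// exact ICMP region for "ult 10", namely [0, 10), contains all of [1, 5).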
1838
1839/// Determine whether the specified value comparison with a constant is known to
1840/// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
1841LazyValueInfo::Tristate
1842LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
1843 BasicBlock *FromBB, BasicBlock *ToBB,
1844 Instruction *CxtI) {
1845 Module *M = FromBB->getModule();
1846 ValueLatticeElement Result =
1847 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1848
1849 return getPredicateResult(Pred, C, Result, M->getDataLayout());
1850}
1851
1852LazyValueInfo::Tristate
1853LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
1854 Instruction *CxtI, bool UseBlockValue) {
1855 // "Is NonNull" and "is not NonNull" are commonly queried predicates. If
1856 // isKnownNonZero can tell us the result of the predicate, we can
1857 // return it quickly. But this is only a fastpath, and falling
1858 // through would still be correct.
1859 Module *M = CxtI->getModule();
1860 const DataLayout &DL = M->getDataLayout();
1861 if (V->getType()->isPointerTy() && C->isNullValue() &&
1862 isKnownNonZero(V->stripPointerCastsSameRepresentation(), DL)) {
1863 if (Pred == ICmpInst::ICMP_EQ)
1864 return LazyValueInfo::False;
1865 else if (Pred == ICmpInst::ICMP_NE)
1866 return LazyValueInfo::True;
1867 }
1868
1869 auto &Impl = getOrCreateImpl(M);
1870 ValueLatticeElement Result =
1871 UseBlockValue ? Impl.getValueInBlock(V, CxtI->getParent(), CxtI)
1872 : Impl.getValueAt(V, CxtI);
1873 Tristate Ret = getPredicateResult(Pred, C, Result, DL);
1874 if (Ret != Unknown)
1875 return Ret;
1876
1877 // Note: The following bit of code is somewhat distinct from the rest of LVI;
1878 // LVI as a whole tries to compute a lattice value which is conservatively
1879 // correct at a given location. In this case, we have a predicate which we
1880 // weren't able to prove about the merged result, and we're pushing that
1881 // predicate back along each incoming edge to see if we can prove it
1882 // separately for each input. As a motivating example, consider:
1883 // bb1:
1884 // %v1 = ... ; constantrange<1, 5>
1885 // br label %merge
1886 // bb2:
1887 // %v2 = ... ; constantrange<10, 20>
1888 // br label %merge
1889 // merge:
1890 // %phi = phi [%v1, %v2] ; constantrange<1,20>
1891 // %pred = icmp eq i32 %phi, 8
1892 // We can't tell from the lattice value for '%phi' that '%pred' is false
1893 // along each path, but by checking the predicate over each input separately,
1894 // we can.
1895 // We limit the search to one step backwards from the current BB and value.
1896 // We could consider extending this to search further backwards through the
1897 // CFG and/or value graph, but there are non-obvious compile time vs quality
1898 // tradeoffs.
1899 BasicBlock *BB = CxtI->getParent();
1900
1901 // Function entry or an unreachable block. Bail to avoid confusing
1902 // analysis below.
1903 pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
1904 if (PI == PE)
1905 return Unknown;
1906
1907 // If V is a PHI node in the same block as the context, we need to ask
1908 // questions about the predicate as applied to the incoming value along
1909 // each edge. This is useful for eliminating cases where the predicate is
1910 // known along all incoming edges.
1911 if (auto *PHI = dyn_cast<PHINode>(V))
1912 if (PHI->getParent() == BB) {
1913 Tristate Baseline = Unknown;
1914 for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
1915 Value *Incoming = PHI->getIncomingValue(i);
1916 BasicBlock *PredBB = PHI->getIncomingBlock(i);
1917 // Note that PredBB may be BB itself.
1918 Tristate Result =
1919 getPredicateOnEdge(Pred, Incoming, C, PredBB, BB, CxtI);
1920
1921 // Keep going as long as we've seen a consistent known result for
1922 // all inputs.
1923 Baseline = (i == 0) ? Result /* First iteration */
1924 : (Baseline == Result ? Baseline
1925 : Unknown); /* All others */
1926 if (Baseline == Unknown)
1927 break;
1928 }
1929 if (Baseline != Unknown)
1930 return Baseline;
1931 }
1932
1933 // For a comparison where the V is outside this block, it's possible
1934 // that we've branched on it before. Look to see if the value is known
1935 // on all incoming edges.
1936 if (!isa<Instruction>(V) || cast<Instruction>(V)->getParent() != BB) {
1937 // For predecessor edge, determine if the comparison is true or false
1938 // on that edge. If they're all true or all false, we can conclude
1939 // the value of the comparison in this block.
1940 Tristate Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1941 if (Baseline != Unknown) {
1942 // Check that all remaining incoming values match the first one.
1943 while (++PI != PE) {
1944 Tristate Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1945 if (Ret != Baseline)
1946 break;
1947 }
1948 // If we terminated early, then one of the values didn't match.
1949 if (PI == PE) {
1950 return Baseline;
1951 }
1952 }
1953 }
1954
1955 return Unknown;
1956}
1957
1958LazyValueInfo::Tristate LazyValueInfo::getPredicateAt(unsigned P, Value *LHS,
1959 Value *RHS,
1960 Instruction *CxtI,
1961 bool UseBlockValue) {
1962 CmpInst::Predicate Pred = (CmpInst::Predicate)P;
1963
1964 if (auto *C = dyn_cast<Constant>(RHS))
1965 return getPredicateAt(P, LHS, C, CxtI, UseBlockValue);
1966 if (auto *C = dyn_cast<Constant>(LHS))
1967 return getPredicateAt(CmpInst::getSwappedPredicate(Pred), RHS, C, CxtI,
1968 UseBlockValue);
1969
1970 // Got two non-Constant values. Try to determine the comparison results based
1971 // on the block values of the two operands, e.g. because they have
1972 // non-overlapping ranges.
1973 if (UseBlockValue) {
1974 Module *M = CxtI->getModule();
1975 ValueLatticeElement L =
1976 getOrCreateImpl(M).getValueInBlock(LHS, CxtI->getParent(), CxtI);
1977 if (L.isOverdefined())
1978 return LazyValueInfo::Unknown;
1979
1980 ValueLatticeElement R =
1981 getOrCreateImpl(M).getValueInBlock(RHS, CxtI->getParent(), CxtI);
1982 Type *Ty = CmpInst::makeCmpResultType(LHS->getType());
1983 if (Constant *Res = L.getCompare((CmpInst::Predicate)P, Ty, R,
1984 M->getDataLayout())) {
1985 if (Res->isNullValue())
1986 return LazyValueInfo::False;
1987 if (Res->isOneValue())
1988 return LazyValueInfo::True;
1989 }
1990 }
1991 return LazyValueInfo::Unknown;
1992}
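// Illustrative sketch (not part of the upstream source). With block values
//   LHS: constantrange<0, 10>   RHS: constantrange<20, 30>
// ValueLatticeElement::getCompare can fold "icmp ult LHS, RHS" to true and
// "icmp eq LHS, RHS" to false, since the two ranges do not overlap.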
1993
1994void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1995 BasicBlock *NewSucc) {
1996 if (auto *Impl = getImpl())
1997 Impl->threadEdge(PredBB, OldSucc, NewSucc);
1998}
1999
2000void LazyValueInfo::forgetValue(Value *V) {
2001 if (auto *Impl = getImpl())
2002 Impl->forgetValue(V);
2003}
2004
2005void LazyValueInfo::eraseBlock(BasicBlock *BB) {
2006 if (auto *Impl = getImpl())
2007 Impl->eraseBlock(BB);
2008}
2009
2010void LazyValueInfo::clear() {
2011 if (auto *Impl = getImpl())
2012 Impl->clear();
2013}
2014
2015void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
2016 if (auto *Impl = getImpl())
2017 Impl->printLVI(F, DTree, OS);
2018}
2019
2020// Print the LVI for the function arguments at the start of each basic block.
2021void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
2022 const BasicBlock *BB, formatted_raw_ostream &OS) {
2023 // Find out whether there are lattice values defined for the arguments of the function.
2024 auto *F = BB->getParent();
2025 for (const auto &Arg : F->args()) {
2026 ValueLatticeElement Result = LVIImpl->getValueInBlock(
2027 const_cast<Argument *>(&Arg), const_cast<BasicBlock *>(BB));
2028 if (Result.isUnknown())
2029 continue;
2030 OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
2031 }
2032}
2033
2034// This function prints the LVI analysis for the instruction I at the beginning
2035// of various basic blocks. It relies on calculated values that are stored in
2036// the LazyValueInfoCache, and in the absence of cached values, recalculates the
2037// LazyValueInfo for `I` and prints that info.
2038void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
2039 const Instruction *I, formatted_raw_ostream &OS) {
2040
2041 auto *ParentBB = I->getParent();
2042 SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI;
2043 // We can generate (solve) LVI values only for blocks that are dominated by
2044 // I's parent. However, to avoid generating LVI for all such blocks, which
2045 // would contain mostly redundant/uninteresting information, we only print
2046 // LVI for blocks that may actually use it (such as immediate successor
2047 // blocks, and blocks that contain uses of `I`).
2048 auto printResult = [&](const BasicBlock *BB) {
2049 if (!BlocksContainingLVI.insert(BB).second)
2050 return;
2051 ValueLatticeElement Result = LVIImpl->getValueInBlock(
2052 const_cast<Instruction *>(I), const_cast<BasicBlock *>(BB));
2053 OS << "; LatticeVal for: '" << *I << "' in BB: '";
2054 BB->printAsOperand(OS, false);
2055 OS << "' is: " << Result << "\n";
2056 };
2057
2058 printResult(ParentBB);
2059 // Print the LVI analysis results for the immediate successor blocks, that
2060 // are dominated by `ParentBB`.
2061 for (const auto *BBSucc : successors(ParentBB))
2062 if (DT.dominates(ParentBB, BBSucc))
2063 printResult(BBSucc);
2064
2065 // Print LVI in blocks where `I` is used.
2066 for (const auto *U : I->users())
2067 if (auto *UseI = dyn_cast<Instruction>(U))
2068 if (!isa<PHINode>(UseI) || DT.dominates(ParentBB, UseI->getParent()))
2069 printResult(UseI->getParent());
2070
2071}
2072
2073PreservedAnalyses LazyValueInfoPrinterPass::run(Function &F,
2074 FunctionAnalysisManager &AM) {
2075 OS << "LVI for function '" << F.getName() << "':\n";
2076 auto &LVI = AM.getResult<LazyValueAnalysis>(F);
2077 auto &DTree = AM.getResult<DominatorTreeAnalysis>(F);
2078 LVI.printLVI(F, DTree, OS);
2079 return PreservedAnalyses::all();
2080}
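// Usage note (hedged; the exact invocation below is assumed rather than taken
// from this file): the printer pass is normally exercised through the new pass
// manager, e.g.
//   opt -passes='print<lazy-value-info>' -disable-output input.ll
// which prints the lattice values computed on demand for each function.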