LazyValueInfo.cpp
1//===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interface for lazy computation of value constraint
10// information.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/DenseSet.h"
16#include "llvm/ADT/STLExtras.h"
24#include "llvm/IR/CFG.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/Dominators.h"
29#include "llvm/IR/InstrTypes.h"
32#include "llvm/IR/Intrinsics.h"
33#include "llvm/IR/LLVMContext.h"
35#include "llvm/IR/ValueHandle.h"
37#include "llvm/Support/Debug.h"
41#include <optional>
42using namespace llvm;
43using namespace PatternMatch;
44
45#define DEBUG_TYPE "lazy-value-info"
46
47// This is the number of worklist items we will process to try to discover an
48// answer for a given value.
49static const unsigned MaxProcessedPerValue = 500;
50
54}
56 "Lazy Value Information Analysis", false, true)
61
62namespace llvm {
64}
65
66AnalysisKey LazyValueAnalysis::Key;
67
68/// Returns true if this lattice value represents at most one possible value.
69/// This is as precise as any lattice value can get while still representing
70/// reachable code.
71static bool hasSingleValue(const ValueLatticeElement &Val) {
72 if (Val.isConstantRange() &&
73 Val.getConstantRange().isSingleElement())
74 // Integer constants are single element ranges
75 return true;
76 if (Val.isConstant())
77 // Non-integer constants
78 return true;
79 return false;
80}
81
82/// Combine two sets of facts about the same value into a single set of
83/// facts. Note that this method is not suitable for merging facts along
84/// different paths in a CFG; that's what the mergeIn function is for. This
85/// is for merging facts gathered about the same value at the same location
86/// through two independent means.
87/// Notes:
88/// * This method does not promise to return the most precise possible lattice
89/// value implied by A and B. It is allowed to return any lattice element
90/// which is at least as strong as *either* A or B (unless our facts
91/// conflict, see below).
92/// * Due to unreachable code, the intersection of two lattice values could be
93/// contradictory. If this happens, we return some valid lattice value so as
94/// not to confuse the rest of LVI. Ideally, we'd always return Undefined, but
95/// we do not make this guarantee. TODO: This would be a useful enhancement.
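/// eg. if range metadata constrains a value to [0, 100) and an assume at the
/// same point constrains it to [50, 200), the intersected fact is the constant
/// range [50, 100).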
96static ValueLatticeElement intersect(const ValueLatticeElement &A,
97 const ValueLatticeElement &B) {
98 // Undefined is the strongest state. It means the value is known to be along
99 // an unreachable path.
100 if (A.isUnknown())
101 return A;
102 if (B.isUnknown())
103 return B;
104
105 // If we gave up for one, but got a useable fact from the other, use it.
106 if (A.isOverdefined())
107 return B;
108 if (B.isOverdefined())
109 return A;
110
111 // Can't get any more precise than constants.
112 if (hasSingleValue(A))
113 return A;
114 if (hasSingleValue(B))
115 return B;
116
117 // Could be either constant range or not constant here.
118 if (!A.isConstantRange() || !B.isConstantRange()) {
119 // TODO: Arbitrary choice, could be improved
120 return A;
121 }
122
123 // Intersect two constant ranges
124 ConstantRange Range =
125 A.getConstantRange().intersectWith(B.getConstantRange());
126 // Note: An empty range is implicitly converted to unknown or undef depending
127 // on MayIncludeUndef internally.
128 return ValueLatticeElement::getRange(
129 std::move(Range), /*MayIncludeUndef=*/A.isConstantRangeIncludingUndef() ||
130 B.isConstantRangeIncludingUndef());
131}
132
133//===----------------------------------------------------------------------===//
134// LazyValueInfoCache Decl
135//===----------------------------------------------------------------------===//
136
137namespace {
138 /// A callback value handle updates the cache when values are erased.
139 class LazyValueInfoCache;
140 struct LVIValueHandle final : public CallbackVH {
141 LazyValueInfoCache *Parent;
142
143 LVIValueHandle(Value *V, LazyValueInfoCache *P = nullptr)
144 : CallbackVH(V), Parent(P) { }
145
146 void deleted() override;
147 void allUsesReplacedWith(Value *V) override {
148 deleted();
149 }
150 };
151} // end anonymous namespace
152
153namespace {
154 using NonNullPointerSet = SmallDenseSet<AssertingVH<Value>, 2>;
155
156 /// This is the cache kept by LazyValueInfo which
157 /// maintains information about queries across the clients' queries.
158 class LazyValueInfoCache {
159 /// This is all of the cached information for one basic block. It contains
160 /// the per-value lattice elements, as well as a separate set for
161 /// overdefined values to reduce memory usage. Additionally pointers
162 /// dereferenced in the block are cached for nullability queries.
163 struct BlockCacheEntry {
165 SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
166 // std::nullopt indicates that the nonnull pointers for this basic block
167 // have not been computed yet.
168 std::optional<NonNullPointerSet> NonNullPointers;
169 };
170
171 /// Cached information per basic block.
172 DenseMap<PoisoningVH<BasicBlock>, std::unique_ptr<BlockCacheEntry>>
173 BlockCache;
174 /// Set of value handles used to erase values from the cache on deletion.
176
177 const BlockCacheEntry *getBlockEntry(BasicBlock *BB) const {
178 auto It = BlockCache.find_as(BB);
179 if (It == BlockCache.end())
180 return nullptr;
181 return It->second.get();
182 }
183
184 BlockCacheEntry *getOrCreateBlockEntry(BasicBlock *BB) {
185 auto It = BlockCache.find_as(BB);
186 if (It == BlockCache.end())
187 It = BlockCache.insert({ BB, std::make_unique<BlockCacheEntry>() })
188 .first;
189
190 return It->second.get();
191 }
192
193 void addValueHandle(Value *Val) {
194 auto HandleIt = ValueHandles.find_as(Val);
195 if (HandleIt == ValueHandles.end())
196 ValueHandles.insert({ Val, this });
197 }
198
199 public:
200 void insertResult(Value *Val, BasicBlock *BB,
201 const ValueLatticeElement &Result) {
202 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
203
204 // Insert over-defined values into their own cache to reduce memory
205 // overhead.
206 if (Result.isOverdefined())
207 Entry->OverDefined.insert(Val);
208 else
209 Entry->LatticeElements.insert({ Val, Result });
210
211 addValueHandle(Val);
212 }
213
214 std::optional<ValueLatticeElement>
215 getCachedValueInfo(Value *V, BasicBlock *BB) const {
216 const BlockCacheEntry *Entry = getBlockEntry(BB);
217 if (!Entry)
218 return std::nullopt;
219
220 if (Entry->OverDefined.count(V))
221 return ValueLatticeElement::getOverdefined();
222
223 auto LatticeIt = Entry->LatticeElements.find_as(V);
224 if (LatticeIt == Entry->LatticeElements.end())
225 return std::nullopt;
226
227 return LatticeIt->second;
228 }
229
230 bool isNonNullAtEndOfBlock(
231 Value *V, BasicBlock *BB,
232 function_ref<NonNullPointerSet(BasicBlock *)> InitFn) {
233 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
234 if (!Entry->NonNullPointers) {
235 Entry->NonNullPointers = InitFn(BB);
236 for (Value *V : *Entry->NonNullPointers)
237 addValueHandle(V);
238 }
239
240 return Entry->NonNullPointers->count(V);
241 }
242
243 /// clear - Empty the cache.
244 void clear() {
245 BlockCache.clear();
246 ValueHandles.clear();
247 }
248
249 /// Inform the cache that a given value has been deleted.
250 void eraseValue(Value *V);
251
252 /// This is part of the update interface to inform the cache
253 /// that a block has been deleted.
254 void eraseBlock(BasicBlock *BB);
255
256 /// Updates the cache to remove any influence an overdefined value in
257 /// OldSucc might have (unless also overdefined in NewSucc). This just
258 /// flushes elements from the cache and does not add any.
259 void threadEdgeImpl(BasicBlock *OldSucc,BasicBlock *NewSucc);
260 };
261}
262
263void LazyValueInfoCache::eraseValue(Value *V) {
264 for (auto &Pair : BlockCache) {
265 Pair.second->LatticeElements.erase(V);
266 Pair.second->OverDefined.erase(V);
267 if (Pair.second->NonNullPointers)
268 Pair.second->NonNullPointers->erase(V);
269 }
270
271 auto HandleIt = ValueHandles.find_as(V);
272 if (HandleIt != ValueHandles.end())
273 ValueHandles.erase(HandleIt);
274}
275
276void LVIValueHandle::deleted() {
277 // This erasure deallocates *this, so it MUST happen after we're done
278 // using any and all members of *this.
279 Parent->eraseValue(*this);
280}
281
282void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
283 BlockCache.erase(BB);
284}
285
286void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
287 BasicBlock *NewSucc) {
288 // When an edge in the graph has been threaded, values that we could not
289 // determine a value for before (i.e. were marked overdefined) may be
290 // possible to solve now. We do NOT try to proactively update these values.
291 // Instead, we clear their entries from the cache, and allow lazy updating to
292 // recompute them when needed.
293
294 // The updating process is fairly simple: we need to drop cached info
295 // for all values that were marked overdefined in OldSucc, and for those same
296 // values in any successor of OldSucc (except NewSucc) in which they were
297 // also marked overdefined.
298 std::vector<BasicBlock*> worklist;
299 worklist.push_back(OldSucc);
300
301 const BlockCacheEntry *Entry = getBlockEntry(OldSucc);
302 if (!Entry || Entry->OverDefined.empty())
303 return; // Nothing to process here.
304 SmallVector<Value *, 4> ValsToClear(Entry->OverDefined.begin(),
305 Entry->OverDefined.end());
306
307 // Use a worklist to perform a depth-first search of OldSucc's successors.
308 // NOTE: We do not need a visited list since any blocks we have already
309 // visited will have had their overdefined markers cleared already, and we
310 // thus won't loop to their successors.
311 while (!worklist.empty()) {
312 BasicBlock *ToUpdate = worklist.back();
313 worklist.pop_back();
314
315 // Skip blocks only accessible through NewSucc.
316 if (ToUpdate == NewSucc) continue;
317
318 // If a value was marked overdefined in OldSucc, and is here too...
319 auto OI = BlockCache.find_as(ToUpdate);
320 if (OI == BlockCache.end() || OI->second->OverDefined.empty())
321 continue;
322 auto &ValueSet = OI->second->OverDefined;
323
324 bool changed = false;
325 for (Value *V : ValsToClear) {
326 if (!ValueSet.erase(V))
327 continue;
328
329 // If we removed anything, then we potentially need to update
330 // the block's successors too.
331 changed = true;
332 }
333
334 if (!changed) continue;
335
336 llvm::append_range(worklist, successors(ToUpdate));
337 }
338}
339
340namespace llvm {
341namespace {
342/// An assembly annotator class to print LazyValueCache information in
343/// comments.
344class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter {
345 LazyValueInfoImpl *LVIImpl;
346 // While analyzing which blocks we can solve values for, we need the dominator
347 // information.
348 DominatorTree &DT;
349
350public:
351 LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree)
352 : LVIImpl(L), DT(DTree) {}
353
354 void emitBasicBlockStartAnnot(const BasicBlock *BB,
355 formatted_raw_ostream &OS) override;
356
357 void emitInstructionAnnot(const Instruction *I,
358 formatted_raw_ostream &OS) override;
359};
360} // namespace
361// The actual implementation of the lazy analysis and update. Note that the
362// inheritance from LazyValueInfoCache is intended to be temporary while
363// splitting the code and then transitioning to a has-a relationship.
364class LazyValueInfoImpl {
365
366 /// Cached results from previous queries
367 LazyValueInfoCache TheCache;
368
369 /// This stack holds the state of the value solver during a query.
370 /// It basically emulates the callstack of the naive
371 /// recursive value lookup process.
373
374 /// Keeps track of which block-value pairs are in BlockValueStack.
376
377 /// Push BV onto BlockValueStack unless it's already in there.
378 /// Returns true on success.
379 bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
380 if (!BlockValueSet.insert(BV).second)
381 return false; // It's already in the stack.
382
383 LLVM_DEBUG(dbgs() << "PUSH: " << *BV.second << " in "
384 << BV.first->getName() << "\n");
385 BlockValueStack.push_back(BV);
386 return true;
387 }
388
389 AssumptionCache *AC; ///< A pointer to the cache of @llvm.assume calls.
390 const DataLayout &DL; ///< A mandatory DataLayout
391
392 /// Declaration of the llvm.experimental.guard() intrinsic,
393 /// if it exists in the module.
394 Function *GuardDecl;
395
396 std::optional<ValueLatticeElement> getBlockValue(Value *Val, BasicBlock *BB,
397 Instruction *CxtI);
398 std::optional<ValueLatticeElement> getEdgeValue(Value *V, BasicBlock *F,
399 BasicBlock *T,
400 Instruction *CxtI = nullptr);
401
402 // These methods process one work item and may add more. A false value
403 // returned means that the work item was not completely processed and must
404 // be revisited after going through the new items.
405 bool solveBlockValue(Value *Val, BasicBlock *BB);
406 std::optional<ValueLatticeElement> solveBlockValueImpl(Value *Val,
407 BasicBlock *BB);
408 std::optional<ValueLatticeElement> solveBlockValueNonLocal(Value *Val,
409 BasicBlock *BB);
410 std::optional<ValueLatticeElement> solveBlockValuePHINode(PHINode *PN,
411 BasicBlock *BB);
412 std::optional<ValueLatticeElement> solveBlockValueSelect(SelectInst *S,
413 BasicBlock *BB);
414 std::optional<ConstantRange> getRangeFor(Value *V, Instruction *CxtI,
415 BasicBlock *BB);
416 std::optional<ValueLatticeElement> solveBlockValueBinaryOpImpl(
417 Instruction *I, BasicBlock *BB,
418 std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
419 OpFn);
420 std::optional<ValueLatticeElement>
421 solveBlockValueBinaryOp(BinaryOperator *BBI, BasicBlock *BB);
422 std::optional<ValueLatticeElement> solveBlockValueCast(CastInst *CI,
423 BasicBlock *BB);
424 std::optional<ValueLatticeElement>
425 solveBlockValueOverflowIntrinsic(WithOverflowInst *WO, BasicBlock *BB);
426 std::optional<ValueLatticeElement> solveBlockValueIntrinsic(IntrinsicInst *II,
427 BasicBlock *BB);
428 std::optional<ValueLatticeElement>
429 solveBlockValueExtractValue(ExtractValueInst *EVI, BasicBlock *BB);
430 bool isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB);
431 void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
432 ValueLatticeElement &BBLV,
433 Instruction *BBI);
434
435 void solve();
436
437 // For the following methods, if UseBlockValue is true, the function may
438 // push additional values to the worklist and return nullopt. If
439 // UseBlockValue is false, it will never return nullopt.
440
441 std::optional<ValueLatticeElement>
442 getValueFromSimpleICmpCondition(CmpInst::Predicate Pred, Value *RHS,
443 const APInt &Offset, Instruction *CxtI,
444 bool UseBlockValue);
445
446 std::optional<ValueLatticeElement>
447 getValueFromICmpCondition(Value *Val, ICmpInst *ICI, bool isTrueDest,
448 bool UseBlockValue);
449
450 std::optional<ValueLatticeElement>
451 getValueFromCondition(Value *Val, Value *Cond, bool IsTrueDest,
452 bool UseBlockValue, unsigned Depth = 0);
453
454 std::optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
455 BasicBlock *BBFrom,
456 BasicBlock *BBTo,
457 bool UseBlockValue);
458
459public:
460 /// This is the query interface to determine the lattice value for the
461 /// specified Value* at the context instruction (if specified) or at the
462 /// start of the block.
463 ValueLatticeElement getValueInBlock(Value *V, BasicBlock *BB,
464 Instruction *CxtI = nullptr);
465
466 /// This is the query interface to determine the lattice value for the
467 /// specified Value* at the specified instruction using only information
468 /// from assumes/guards and range metadata. Unlike getValueInBlock(), no
469 /// recursive query is performed.
471
472 /// This is the query interface to determine the lattice
473 /// value for the specified Value* that is true on the specified edge.
474 ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB,
475 BasicBlock *ToBB,
476 Instruction *CxtI = nullptr);
477
479
480 /// Completely flush all previously computed values
481 void clear() {
482 TheCache.clear();
483 }
484
485 /// Printing the LazyValueInfo Analysis.
487 LazyValueInfoAnnotatedWriter Writer(this, DTree);
488 F.print(OS, &Writer);
489 }
490
491 /// This is part of the update interface to remove information related to this
492 /// value from the cache.
493 void forgetValue(Value *V) { TheCache.eraseValue(V); }
494
495 /// This is part of the update interface to inform the cache
496 /// that a block has been deleted.
497 void eraseBlock(BasicBlock *BB) {
498 TheCache.eraseBlock(BB);
499 }
500
501 /// This is the update interface to inform the cache that an edge from
502 /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
503 void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);
504
505 LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
506 Function *GuardDecl)
507 : AC(AC), DL(DL), GuardDecl(GuardDecl) {}
508};
509} // namespace llvm
510
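/// Drain BlockValueStack: take the pair on top and try to resolve it with
/// solveBlockValue(). Pairs that had to push new dependencies are left on the
/// stack and revisited once those dependencies have been solved; the
/// MaxProcessedPerValue throttle below bounds how much work a single query may
/// trigger before giving up and caching overdefined.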
511void LazyValueInfoImpl::solve() {
512 SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack(
513 BlockValueStack.begin(), BlockValueStack.end());
514
515 unsigned processedCount = 0;
516 while (!BlockValueStack.empty()) {
517 processedCount++;
518 // Abort if we have to process too many values to get a result for this one.
519 // Because of the design of the overdefined cache currently being per-block
520 // to avoid naming-related issues (i.e., it wants to try to give different
521 // results for the same name in different blocks), overdefined results don't
522 // get cached globally, which in turn means we will often try to rediscover
523 // the same overdefined result again and again. Once something like
524 // PredicateInfo is used in LVI or CVP, we should be able to make the
525 // overdefined cache global, and remove this throttle.
526 if (processedCount > MaxProcessedPerValue) {
527 LLVM_DEBUG(
528 dbgs() << "Giving up on stack because we are getting too deep\n");
529 // Fill in the original values
530 while (!StartingStack.empty()) {
531 std::pair<BasicBlock *, Value *> &e = StartingStack.back();
532 TheCache.insertResult(e.second, e.first,
533 ValueLatticeElement::getOverdefined());
534 StartingStack.pop_back();
535 }
536 BlockValueSet.clear();
537 BlockValueStack.clear();
538 return;
539 }
540 std::pair<BasicBlock *, Value *> e = BlockValueStack.back();
541 assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");
542 unsigned StackSize = BlockValueStack.size();
543 (void) StackSize;
544
545 if (solveBlockValue(e.second, e.first)) {
546 // The work item was completely processed.
547 assert(BlockValueStack.size() == StackSize &&
548 BlockValueStack.back() == e && "Nothing should have been pushed!");
549#ifndef NDEBUG
550 std::optional<ValueLatticeElement> BBLV =
551 TheCache.getCachedValueInfo(e.second, e.first);
552 assert(BBLV && "Result should be in cache!");
553 LLVM_DEBUG(
554 dbgs() << "POP " << *e.second << " in " << e.first->getName() << " = "
555 << *BBLV << "\n");
556#endif
557
558 BlockValueStack.pop_back();
559 BlockValueSet.erase(e);
560 } else {
561 // More work needs to be done before revisiting.
562 assert(BlockValueStack.size() == StackSize + 1 &&
563 "Exactly one element should have been pushed!");
564 }
565 }
566}
567
568std::optional<ValueLatticeElement>
569LazyValueInfoImpl::getBlockValue(Value *Val, BasicBlock *BB,
570 Instruction *CxtI) {
571 // If already a constant, there is nothing to compute.
572 if (Constant *VC = dyn_cast<Constant>(Val))
573 return ValueLatticeElement::get(VC);
574
575 if (std::optional<ValueLatticeElement> OptLatticeVal =
576 TheCache.getCachedValueInfo(Val, BB)) {
577 intersectAssumeOrGuardBlockValueConstantRange(Val, *OptLatticeVal, CxtI);
578 return OptLatticeVal;
579 }
580
581 // We have hit a cycle, assume overdefined.
582 if (!pushBlockValue({ BB, Val }))
583 return ValueLatticeElement::getOverdefined();
584
585 // Yet to be resolved.
586 return std::nullopt;
587}
588
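// Derive an initial lattice value for an instruction from its !range metadata,
// if present (loads, calls and invokes of integer type only).
// eg. %x = load i32, ptr %p, !range !0   ; !0 = !{i32 0, i32 10}
// yields the constant range [0, 10) for %x.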
589static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) {
590 switch (BBI->getOpcode()) {
591 default: break;
592 case Instruction::Load:
593 case Instruction::Call:
594 case Instruction::Invoke:
595 if (MDNode *Ranges = BBI->getMetadata(LLVMContext::MD_range))
596 if (isa<IntegerType>(BBI->getType())) {
597 return ValueLatticeElement::getRange(
598 getConstantRangeFromMetadata(*Ranges));
599 }
600 break;
601 };
602 // Nothing known - will be intersected with other facts
603 return ValueLatticeElement::getOverdefined();
604}
605
606bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) {
607 assert(!isa<Constant>(Val) && "Value should not be constant");
608 assert(!TheCache.getCachedValueInfo(Val, BB) &&
609 "Value should not be in cache");
610
611 // Hold off inserting this value into the Cache in case we have to return
612 // false and come back later.
613 std::optional<ValueLatticeElement> Res = solveBlockValueImpl(Val, BB);
614 if (!Res)
615 // Work pushed, will revisit
616 return false;
617
618 TheCache.insertResult(Val, BB, *Res);
619 return true;
620}
621
622std::optional<ValueLatticeElement>
623LazyValueInfoImpl::solveBlockValueImpl(Value *Val, BasicBlock *BB) {
624 Instruction *BBI = dyn_cast<Instruction>(Val);
625 if (!BBI || BBI->getParent() != BB)
626 return solveBlockValueNonLocal(Val, BB);
627
628 if (PHINode *PN = dyn_cast<PHINode>(BBI))
629 return solveBlockValuePHINode(PN, BB);
630
631 if (auto *SI = dyn_cast<SelectInst>(BBI))
632 return solveBlockValueSelect(SI, BB);
633
634 // If this value is a nonnull pointer, record its range and bail out. Note
635 // that for all other pointer typed values, we terminate the search at the
636 // definition. We could easily extend this to look through geps, bitcasts,
637 // and the like to prove non-nullness, but it's not clear that's worth it
638 // compile time wise. The context-insensitive value walk done inside
639 // isKnownNonZero gets most of the profitable cases at much less expense.
640 // This does mean that we have a sensitivity to where the defining
641 // instruction is placed, even if it could legally be hoisted much higher.
642 // That is unfortunate.
643 PointerType *PT = dyn_cast<PointerType>(BBI->getType());
644 if (PT && isKnownNonZero(BBI, DL))
645 return ValueLatticeElement::getNot(ConstantPointerNull::get(PT));
646
647 if (BBI->getType()->isIntegerTy()) {
648 if (auto *CI = dyn_cast<CastInst>(BBI))
649 return solveBlockValueCast(CI, BB);
650
651 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI))
652 return solveBlockValueBinaryOp(BO, BB);
653
654 if (auto *EVI = dyn_cast<ExtractValueInst>(BBI))
655 return solveBlockValueExtractValue(EVI, BB);
656
657 if (auto *II = dyn_cast<IntrinsicInst>(BBI))
658 return solveBlockValueIntrinsic(II, BB);
659 }
660
661 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
662 << "' - unknown inst def found.\n");
663 return getFromRangeMetadata(BBI);
664}
665
666static void AddNonNullPointer(Value *Ptr, NonNullPointerSet &PtrSet) {
667 // TODO: Use NullPointerIsDefined instead.
668 if (Ptr->getType()->getPointerAddressSpace() == 0)
669 PtrSet.insert(getUnderlyingObject(Ptr));
670}
671
673 Instruction *I, NonNullPointerSet &PtrSet) {
674 if (LoadInst *L = dyn_cast<LoadInst>(I)) {
675 AddNonNullPointer(L->getPointerOperand(), PtrSet);
676 } else if (StoreInst *S = dyn_cast<StoreInst>(I)) {
677 AddNonNullPointer(S->getPointerOperand(), PtrSet);
678 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
679 if (MI->isVolatile()) return;
680
681 // FIXME: check whether it has a valuerange that excludes zero?
682 ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
683 if (!Len || Len->isZero()) return;
684
685 AddNonNullPointer(MI->getRawDest(), PtrSet);
686 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
687 AddNonNullPointer(MTI->getRawSource(), PtrSet);
688 }
689}
690
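// Returns true if Val is known to be dereferenced somewhere in BB, and hence
// non-null at the end of the block.
// eg. a block containing
//   store i32 0, ptr %p
// dereferences %p (in address space 0), so a query for %p at this block's
// terminator may conclude that %p is non-null.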
691bool LazyValueInfoImpl::isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB) {
692 if (NullPointerIsDefined(BB->getParent(),
693 Val->getType()->getPointerAddressSpace()))
694 return false;
695
696 Val = Val->stripInBoundsOffsets();
697 return TheCache.isNonNullAtEndOfBlock(Val, BB, [](BasicBlock *BB) {
698 NonNullPointerSet NonNullPointers;
699 for (Instruction &I : *BB)
700 AddNonNullPointersByInstruction(&I, NonNullPointers);
701 return NonNullPointers;
702 });
703}
704
705std::optional<ValueLatticeElement>
706LazyValueInfoImpl::solveBlockValueNonLocal(Value *Val, BasicBlock *BB) {
707 ValueLatticeElement Result; // Start Undefined.
708
709 // If this is the entry block, we must be asking about an argument. The
710 // value is overdefined.
711 if (BB->isEntryBlock()) {
712 assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
713 return ValueLatticeElement::getOverdefined();
714 }
715
716 // Loop over all of our predecessors, merging what we know from them into
717 // result. If we encounter an unexplored predecessor, we eagerly explore it
718 // in a depth first manner. In practice, this has the effect of discovering
719 // paths we can't analyze eagerly without spending compile times analyzing
720 // other paths. This heuristic benefits from the fact that predecessors are
721 // frequently arranged such that dominating ones come first and we quickly
722 // find a path to function entry. TODO: We should consider explicitly
723 // canonicalizing to make this true rather than relying on this happy
724 // accident.
725 for (BasicBlock *Pred : predecessors(BB)) {
726 std::optional<ValueLatticeElement> EdgeResult = getEdgeValue(Val, Pred, BB);
727 if (!EdgeResult)
728 // Explore that input, then return here
729 return std::nullopt;
730
731 Result.mergeIn(*EdgeResult);
732
733 // If we hit overdefined, exit early. The BlockVals entry is already set
734 // to overdefined.
735 if (Result.isOverdefined()) {
736 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
737 << "' - overdefined because of pred '"
738 << Pred->getName() << "' (non local).\n");
739 return Result;
740 }
741 }
742
743 // Return the merged value, which is more precise than 'overdefined'.
744 assert(!Result.isOverdefined());
745 return Result;
746}
747
748std::optional<ValueLatticeElement>
749LazyValueInfoImpl::solveBlockValuePHINode(PHINode *PN, BasicBlock *BB) {
750 ValueLatticeElement Result; // Start Undefined.
751
752 // Loop over all of our predecessors, merging what we know from them into
753 // result. See the comment about the chosen traversal order in
754 // solveBlockValueNonLocal; the same reasoning applies here.
755 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
756 BasicBlock *PhiBB = PN->getIncomingBlock(i);
757 Value *PhiVal = PN->getIncomingValue(i);
758 // Note that we can provide PN as the context value to getEdgeValue, even
759 // though the results will be cached, because PN is the value being used as
760 // the cache key in the caller.
761 std::optional<ValueLatticeElement> EdgeResult =
762 getEdgeValue(PhiVal, PhiBB, BB, PN);
763 if (!EdgeResult)
764 // Explore that input, then return here
765 return std::nullopt;
766
767 Result.mergeIn(*EdgeResult);
768
769 // If we hit overdefined, exit early. The BlockVals entry is already set
770 // to overdefined.
771 if (Result.isOverdefined()) {
772 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
773 << "' - overdefined because of pred (local).\n");
774
775 return Result;
776 }
777 }
778
779 // Return the merged value, which is more precise than 'overdefined'.
780 assert(!Result.isOverdefined() && "Possible PHI in entry block?");
781 return Result;
782}
783
784// If we can determine a constraint on the value given conditions assumed by
785// the program, intersect those constraints with BBLV
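// eg. given
//   %c = icmp ult i32 %x, 10
//   call void @llvm.assume(i1 %c)
// a query for %x at a context instruction in the same block for which the
// assume is valid intersects the known lattice value with [0, 10).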
786void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
787 Value *Val, ValueLatticeElement &BBLV, Instruction *BBI) {
788 BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
789 if (!BBI)
790 return;
791
792 BasicBlock *BB = BBI->getParent();
793 for (auto &AssumeVH : AC->assumptionsFor(Val)) {
794 if (!AssumeVH)
795 continue;
796
797 // Only check assumes in the block of the context instruction. Other
798 // assumes will have already been taken into account when the value was
799 // propagated from predecessor blocks.
800 auto *I = cast<CallInst>(AssumeVH);
801 if (I->getParent() != BB || !isValidAssumeForContext(I, BBI))
802 continue;
803
804 BBLV = intersect(BBLV, *getValueFromCondition(Val, I->getArgOperand(0),
805 /*IsTrueDest*/ true,
806 /*UseBlockValue*/ false));
807 }
808
809 // If guards are not used in the module, don't spend time looking for them
810 if (GuardDecl && !GuardDecl->use_empty() &&
811 BBI->getIterator() != BB->begin()) {
812 for (Instruction &I :
813 make_range(std::next(BBI->getIterator().getReverse()), BB->rend())) {
814 Value *Cond = nullptr;
815 if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(Cond))))
816 BBLV = intersect(BBLV,
817 *getValueFromCondition(Val, Cond, /*IsTrueDest*/ true,
818 /*UseBlockValue*/ false));
819 }
820 }
821
822 if (BBLV.isOverdefined()) {
823 // Check whether we're checking at the terminator, and the pointer has
824 // been dereferenced in this block.
825 PointerType *PTy = dyn_cast<PointerType>(Val->getType());
826 if (PTy && BB->getTerminator() == BBI &&
827 isNonNullAtEndOfBlock(Val, BB))
829 }
830}
831
833 Type *Ty, bool UndefAllowed = false) {
834 assert(Ty->isIntOrIntVectorTy() && "Must be integer type");
835 if (Val.isConstantRange(UndefAllowed))
836 return Val.getConstantRange();
837 unsigned BW = Ty->getScalarSizeInBits();
838 if (Val.isUnknown())
839 return ConstantRange::getEmpty(BW);
840 return ConstantRange::getFull(BW);
841}
842
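// When solving a select recognized as a min/max pattern, the result range is
// the corresponding min/max of the operand ranges.
// eg. for
//   %c = icmp slt i32 %a, %b
//   %s = select i1 %c, i32 %a, i32 %b    ; smin(%a, %b)
// with %a known to lie in [0, 10) and %b in [5, 20), the smin of the two
// ranges gives [0, 10) for %s.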
843std::optional<ValueLatticeElement>
844LazyValueInfoImpl::solveBlockValueSelect(SelectInst *SI, BasicBlock *BB) {
845 // Recurse on our inputs if needed
846 std::optional<ValueLatticeElement> OptTrueVal =
847 getBlockValue(SI->getTrueValue(), BB, SI);
848 if (!OptTrueVal)
849 return std::nullopt;
850 ValueLatticeElement &TrueVal = *OptTrueVal;
851
852 std::optional<ValueLatticeElement> OptFalseVal =
853 getBlockValue(SI->getFalseValue(), BB, SI);
854 if (!OptFalseVal)
855 return std::nullopt;
856 ValueLatticeElement &FalseVal = *OptFalseVal;
857
858 if (TrueVal.isConstantRange() || FalseVal.isConstantRange()) {
859 const ConstantRange &TrueCR = toConstantRange(TrueVal, SI->getType());
860 const ConstantRange &FalseCR = toConstantRange(FalseVal, SI->getType());
861 Value *LHS = nullptr;
862 Value *RHS = nullptr;
863 SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
864 // Is this a min specifically of our two inputs? (Avoid the risk of
865 // ValueTracking getting smarter looking back past our immediate inputs.)
867 ((LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) ||
868 (RHS == SI->getTrueValue() && LHS == SI->getFalseValue()))) {
869 ConstantRange ResultCR = [&]() {
870 switch (SPR.Flavor) {
871 default:
872 llvm_unreachable("unexpected minmax type!");
873 case SPF_SMIN: /// Signed minimum
874 return TrueCR.smin(FalseCR);
875 case SPF_UMIN: /// Unsigned minimum
876 return TrueCR.umin(FalseCR);
877 case SPF_SMAX: /// Signed maximum
878 return TrueCR.smax(FalseCR);
879 case SPF_UMAX: /// Unsigned maximum
880 return TrueCR.umax(FalseCR);
881 };
882 }();
884 ResultCR, TrueVal.isConstantRangeIncludingUndef() ||
885 FalseVal.isConstantRangeIncludingUndef());
886 }
887
888 if (SPR.Flavor == SPF_ABS) {
889 if (LHS == SI->getTrueValue())
891 TrueCR.abs(), TrueVal.isConstantRangeIncludingUndef());
892 if (LHS == SI->getFalseValue())
894 FalseCR.abs(), FalseVal.isConstantRangeIncludingUndef());
895 }
896
897 if (SPR.Flavor == SPF_NABS) {
899 if (LHS == SI->getTrueValue())
901 Zero.sub(TrueCR.abs()), FalseVal.isConstantRangeIncludingUndef());
902 if (LHS == SI->getFalseValue())
904 Zero.sub(FalseCR.abs()), FalseVal.isConstantRangeIncludingUndef());
905 }
906 }
907
908 // Can we constrain the facts about the true and false values by using the
909 // condition itself? This shows up with idioms like e.g. select(a > 5, a, 5).
910 // TODO: We could potentially refine an overdefined true value above.
911 Value *Cond = SI->getCondition();
912 // If the value is undef, a different value may be chosen in
913 // the select condition.
915 TrueVal =
916 intersect(TrueVal, *getValueFromCondition(SI->getTrueValue(), Cond,
917 /*IsTrueDest*/ true,
918 /*UseBlockValue*/ false));
919 FalseVal =
920 intersect(FalseVal, *getValueFromCondition(SI->getFalseValue(), Cond,
921 /*IsTrueDest*/ false,
922 /*UseBlockValue*/ false));
923 }
924
926 Result.mergeIn(FalseVal);
927 return Result;
928}
929
930std::optional<ConstantRange>
931LazyValueInfoImpl::getRangeFor(Value *V, Instruction *CxtI, BasicBlock *BB) {
932 std::optional<ValueLatticeElement> OptVal = getBlockValue(V, BB, CxtI);
933 if (!OptVal)
934 return std::nullopt;
935 return toConstantRange(*OptVal, V->getType());
936}
937
938std::optional<ValueLatticeElement>
939LazyValueInfoImpl::solveBlockValueCast(CastInst *CI, BasicBlock *BB) {
940 // Filter out casts we don't know how to reason about before attempting to
941 // recurse on our operand. This can cut a long search short if we know we're
942 // not going to be able to get any useful information anyway.
943 switch (CI->getOpcode()) {
944 case Instruction::Trunc:
945 case Instruction::SExt:
946 case Instruction::ZExt:
947 break;
948 default:
949 // Unhandled instructions are overdefined.
950 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
951 << "' - overdefined (unknown cast).\n");
952 return ValueLatticeElement::getOverdefined();
953 }
954
955 // Figure out the range of the LHS. If that fails, we still apply the
956 // transfer rule on the full set since we may be able to locally infer
957 // interesting facts.
958 std::optional<ConstantRange> LHSRes = getRangeFor(CI->getOperand(0), CI, BB);
959 if (!LHSRes)
960 // More work to do before applying this transfer rule.
961 return std::nullopt;
962 const ConstantRange &LHSRange = *LHSRes;
963
964 const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth();
965
966 // NOTE: We're currently limited by the set of operations that ConstantRange
967 // can evaluate symbolically. Enhancing that set will allow us to analyze
968 // more definitions.
969 return ValueLatticeElement::getRange(LHSRange.castOp(CI->getOpcode(),
970 ResultBitWidth));
971}
972
973std::optional<ValueLatticeElement>
974LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
975 Instruction *I, BasicBlock *BB,
976 std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
977 OpFn) {
978 // Figure out the ranges of the operands. If that fails, use a
979 // conservative range, but apply the transfer rule anyways. This
980 // lets us pick up facts from expressions like "and i32 (call i32
981 // @foo()), 32"
982 std::optional<ConstantRange> LHSRes = getRangeFor(I->getOperand(0), I, BB);
983 if (!LHSRes)
984 return std::nullopt;
985
986 std::optional<ConstantRange> RHSRes = getRangeFor(I->getOperand(1), I, BB);
987 if (!RHSRes)
988 return std::nullopt;
989
990 const ConstantRange &LHSRange = *LHSRes;
991 const ConstantRange &RHSRange = *RHSRes;
992 return ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange));
993}
994
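// Binary operators are evaluated by applying ConstantRange's transfer function
// for the opcode to the operand ranges; overflowing ops also use their
// nsw/nuw flags.
// eg. for %a = add nuw i32 %x, 1 with %x known to lie in [0, 10), the add
// transfer function gives [1, 11); the nuw flag additionally excludes inputs
// that would wrap (none here).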
995std::optional<ValueLatticeElement>
996LazyValueInfoImpl::solveBlockValueBinaryOp(BinaryOperator *BO, BasicBlock *BB) {
997 assert(BO->getOperand(0)->getType()->isSized() &&
998 "all operands to binary operators are sized");
999 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO)) {
1000 unsigned NoWrapKind = OBO->getNoWrapKind();
1001 return solveBlockValueBinaryOpImpl(
1002 BO, BB,
1003 [BO, NoWrapKind](const ConstantRange &CR1, const ConstantRange &CR2) {
1004 return CR1.overflowingBinaryOp(BO->getOpcode(), CR2, NoWrapKind);
1005 });
1006 }
1007
1008 return solveBlockValueBinaryOpImpl(
1009 BO, BB, [BO](const ConstantRange &CR1, const ConstantRange &CR2) {
1010 return CR1.binaryOp(BO->getOpcode(), CR2);
1011 });
1012}
1013
1014std::optional<ValueLatticeElement>
1015LazyValueInfoImpl::solveBlockValueOverflowIntrinsic(WithOverflowInst *WO,
1016 BasicBlock *BB) {
1017 return solveBlockValueBinaryOpImpl(
1018 WO, BB, [WO](const ConstantRange &CR1, const ConstantRange &CR2) {
1019 return CR1.binaryOp(WO->getBinaryOp(), CR2);
1020 });
1021}
1022
1023std::optional<ValueLatticeElement>
1024LazyValueInfoImpl::solveBlockValueIntrinsic(IntrinsicInst *II, BasicBlock *BB) {
1025 ValueLatticeElement MetadataVal = getFromRangeMetadata(II);
1027 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1028 << "' - unknown intrinsic.\n");
1029 return MetadataVal;
1030 }
1031
1033 for (Value *Op : II->args()) {
1034 std::optional<ConstantRange> Range = getRangeFor(Op, II, BB);
1035 if (!Range)
1036 return std::nullopt;
1037 OpRanges.push_back(*Range);
1038 }
1039
1041 II->getIntrinsicID(), OpRanges)),
1042 MetadataVal);
1043}
1044
1045std::optional<ValueLatticeElement>
1046LazyValueInfoImpl::solveBlockValueExtractValue(ExtractValueInst *EVI,
1047 BasicBlock *BB) {
1048 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1049 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 0)
1050 return solveBlockValueOverflowIntrinsic(WO, BB);
1051
1052 // Handle extractvalue of insertvalue to allow further simplification
1053 // based on with.overflow intrinsics that have been replaced.
1055 EVI->getAggregateOperand(), EVI->getIndices(),
1056 EVI->getModule()->getDataLayout()))
1057 return getBlockValue(V, BB, EVI);
1058
1059 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1060 << "' - overdefined (unknown extractvalue).\n");
1061 return ValueLatticeElement::getOverdefined();
1062}
1063
1064static bool matchICmpOperand(APInt &Offset, Value *LHS, Value *Val,
1065 ICmpInst::Predicate Pred) {
1066 if (LHS == Val)
1067 return true;
1068
1069 // Handle range checking idiom produced by InstCombine. We will subtract the
1070 // offset from the allowed range for RHS in this case.
1071 const APInt *C;
1072 if (match(LHS, m_Add(m_Specific(Val), m_APInt(C)))) {
1073 Offset = *C;
1074 return true;
1075 }
1076
1077 // Handle the symmetric case. This appears in saturation patterns like
1078 // (x == 16) ? 16 : (x + 1).
1079 if (match(Val, m_Add(m_Specific(LHS), m_APInt(C)))) {
1080 Offset = -*C;
1081 return true;
1082 }
1083
1084 // If (x | y) < C, then (x < C) && (y < C).
1085 if (match(LHS, m_c_Or(m_Specific(Val), m_Value())) &&
1086 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE))
1087 return true;
1088
1089 // If (x & y) > C, then (x > C) && (y > C).
1090 if (match(LHS, m_c_And(m_Specific(Val), m_Value())) &&
1091 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE))
1092 return true;
1093
1094 return false;
1095}
1096
1097/// Get value range for a "(Val + Offset) Pred RHS" condition.
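/// eg. (range-check idiom) on the true edge of
///   %s = add i32 %x, -4
///   %c = icmp ult i32 %s, 4
/// the sum is constrained to [0, 4), and subtracting the matched offset -4
/// yields the range [4, 8) for Val (%x).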
1098std::optional<ValueLatticeElement>
1099LazyValueInfoImpl::getValueFromSimpleICmpCondition(CmpInst::Predicate Pred,
1100 Value *RHS,
1101 const APInt &Offset,
1102 Instruction *CxtI,
1103 bool UseBlockValue) {
1105 /*isFullSet=*/true);
1106 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
1107 RHSRange = ConstantRange(CI->getValue());
1108 } else if (UseBlockValue) {
1109 std::optional<ValueLatticeElement> R =
1110 getBlockValue(RHS, CxtI->getParent(), CxtI);
1111 if (!R)
1112 return std::nullopt;
1113 RHSRange = toConstantRange(*R, RHS->getType());
1114 } else if (Instruction *I = dyn_cast<Instruction>(RHS)) {
1115 if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
1116 RHSRange = getConstantRangeFromMetadata(*Ranges);
1117 }
1118
1119 ConstantRange TrueValues =
1121 return ValueLatticeElement::getRange(TrueValues.subtract(Offset));
1122}
1123
1124static std::optional<ConstantRange>
1126 function_ref<std::optional<ConstantRange>(const APInt &)> Fn) {
1127 bool Invert = false;
1128 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
1129 Pred = ICmpInst::getInversePredicate(Pred);
1130 Invert = true;
1131 }
1132 if (Pred == ICmpInst::ICMP_SLE) {
1133 Pred = ICmpInst::ICMP_SLT;
1134 if (RHS.isMaxSignedValue())
1135 return std::nullopt; // Could also return full/empty here, if we wanted.
1136 ++RHS;
1137 }
1138 assert(Pred == ICmpInst::ICMP_SLT && "Must be signed predicate");
1139 if (auto CR = Fn(RHS))
1140 return Invert ? CR->inverse() : CR;
1141 return std::nullopt;
1142}
1143
1144std::optional<ValueLatticeElement> LazyValueInfoImpl::getValueFromICmpCondition(
1145 Value *Val, ICmpInst *ICI, bool isTrueDest, bool UseBlockValue) {
1146 Value *LHS = ICI->getOperand(0);
1147 Value *RHS = ICI->getOperand(1);
1148
1149 // Get the predicate that must hold along the considered edge.
1150 CmpInst::Predicate EdgePred =
1151 isTrueDest ? ICI->getPredicate() : ICI->getInversePredicate();
1152
1153 if (isa<Constant>(RHS)) {
1154 if (ICI->isEquality() && LHS == Val) {
1155 if (EdgePred == ICmpInst::ICMP_EQ)
1156 return ValueLatticeElement::get(cast<Constant>(RHS));
1157 else if (!isa<UndefValue>(RHS))
1158 return ValueLatticeElement::getNot(cast<Constant>(RHS));
1159 }
1160 }
1161
1162 Type *Ty = Val->getType();
1163 if (!Ty->isIntegerTy())
1164 return ValueLatticeElement::getOverdefined();
1165
1166 unsigned BitWidth = Ty->getScalarSizeInBits();
1167 APInt Offset(BitWidth, 0);
1168 if (matchICmpOperand(Offset, LHS, Val, EdgePred))
1169 return getValueFromSimpleICmpCondition(EdgePred, RHS, Offset, ICI,
1170 UseBlockValue);
1171
1172 CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(EdgePred);
1173 if (matchICmpOperand(Offset, RHS, Val, SwappedPred))
1174 return getValueFromSimpleICmpCondition(SwappedPred, LHS, Offset, ICI,
1175 UseBlockValue);
1176
1177 const APInt *Mask, *C;
1178 if (match(LHS, m_And(m_Specific(Val), m_APInt(Mask))) &&
1179 match(RHS, m_APInt(C))) {
1180 // If (Val & Mask) == C then all the masked bits are known and we can
1181 // compute a value range based on that.
1182 if (EdgePred == ICmpInst::ICMP_EQ) {
1183 KnownBits Known;
1184 Known.Zero = ~*C & *Mask;
1185 Known.One = *C & *Mask;
1187 ConstantRange::fromKnownBits(Known, /*IsSigned*/ false));
1188 }
1189 // If (Val & Mask) != 0 then the value must be larger than the lowest set
1190 // bit of Mask.
1191 if (EdgePred == ICmpInst::ICMP_NE && !Mask->isZero() && C->isZero()) {
1193 APInt::getOneBitSet(BitWidth, Mask->countr_zero()),
1195 }
1196 }
1197
1198 // If (X urem Modulus) >= C, then X >= C.
1199 // If trunc X >= C, then X >= C.
1200 // TODO: An upper bound could be computed as well.
1201 if (match(LHS, m_CombineOr(m_URem(m_Specific(Val), m_Value()),
1202 m_Trunc(m_Specific(Val)))) &&
1203 match(RHS, m_APInt(C))) {
1204 // Use the icmp region so we don't have to deal with different predicates.
1206 if (!CR.isEmptySet())
1209 }
1210
1211 // Recognize:
1212 // icmp slt (ashr X, ShAmtC), C --> icmp slt X, C << ShAmtC
1213 // Preconditions: (C << ShAmtC) >> ShAmtC == C
1214 const APInt *ShAmtC;
1215 if (CmpInst::isSigned(EdgePred) &&
1216 match(LHS, m_AShr(m_Specific(Val), m_APInt(ShAmtC))) &&
1217 match(RHS, m_APInt(C))) {
1218 auto CR = getRangeViaSLT(
1219 EdgePred, *C, [&](const APInt &RHS) -> std::optional<ConstantRange> {
1220 APInt New = RHS << *ShAmtC;
1221 if ((New.ashr(*ShAmtC)) != RHS)
1222 return std::nullopt;
1224 APInt::getSignedMinValue(New.getBitWidth()), New);
1225 });
1226 if (CR)
1228 }
1229
1231}
1232
1233// Handle conditions of the form
1234// extractvalue(op.with.overflow(%x, C), 1).
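// eg. for %r = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 1),
// the edge on which the overflow bit is false constrains %x to every value
// except UINT32_MAX, and the overflow-is-true edge constrains %x to exactly
// UINT32_MAX.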
1236 Value *Val, WithOverflowInst *WO, bool IsTrueDest) {
1237 // TODO: This only works with a constant RHS for now. We could also compute
1238 // the range of the RHS, but this doesn't fit into the current structure of
1239 // the edge value calculation.
1240 const APInt *C;
1241 if (WO->getLHS() != Val || !match(WO->getRHS(), m_APInt(C)))
1243
1244 // Calculate the possible values of %x for which no overflow occurs.
1246 WO->getBinaryOp(), *C, WO->getNoWrapKind());
1247
1248 // If overflow is false, %x is constrained to NWR. If overflow is true, %x is
1249 // constrained to its inverse (all values that might cause overflow).
1250 if (IsTrueDest)
1251 NWR = NWR.inverse();
1253}
1254
1255std::optional<ValueLatticeElement>
1256LazyValueInfoImpl::getValueFromCondition(Value *Val, Value *Cond,
1257 bool IsTrueDest, bool UseBlockValue,
1258 unsigned Depth) {
1259 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond))
1260 return getValueFromICmpCondition(Val, ICI, IsTrueDest, UseBlockValue);
1261
1262 if (auto *EVI = dyn_cast<ExtractValueInst>(Cond))
1263 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1264 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1)
1265 return getValueFromOverflowCondition(Val, WO, IsTrueDest);
1266
1269
1270 Value *N;
1271 if (match(Cond, m_Not(m_Value(N))))
1272 return getValueFromCondition(Val, N, !IsTrueDest, UseBlockValue, Depth);
1273
1274 Value *L, *R;
1275 bool IsAnd;
1276 if (match(Cond, m_LogicalAnd(m_Value(L), m_Value(R))))
1277 IsAnd = true;
1278 else if (match(Cond, m_LogicalOr(m_Value(L), m_Value(R))))
1279 IsAnd = false;
1280 else
1282
1283 std::optional<ValueLatticeElement> LV =
1284 getValueFromCondition(Val, L, IsTrueDest, UseBlockValue, Depth);
1285 if (!LV)
1286 return std::nullopt;
1287 std::optional<ValueLatticeElement> RV =
1288 getValueFromCondition(Val, R, IsTrueDest, UseBlockValue, Depth);
1289 if (!RV)
1290 return std::nullopt;
1291
1292 // if (L && R) -> intersect L and R
1293 // if (!(L || R)) -> intersect !L and !R
1294 // if (L || R) -> union L and R
1295 // if (!(L && R)) -> union !L and !R
1296 if (IsTrueDest ^ IsAnd) {
1297 LV->mergeIn(*RV);
1298 return *LV;
1299 }
1300
1301 return intersect(*LV, *RV);
1302}
1303
1304// Return true if Usr has Op as an operand, otherwise false.
1305static bool usesOperand(User *Usr, Value *Op) {
1306 return is_contained(Usr->operands(), Op);
1307}
1308
1309// Return true if the instruction type of Val is supported by
1310// constantFoldUser(). Currently CastInst, BinaryOperator and FreezeInst only.
1311// Call this before calling constantFoldUser() to find out if it's even worth
1312// attempting to call it.
1313static bool isOperationFoldable(User *Usr) {
1314 return isa<CastInst>(Usr) || isa<BinaryOperator>(Usr) || isa<FreezeInst>(Usr);
1315}
1316
1317// Check if Usr can be simplified to an integer constant when the value of one
1318// of its operands Op is an integer constant OpConstVal. If so, return it as a
1319// lattice value range with a single element; otherwise return an overdefined
1320// lattice value.
1322 const APInt &OpConstVal,
1323 const DataLayout &DL) {
1324 assert(isOperationFoldable(Usr) && "Precondition");
1325 Constant* OpConst = Constant::getIntegerValue(Op->getType(), OpConstVal);
1326 // Check if Usr can be simplified to a constant.
1327 if (auto *CI = dyn_cast<CastInst>(Usr)) {
1328 assert(CI->getOperand(0) == Op && "Operand 0 isn't Op");
1329 if (auto *C = dyn_cast_or_null<ConstantInt>(
1330 simplifyCastInst(CI->getOpcode(), OpConst,
1331 CI->getDestTy(), DL))) {
1332 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1333 }
1334 } else if (auto *BO = dyn_cast<BinaryOperator>(Usr)) {
1335 bool Op0Match = BO->getOperand(0) == Op;
1336 bool Op1Match = BO->getOperand(1) == Op;
1337 assert((Op0Match || Op1Match) &&
1338 "Neither Operand 0 nor Operand 1 is a match");
1339 Value *LHS = Op0Match ? OpConst : BO->getOperand(0);
1340 Value *RHS = Op1Match ? OpConst : BO->getOperand(1);
1341 if (auto *C = dyn_cast_or_null<ConstantInt>(
1342 simplifyBinOp(BO->getOpcode(), LHS, RHS, DL))) {
1343 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1344 }
1345 } else if (isa<FreezeInst>(Usr)) {
1346 assert(cast<FreezeInst>(Usr)->getOperand(0) == Op && "Operand 0 isn't Op");
1347 return ValueLatticeElement::getRange(ConstantRange(OpConstVal));
1348 }
1350}
1351
1352/// Compute the value of Val on the edge BBFrom -> BBTo.
1353std::optional<ValueLatticeElement>
1354LazyValueInfoImpl::getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
1355 BasicBlock *BBTo, bool UseBlockValue) {
1356 // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
1357 // know that v != 0.
1358 if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
1359 // If this is a conditional branch and only one successor goes to BBTo, then
1360 // we may be able to infer something from the condition.
1361 if (BI->isConditional() &&
1362 BI->getSuccessor(0) != BI->getSuccessor(1)) {
1363 bool isTrueDest = BI->getSuccessor(0) == BBTo;
1364 assert(BI->getSuccessor(!isTrueDest) == BBTo &&
1365 "BBTo isn't a successor of BBFrom");
1366 Value *Condition = BI->getCondition();
1367
1368 // If V is the condition of the branch itself, then we know exactly what
1369 // it is.
1370 if (Condition == Val)
1371 return ValueLatticeElement::get(ConstantInt::get(
1372 Type::getInt1Ty(Val->getContext()), isTrueDest));
1373
1374 // If the condition of the branch is an equality comparison, we may be
1375 // able to infer the value.
1376 std::optional<ValueLatticeElement> Result =
1377 getValueFromCondition(Val, Condition, isTrueDest, UseBlockValue);
1378 if (!Result)
1379 return std::nullopt;
1380
1381 if (!Result->isOverdefined())
1382 return Result;
1383
1384 if (User *Usr = dyn_cast<User>(Val)) {
1385 assert(Result->isOverdefined() && "Result isn't overdefined");
1386 // Check with isOperationFoldable() first to avoid linearly iterating
1387 // over the operands unnecessarily which can be expensive for
1388 // instructions with many operands.
1389 if (isa<IntegerType>(Usr->getType()) && isOperationFoldable(Usr)) {
1390 const DataLayout &DL = BBTo->getModule()->getDataLayout();
1391 if (usesOperand(Usr, Condition)) {
1392 // If Val has Condition as an operand and Val can be folded into a
1393 // constant with either Condition == true or Condition == false,
1394 // propagate the constant.
1395 // eg.
1396 // ; %Val is true on the edge to %then.
1397 // %Val = and i1 %Condition, true.
1398 // br %Condition, label %then, label %else
1399 APInt ConditionVal(1, isTrueDest ? 1 : 0);
1400 Result = constantFoldUser(Usr, Condition, ConditionVal, DL);
1401 } else {
1402 // If one of Val's operand has an inferred value, we may be able to
1403 // infer the value of Val.
1404 // eg.
1405 // ; %Val is 94 on the edge to %then.
1406 // %Val = add i8 %Op, 1
1407 // %Condition = icmp eq i8 %Op, 93
1408 // br i1 %Condition, label %then, label %else
1409 for (unsigned i = 0; i < Usr->getNumOperands(); ++i) {
1410 Value *Op = Usr->getOperand(i);
1411 ValueLatticeElement OpLatticeVal = *getValueFromCondition(
1412 Op, Condition, isTrueDest, /*UseBlockValue*/ false);
1413 if (std::optional<APInt> OpConst =
1414 OpLatticeVal.asConstantInteger()) {
1415 Result = constantFoldUser(Usr, Op, *OpConst, DL);
1416 break;
1417 }
1418 }
1419 }
1420 }
1421 }
1422 if (!Result->isOverdefined())
1423 return Result;
1424 }
1425 }
1426
1427 // If the edge was formed by a switch on the value, then we may know exactly
1428 // what it is.
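 // eg. for
 //   switch i32 %x, label %default [ i32 7, label %a ]
 // the edge to %a constrains %x to the single value 7, and the edge to
 // %default constrains %x to every value except 7.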
1429 if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
1430 Value *Condition = SI->getCondition();
1431 if (!isa<IntegerType>(Val->getType()))
1433 bool ValUsesConditionAndMayBeFoldable = false;
1434 if (Condition != Val) {
1435 // Check if Val has Condition as an operand.
1436 if (User *Usr = dyn_cast<User>(Val))
1437 ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) &&
1438 usesOperand(Usr, Condition);
1439 if (!ValUsesConditionAndMayBeFoldable)
1441 }
1442 assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
1443 "Condition is not Val and Val does not use Condition");
1444
1445 bool DefaultCase = SI->getDefaultDest() == BBTo;
1446 unsigned BitWidth = Val->getType()->getIntegerBitWidth();
1447 ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);
1448
1449 for (auto Case : SI->cases()) {
1450 APInt CaseValue = Case.getCaseValue()->getValue();
1451 ConstantRange EdgeVal(CaseValue);
1452 if (ValUsesConditionAndMayBeFoldable) {
1453 User *Usr = cast<User>(Val);
1454 const DataLayout &DL = BBTo->getModule()->getDataLayout();
1455 ValueLatticeElement EdgeLatticeVal =
1456 constantFoldUser(Usr, Condition, CaseValue, DL);
1457 if (EdgeLatticeVal.isOverdefined())
1459 EdgeVal = EdgeLatticeVal.getConstantRange();
1460 }
1461 if (DefaultCase) {
1462 // It is possible that the default destination is the destination of
1463 // some cases. We cannot perform difference for those cases.
1464 // We know Condition != CaseValue in BBTo. In some cases we can use
1465 // this to infer Val == f(Condition) is != f(CaseValue). For now, we
1466 // only do this when f is identity (i.e. Val == Condition), but we
1467 // should be able to do this for any injective f.
1468 if (Case.getCaseSuccessor() != BBTo && Condition == Val)
1469 EdgesVals = EdgesVals.difference(EdgeVal);
1470 } else if (Case.getCaseSuccessor() == BBTo)
1471 EdgesVals = EdgesVals.unionWith(EdgeVal);
1472 }
1473 return ValueLatticeElement::getRange(std::move(EdgesVals));
1474 }
1476}
1477
1478/// Compute the value of Val on the edge BBFrom -> BBTo or the value at
1479/// the basic block if the edge does not constrain Val.
1480std::optional<ValueLatticeElement>
1481LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom,
1482 BasicBlock *BBTo, Instruction *CxtI) {
1483 // If already a constant, there is nothing to compute.
1484 if (Constant *VC = dyn_cast<Constant>(Val))
1485 return ValueLatticeElement::get(VC);
1486
1487 std::optional<ValueLatticeElement> LocalResult =
1488 getEdgeValueLocal(Val, BBFrom, BBTo, /*UseBlockValue*/ true);
1489 if (!LocalResult)
1490 return std::nullopt;
1491
1492 if (hasSingleValue(*LocalResult))
1493 // Can't get any more precise here
1494 return LocalResult;
1495
1496 std::optional<ValueLatticeElement> OptInBlock =
1497 getBlockValue(Val, BBFrom, BBFrom->getTerminator());
1498 if (!OptInBlock)
1499 return std::nullopt;
1500 ValueLatticeElement &InBlock = *OptInBlock;
1501
1502 // We can use the context instruction (generically the ultimate instruction
1503 // the calling pass is trying to simplify) here, even though the result of
1504 // this function is generally cached when called from the solve* functions
1505 // (and that cached result might be used with queries using a different
1506 // context instruction), because when this function is called from the solve*
1507 // functions, the context instruction is not provided. When called from
1508 // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided,
1509 // but then the result is not cached.
1510 intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock, CxtI);
1511
1512 return intersect(*LocalResult, InBlock);
1513}
1514
1516 Instruction *CxtI) {
1517 LLVM_DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
1518 << BB->getName() << "'\n");
1519
1520 assert(BlockValueStack.empty() && BlockValueSet.empty());
1521 std::optional<ValueLatticeElement> OptResult = getBlockValue(V, BB, CxtI);
1522 if (!OptResult) {
1523 solve();
1524 OptResult = getBlockValue(V, BB, CxtI);
1525 assert(OptResult && "Value not available after solving");
1526 }
1527
1528 ValueLatticeElement Result = *OptResult;
1529 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1530 return Result;
1531}
1532
1534 LLVM_DEBUG(dbgs() << "LVI Getting value " << *V << " at '" << CxtI->getName()
1535 << "'\n");
1536
1537 if (auto *C = dyn_cast<Constant>(V))
1539
1541 if (auto *I = dyn_cast<Instruction>(V))
1542 Result = getFromRangeMetadata(I);
1543 intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
1544
1545 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1546 return Result;
1547}
1548
1549ValueLatticeElement LazyValueInfoImpl::
1550getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
1551 Instruction *CxtI) {
1552 LLVM_DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
1553 << FromBB->getName() << "' to '" << ToBB->getName()
1554 << "'\n");
1555
1556 std::optional<ValueLatticeElement> Result =
1557 getEdgeValue(V, FromBB, ToBB, CxtI);
1558 while (!Result) {
1559 // As the worklist only explicitly tracks block values (but not edge values)
1560 // we may have to call solve() multiple times, as the edge value calculation
1561 // may request additional block values.
1562 solve();
1563 Result = getEdgeValue(V, FromBB, ToBB, CxtI);
1564 }
1565
1566 LLVM_DEBUG(dbgs() << " Result = " << *Result << "\n");
1567 return *Result;
1568}
1569
1571 Value *V = U.get();
1572 auto *CxtI = cast<Instruction>(U.getUser());
1573 ValueLatticeElement VL = getValueInBlock(V, CxtI->getParent(), CxtI);
1574
1575 // Check whether the only (possibly transitive) use of the value is in a
1576 // position where V can be constrained by a select or branch condition.
1577 const Use *CurrU = &U;
1578 // TODO: Increase limit?
1579 const unsigned MaxUsesToInspect = 3;
1580 for (unsigned I = 0; I < MaxUsesToInspect; ++I) {
1581 std::optional<ValueLatticeElement> CondVal;
1582 auto *CurrI = cast<Instruction>(CurrU->getUser());
1583 if (auto *SI = dyn_cast<SelectInst>(CurrI)) {
1584 // If the value is undef, a different value may be chosen in
1585 // the select condition and at use.
1586 if (!isGuaranteedNotToBeUndef(SI->getCondition(), AC))
1587 break;
1588 if (CurrU->getOperandNo() == 1)
1589 CondVal =
1590 *getValueFromCondition(V, SI->getCondition(), /*IsTrueDest*/ true,
1591 /*UseBlockValue*/ false);
1592 else if (CurrU->getOperandNo() == 2)
1593 CondVal =
1594 *getValueFromCondition(V, SI->getCondition(), /*IsTrueDest*/ false,
1595 /*UseBlockValue*/ false);
1596 } else if (auto *PHI = dyn_cast<PHINode>(CurrI)) {
1597 // TODO: Use non-local query?
1598 CondVal = *getEdgeValueLocal(V, PHI->getIncomingBlock(*CurrU),
1599 PHI->getParent(), /*UseBlockValue*/ false);
1600 }
1601 if (CondVal)
1602 VL = intersect(VL, *CondVal);
1603
1604 // Only follow one-use chain, to allow direct intersection of conditions.
1605 // If there are multiple uses, we would have to intersect with the union of
1606 // all conditions at different uses.
1607 // Stop walking if we hit a non-speculatable instruction. Even if the
1608 // result is only used under a specific condition, executing the
1609 // instruction itself may cause side effects or UB already.
1610 // This also disallows looking through phi nodes: If the phi node is part
1611 // of a cycle, we might end up reasoning about values from different cycle
1612 // iterations (PR60629).
1613 if (!CurrI->hasOneUse() || !isSafeToSpeculativelyExecute(CurrI))
1614 break;
1615 CurrU = &*CurrI->use_begin();
1616 }
1617 return VL;
1618}
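// [Editorial sketch, not part of LazyValueInfo.cpp] The use-site walk above is
// what lets a caller obtain a range that has been refined by a select
// condition guarding that particular use. Assuming the usual
// "llvm/Analysis/LazyValueInfo.h" and "llvm/IR/Instructions.h" includes, a
// client could query it roughly like this (the helper name is hypothetical):
static ConstantRange rangeOfSelectTrueArm(LazyValueInfo &LVI, SelectInst &SI) {
  // Operand 1 of a select is the value chosen when the condition is true;
  // querying its Use lets LVI intersect the condition into the result.
  Use &TrueUse = SI.getOperandUse(1);
  return LVI.getConstantRangeAtUse(TrueUse, /*UndefAllowed=*/false);
}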
1619
1620void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1621 BasicBlock *NewSucc) {
1622 TheCache.threadEdgeImpl(OldSucc, NewSucc);
1623}
1624
1625//===----------------------------------------------------------------------===//
1626// LazyValueInfo Impl
1627//===----------------------------------------------------------------------===//
1628
1629bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
1630 Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1631
1632 if (auto *Impl = Info.getImpl())
1633 Impl->clear();
1634
1635 // Fully lazy.
1636 return false;
1637}
1638
1639void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1640 AU.setPreservesAll();
1641 AU.addRequired<AssumptionCacheTracker>();
1642 AU.addRequired<TargetLibraryInfoWrapperPass>();
1643}
1644
1645LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }
1646
1647/// This lazily constructs the LazyValueInfoImpl.
1648LazyValueInfoImpl &LazyValueInfo::getOrCreateImpl(const Module *M) {
1649 if (!PImpl) {
1650 assert(M && "getCache() called with a null Module");
1651 const DataLayout &DL = M->getDataLayout();
1652 Function *GuardDecl =
1653 M->getFunction(Intrinsic::getName(Intrinsic::experimental_guard));
1654 PImpl = new LazyValueInfoImpl(AC, DL, GuardDecl);
1655 }
1656 return *static_cast<LazyValueInfoImpl *>(PImpl);
1657}
1658
1659LazyValueInfoImpl *LazyValueInfo::getImpl() {
1660 if (!PImpl)
1661 return nullptr;
1662 return static_cast<LazyValueInfoImpl *>(PImpl);
1663}
1664
1665LazyValueInfo::~LazyValueInfo() { releaseMemory(); }
1666
1667void LazyValueInfo::releaseMemory() {
1668 // If the cache was allocated, free it.
1669 if (auto *Impl = getImpl()) {
1670 delete &*Impl;
1671 PImpl = nullptr;
1672 }
1673}
1674
1675bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA,
1676 FunctionAnalysisManager::Invalidator &Inv) {
1677 // We need to invalidate if we have either failed to preserve this analysis's
1678 // result directly or if any of its dependencies have been invalidated.
1679 auto PAC = PA.getChecker<LazyValueAnalysis>();
1680 if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()))
1681 return true;
1682
1683 return false;
1684}
1685
1686void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }
1687
1688LazyValueInfo LazyValueAnalysis::run(Function &F,
1689 FunctionAnalysisManager &FAM) {
1690 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
1691
1692 return LazyValueInfo(&AC, &F.getParent()->getDataLayout());
1693}
1694
1695/// Returns true if we can statically tell that this value will never be a
1696/// "useful" constant. In practice, this means we've got something like an
1697/// alloca or a malloc call for which a comparison against a constant can
1698/// only be guarding dead code. Note that we are potentially giving up some
1699/// precision in dead code (a constant result) in favour of avoiding an
1700/// expensive search for an easily answered common query.
1701static bool isKnownNonConstant(Value *V) {
1702 V = V->stripPointerCasts();
1703 // The return value of an alloca cannot be a Constant.
1704 if (isa<AllocaInst>(V))
1705 return true;
1706 return false;
1707}
1708
1709Constant *LazyValueInfo::getConstant(Value *V, Instruction *CxtI) {
1710 // Bail out early if V is known not to be a Constant.
1711 if (isKnownNonConstant(V))
1712 return nullptr;
1713
1714 BasicBlock *BB = CxtI->getParent();
1715 ValueLatticeElement Result =
1716 getOrCreateImpl(BB->getModule()).getValueInBlock(V, BB, CxtI);
1717
1718 if (Result.isConstant())
1719 return Result.getConstant();
1720 if (Result.isConstantRange()) {
1721 const ConstantRange &CR = Result.getConstantRange();
1722 if (const APInt *SingleVal = CR.getSingleElement())
1723 return ConstantInt::get(V->getContext(), *SingleVal);
1724 }
1725 return nullptr;
1726}
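// [Editorial sketch, not part of LazyValueInfo.cpp] A typical client of
// getConstant() folds a value it could not otherwise simplify. Assuming the
// usual "llvm/Analysis/LazyValueInfo.h" and "llvm/IR/Instructions.h" includes,
// a hypothetical helper could look like this:
static bool tryFoldBranchCondition(LazyValueInfo &LVI, BranchInst &BI) {
  if (!BI.isConditional())
    return false;
  // If LVI proves the condition is a single constant at the branch, use it.
  if (Constant *C = LVI.getConstant(BI.getCondition(), &BI)) {
    BI.setCondition(C);
    return true;
  }
  return false;
}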
1727
1728ConstantRange LazyValueInfo::getConstantRange(Value *V, Instruction *CxtI,
1729 bool UndefAllowed) {
1730 assert(V->getType()->isIntegerTy());
1731 BasicBlock *BB = CxtI->getParent();
1732 ValueLatticeElement Result =
1733 getOrCreateImpl(BB->getModule()).getValueInBlock(V, BB, CxtI);
1734 return toConstantRange(Result, V->getType(), UndefAllowed);
1735}
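// [Editorial sketch, not part of LazyValueInfo.cpp] A common way to consume
// getConstantRange() is to prove a property of an operand, e.g. that a divisor
// cannot be zero (CorrelatedValuePropagation performs similar range queries).
// The helper below is hypothetical and assumes the usual includes:
static bool divisorKnownNonZero(LazyValueInfo &LVI, BinaryOperator &Div) {
  assert(Div.getOpcode() == Instruction::UDiv ||
         Div.getOpcode() == Instruction::SDiv);
  ConstantRange CR = LVI.getConstantRange(Div.getOperand(1), &Div,
                                          /*UndefAllowed=*/false);
  // The division cannot trap on divide-by-zero if 0 is not in the range.
  return !CR.contains(APInt::getZero(CR.getBitWidth()));
}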
1736
1737ConstantRange LazyValueInfo::getConstantRangeAtUse(const Use &U,
1738 bool UndefAllowed) {
1739 auto *Inst = cast<Instruction>(U.getUser());
1740 ValueLatticeElement Result =
1741 getOrCreateImpl(Inst->getModule()).getValueAtUse(U);
1742 return toConstantRange(Result, U->getType(), UndefAllowed);
1743}
1744
1745/// Determine whether the specified value is known to be a
1746/// constant on the specified edge. Return null if not.
1747Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
1748 BasicBlock *ToBB,
1749 Instruction *CxtI) {
1750 Module *M = FromBB->getModule();
1751 ValueLatticeElement Result =
1752 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1753
1754 if (Result.isConstant())
1755 return Result.getConstant();
1756 if (Result.isConstantRange()) {
1757 const ConstantRange &CR = Result.getConstantRange();
1758 if (const APInt *SingleVal = CR.getSingleElement())
1759 return ConstantInt::get(V->getContext(), *SingleVal);
1760 }
1761 return nullptr;
1762}
1763
1764ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
1765 BasicBlock *FromBB,
1766 BasicBlock *ToBB,
1767 Instruction *CxtI) {
1768 Module *M = FromBB->getModule();
1769 ValueLatticeElement Result =
1770 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1771 // TODO: Should undef be allowed here?
1772 return toConstantRange(Result, V->getType(), /*UndefAllowed*/ true);
1773}
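// [Editorial sketch, not part of LazyValueInfo.cpp] Edge queries are how
// branch conditions become ranges. For IR such as
//   %c = icmp ult i32 %x, 10
//   br i1 %c, label %then, label %else
// the range of %x on the edge into %then is [0, 10). A hypothetical wrapper:
static ConstantRange rangeOnTakenEdge(LazyValueInfo &LVI, BranchInst &BI,
                                      Value *V) {
  assert(BI.isConditional() && "expected a conditional branch");
  return LVI.getConstantRangeOnEdge(V, BI.getParent(), BI.getSuccessor(0),
                                    /*CxtI=*/nullptr);
}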
1774
1775static LazyValueInfo::Tristate
1776getPredicateResult(unsigned Pred, Constant *C, const ValueLatticeElement &Val,
1777 const DataLayout &DL) {
1778 // If we know the value is a constant, evaluate the conditional.
1779 Constant *Res = nullptr;
1780 if (Val.isConstant()) {
1781 Res = ConstantFoldCompareInstOperands(Pred, Val.getConstant(), C, DL);
1782 if (ConstantInt *ResCI = dyn_cast_or_null<ConstantInt>(Res))
1783 return ResCI->isZero() ? LazyValueInfo::False : LazyValueInfo::True;
1784 return LazyValueInfo::Unknown;
1785 }
1786
1787 if (Val.isConstantRange()) {
1788 ConstantInt *CI = dyn_cast<ConstantInt>(C);
1789 if (!CI) return LazyValueInfo::Unknown;
1790
1791 const ConstantRange &CR = Val.getConstantRange();
1792 if (Pred == ICmpInst::ICMP_EQ) {
1793 if (!CR.contains(CI->getValue()))
1794 return LazyValueInfo::False;
1795
1796 if (CR.isSingleElement())
1797 return LazyValueInfo::True;
1798 } else if (Pred == ICmpInst::ICMP_NE) {
1799 if (!CR.contains(CI->getValue()))
1800 return LazyValueInfo::True;
1801
1802 if (CR.isSingleElement())
1803 return LazyValueInfo::False;
1804 } else {
1805 // Handle more complex predicates.
1806 ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(
1807 (ICmpInst::Predicate)Pred, CI->getValue());
1808 if (TrueValues.contains(CR))
1809 return LazyValueInfo::True;
1810 if (TrueValues.inverse().contains(CR))
1811 return LazyValueInfo::False;
1812 }
1813 return LazyValueInfo::Unknown;
1814 }
1815
1816 if (Val.isNotConstant()) {
1817 // If this is an equality comparison, we can try to fold it knowing that
1818 // "V != C1".
1819 if (Pred == ICmpInst::ICMP_EQ) {
1820 // !C1 == C -> false iff C1 == C.
1821 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
1822 Val.getNotConstant(), C, DL);
1823 if (Res && Res->isNullValue())
1824 return LazyValueInfo::False;
1825 } else if (Pred == ICmpInst::ICMP_NE) {
1826 // !C1 != C -> true iff C1 == C.
1827 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
1828 Val.getNotConstant(), C, DL);
1829 if (Res && Res->isNullValue())
1830 return LazyValueInfo::True;
1831 }
1832 return LazyValueInfo::Unknown;
1833 }
1834
1835 return LazyValueInfo::Unknown;
1836}
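// [Editorial sketch, not part of LazyValueInfo.cpp] The range-vs-predicate
// reasoning above can be reproduced standalone with ConstantRange, assuming
// "llvm/IR/ConstantRange.h" and "llvm/IR/InstrTypes.h" are available:
static int classifyULTAgainstRange() {
  ConstantRange CR(APInt(32, 0), APInt(32, 8));   // value known to be in [0, 8)
  // All values satisfying "x u< 10".
  ConstantRange TrueValues =
      ConstantRange::makeExactICmpRegion(CmpInst::ICMP_ULT, APInt(32, 10));
  if (TrueValues.contains(CR))
    return 1;   // known true (here: [0, 8) is contained in [0, 10))
  if (TrueValues.inverse().contains(CR))
    return 0;   // known false
  return -1;    // unknown
}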
1837
1838/// Determine whether the specified value comparison with a constant is known to
1839/// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
1840LazyValueInfo::Tristate
1841LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
1842 BasicBlock *FromBB, BasicBlock *ToBB,
1843 Instruction *CxtI) {
1844 Module *M = FromBB->getModule();
1845 ValueLatticeElement Result =
1846 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1847
1848 return getPredicateResult(Pred, C, Result, M->getDataLayout());
1849}
1850
1851LazyValueInfo::Tristate
1852LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
1853 Instruction *CxtI, bool UseBlockValue) {
1854 // Whether a pointer is or is not null is a commonly queried predicate. If
1855 // isKnownNonZero can tell us the result of the predicate, we can return it
1856 // quickly. But this is only a fast path, and falling through would still be
1857 // correct.
1858 Module *M = CxtI->getModule();
1859 const DataLayout &DL = M->getDataLayout();
1860 if (V->getType()->isPointerTy() && C->isNullValue() &&
1861 isKnownNonZero(V->stripPointerCastsSameRepresentation(), DL)) {
1862 if (Pred == ICmpInst::ICMP_EQ)
1863 return LazyValueInfo::False;
1864 else if (Pred == ICmpInst::ICMP_NE)
1865 return LazyValueInfo::True;
1866 }
1867
1868 auto &Impl = getOrCreateImpl(M);
1869 ValueLatticeElement Result =
1870 UseBlockValue ? Impl.getValueInBlock(V, CxtI->getParent(), CxtI)
1871 : Impl.getValueAt(V, CxtI);
1872 Tristate Ret = getPredicateResult(Pred, C, Result, DL);
1873 if (Ret != Unknown)
1874 return Ret;
1875
1876 // Note: The following bit of code is somewhat distinct from the rest of LVI;
1877 // LVI as a whole tries to compute a lattice value which is conservatively
1878 // correct at a given location. In this case, we have a predicate which we
1879 // weren't able to prove about the merged result, and we're pushing that
1880 // predicate back along each incoming edge to see if we can prove it
1881 // separately for each input. As a motivating example, consider:
1882 // bb1:
1883 // %v1 = ... ; constantrange<1, 5>
1884 // br label %merge
1885 // bb2:
1886 // %v2 = ... ; constantrange<10, 20>
1887 // br label %merge
1888 // merge:
1889 // %phi = phi [%v1, %v2] ; constantrange<1,20>
1890 // %pred = icmp eq i32 %phi, 8
1891 // We can't tell from the lattice value for '%phi' that '%pred' is false
1892 // along each path, but by checking the predicate over each input separately,
1893 // we can.
1894 // We limit the search to one step backwards from the current BB and value.
1895 // We could consider extending this to search further backwards through the
1896 // CFG and/or value graph, but there are non-obvious compile time vs quality
1897 // tradeoffs.
1898 BasicBlock *BB = CxtI->getParent();
1899
1900 // Function entry or an unreachable block. Bail to avoid confusing
1901 // analysis below.
1902 pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
1903 if (PI == PE)
1904 return Unknown;
1905
1906 // If V is a PHI node in the same block as the context, we need to ask
1907 // questions about the predicate as applied to the incoming value along
1908 // each edge. This is useful for eliminating cases where the predicate is
1909 // known along all incoming edges.
1910 if (auto *PHI = dyn_cast<PHINode>(V))
1911 if (PHI->getParent() == BB) {
1912 Tristate Baseline = Unknown;
1913 for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
1914 Value *Incoming = PHI->getIncomingValue(i);
1915 BasicBlock *PredBB = PHI->getIncomingBlock(i);
1916 // Note that PredBB may be BB itself.
1917 Tristate Result =
1918 getPredicateOnEdge(Pred, Incoming, C, PredBB, BB, CxtI);
1919
1920 // Keep going as long as we've seen a consistent known result for
1921 // all inputs.
1922 Baseline = (i == 0) ? Result /* First iteration */
1923 : (Baseline == Result ? Baseline
1924 : Unknown); /* All others */
1925 if (Baseline == Unknown)
1926 break;
1927 }
1928 if (Baseline != Unknown)
1929 return Baseline;
1930 }
1931
1932 // For a comparison where the V is outside this block, it's possible
1933 // that we've branched on it before. Look to see if the value is known
1934 // on all incoming edges.
1935 if (!isa<Instruction>(V) || cast<Instruction>(V)->getParent() != BB) {
1936 // For predecessor edge, determine if the comparison is true or false
1937 // on that edge. If they're all true or all false, we can conclude
1938 // the value of the comparison in this block.
1939 Tristate Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1940 if (Baseline != Unknown) {
1941 // Check that all remaining incoming values match the first one.
1942 while (++PI != PE) {
1943 Tristate Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1944 if (Ret != Baseline)
1945 break;
1946 }
1947 // If we terminated early, then one of the values didn't match.
1948 if (PI == PE) {
1949 return Baseline;
1950 }
1951 }
1952 }
1953
1954 return Unknown;
1955}
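// [Editorial sketch, not part of LazyValueInfo.cpp] A client such as a
// simplification pass would typically drive the interface above like this
// (hypothetical helper, usual includes assumed):
static bool icmpAgainstConstantKnownTrue(LazyValueInfo &LVI, ICmpInst &Cmp) {
  auto *C = dyn_cast<Constant>(Cmp.getOperand(1));
  if (!C)
    return false;
  // UseBlockValue=true merges information over the whole block, which is more
  // expensive but more precise than the assumption/metadata-only path.
  return LVI.getPredicateAt(Cmp.getPredicate(), Cmp.getOperand(0), C, &Cmp,
                            /*UseBlockValue=*/true) == LazyValueInfo::True;
}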
1956
1957LazyValueInfo::Tristate LazyValueInfo::getPredicateAt(unsigned P, Value *LHS,
1958 Value *RHS,
1959 Instruction *CxtI,
1960 bool UseBlockValue) {
1961 CmpInst::Predicate Pred = (CmpInst::Predicate)P;
1962
1963 if (auto *C = dyn_cast<Constant>(RHS))
1964 return getPredicateAt(P, LHS, C, CxtI, UseBlockValue);
1965 if (auto *C = dyn_cast<Constant>(LHS))
1966 return getPredicateAt(CmpInst::getSwappedPredicate(Pred), RHS, C, CxtI,
1967 UseBlockValue);
1968
1969 // Got two non-Constant values. Try to determine the comparison results based
1970 // on the block values of the two operands, e.g. because they have
1971 // non-overlapping ranges.
1972 if (UseBlockValue) {
1973 Module *M = CxtI->getModule();
1974 ValueLatticeElement L =
1975 getOrCreateImpl(M).getValueInBlock(LHS, CxtI->getParent(), CxtI);
1976 if (L.isOverdefined())
1977 return LazyValueInfo::Unknown;
1978
1979 ValueLatticeElement R =
1980 getOrCreateImpl(M).getValueInBlock(RHS, CxtI->getParent(), CxtI);
1981 Type *Ty = CmpInst::makeCmpResultType(LHS->getType());
1982 if (Constant *Res = L.getCompare((CmpInst::Predicate)P, Ty, R,
1983 M->getDataLayout())) {
1984 if (Res->isNullValue())
1985 return LazyValueInfo::False;
1986 if (Res->isOneValue())
1987 return LazyValueInfo::True;
1988 }
1989 }
1990 return LazyValueInfo::Unknown;
1991}
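// [Editorial sketch, not part of LazyValueInfo.cpp] The two-operand overload
// above is what lets a caller compare two non-constant values whose block
// ranges do not overlap, e.g. (hypothetical helper):
static bool operandsKnownUnequal(LazyValueInfo &LVI, ICmpInst &Cmp) {
  return LVI.getPredicateAt(ICmpInst::ICMP_NE, Cmp.getOperand(0),
                            Cmp.getOperand(1), &Cmp,
                            /*UseBlockValue=*/true) == LazyValueInfo::True;
}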
1992
1993void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1994 BasicBlock *NewSucc) {
1995 if (auto *Impl = getImpl())
1996 Impl->threadEdge(PredBB, OldSucc, NewSucc);
1997}
1998
1999void LazyValueInfo::forgetValue(Value *V) {
2000 if (auto *Impl = getImpl())
2001 Impl->forgetValue(V);
2002}
2003
2004void LazyValueInfo::eraseBlock(BasicBlock *BB) {
2005 if (auto *Impl = getImpl())
2006 Impl->eraseBlock(BB);
2007}
2008
2009void LazyValueInfo::clear() {
2010 if (auto *Impl = getImpl())
2011 Impl->clear();
2012}
2013
2014void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
2015 if (auto *Impl = getImpl())
2016 Impl->printLVI(F, DTree, OS);
2017}
2018
2019// Print the LVI for the function arguments at the start of each basic block.
2020void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
2021 const BasicBlock *BB, formatted_raw_ostream &OS) {
2022 // Find if there are latticevalues defined for arguments of the function.
2023 auto *F = BB->getParent();
2024 for (const auto &Arg : F->args()) {
2025 ValueLatticeElement Result = LVIImpl->getValueInBlock(
2026 const_cast<Argument *>(&Arg), const_cast<BasicBlock *>(BB));
2027 if (Result.isUnknown())
2028 continue;
2029 OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
2030 }
2031}
2032
2033// This function prints the LVI analysis for the instruction I at the beginning
2034// of various basic blocks. It relies on calculated values that are stored in
2035// the LazyValueInfoCache, and in the absence of cached values, recalculates the
2036// LazyValueInfo for `I` and prints that info.
2037void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
2038 const Instruction *I, formatted_raw_ostream &OS) {
2039
2040 auto *ParentBB = I->getParent();
2041 SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI;
2042 // We can generate (solve) LVI values only for blocks that are dominated by
2043 // I's parent. However, to avoid generating LVI for all of those blocks
2044 // (which would contain redundant/uninteresting information), we print LVI
2045 // only for blocks that may use this LVI information (such as immediate
2046 // successor blocks, and blocks that contain uses of `I`).
2047 auto printResult = [&](const BasicBlock *BB) {
2048 if (!BlocksContainingLVI.insert(BB).second)
2049 return;
2050 ValueLatticeElement Result = LVIImpl->getValueInBlock(
2051 const_cast<Instruction *>(I), const_cast<BasicBlock *>(BB));
2052 OS << "; LatticeVal for: '" << *I << "' in BB: '";
2053 BB->printAsOperand(OS, false);
2054 OS << "' is: " << Result << "\n";
2055 };
2056
2057 printResult(ParentBB);
2058 // Print the LVI analysis results for the immediate successor blocks, that
2059 // are dominated by `ParentBB`.
2060 for (const auto *BBSucc : successors(ParentBB))
2061 if (DT.dominates(ParentBB, BBSucc))
2062 printResult(BBSucc);
2063
2064 // Print LVI in blocks where `I` is used.
2065 for (const auto *U : I->users())
2066 if (auto *UseI = dyn_cast<Instruction>(U))
2067 if (!isa<PHINode>(UseI) || DT.dominates(ParentBB, UseI->getParent()))
2068 printResult(UseI->getParent());
2069
2070}
2071
2072PreservedAnalyses LazyValueInfoPrinterPass::run(Function &F,
2073 FunctionAnalysisManager &AM) {
2074 OS << "LVI for function '" << F.getName() << "':\n";
2075 auto &LVI = AM.getResult<LazyValueAnalysis>(F);
2076 auto &DTree = AM.getResult<DominatorTreeAnalysis>(F);
2077 LVI.printLVI(F, DTree, OS);
2078 return PreservedAnalyses::all();
2079}
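// [Editorial note, not part of LazyValueInfo.cpp] With the new pass manager,
// the printer pass above is expected to be reachable from opt roughly as
//   opt -passes='print<lazy-value-info>' -disable-output input.ll
// which runs LazyValueInfoPrinterPass over every function in input.ll.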