LazyValueInfo.cpp
1//===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interface for lazy computation of value constraint
10// information.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/Analysis/LazyValueInfo.h"
15#include "llvm/ADT/DenseSet.h"
16#include "llvm/ADT/STLExtras.h"
17#include "llvm/Analysis/AssumptionCache.h"
18#include "llvm/Analysis/ConstantFolding.h"
19#include "llvm/Analysis/InstructionSimplify.h"
20#include "llvm/Analysis/TargetLibraryInfo.h"
21#include "llvm/Analysis/ValueLattice.h"
22#include "llvm/Analysis/ValueTracking.h"
23#include "llvm/IR/AssemblyAnnotationWriter.h"
24#include "llvm/IR/CFG.h"
25#include "llvm/IR/ConstantRange.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/Dominators.h"
29#include "llvm/IR/Instructions.h"
30#include "llvm/IR/IntrinsicInst.h"
31#include "llvm/IR/Intrinsics.h"
32#include "llvm/IR/LLVMContext.h"
33#include "llvm/IR/PatternMatch.h"
34#include "llvm/IR/ValueHandle.h"
35#include "llvm/InitializePasses.h"
36#include "llvm/Support/Debug.h"
37#include "llvm/Support/FormattedStream.h"
38#include "llvm/Support/KnownBits.h"
39#include "llvm/Support/raw_ostream.h"
40#include <optional>
41using namespace llvm;
42using namespace PatternMatch;
43
44#define DEBUG_TYPE "lazy-value-info"
45
46// This is the number of worklist items we will process to try to discover an
47// answer for a given value.
48static const unsigned MaxProcessedPerValue = 500;
49
50char LazyValueInfoWrapperPass::ID = 0;
51LazyValueInfoWrapperPass::LazyValueInfoWrapperPass() : FunctionPass(ID) {
52 initializeLazyValueInfoWrapperPassPass(*PassRegistry::getPassRegistry());
53}
54INITIALIZE_PASS_BEGIN(LazyValueInfoWrapperPass, "lazy-value-info",
55 "Lazy Value Information Analysis", false, true)
56INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
57INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
58INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info",
59 "Lazy Value Information Analysis", false, true)
60
61namespace llvm {
62FunctionPass *createLazyValueInfoPass() { return new LazyValueInfoWrapperPass(); }
63}
64
65AnalysisKey LazyValueAnalysis::Key;
66
67/// Returns true if this lattice value represents at most one possible value.
68/// This is as precise as any lattice value can get while still representing
69/// reachable code.
70static bool hasSingleValue(const ValueLatticeElement &Val) {
71 if (Val.isConstantRange() &&
72 Val.getConstantRange().isSingleElement())
73 // Integer constants are single element ranges
74 return true;
75 if (Val.isConstant())
76 // Non integer constants
77 return true;
78 return false;
79}
80
81/// Combine two sets of facts about the same value into a single set of
82/// facts. Note that this method is not suitable for merging facts along
83/// different paths in a CFG; that's what the mergeIn function is for. This
84/// is for merging facts gathered about the same value at the same location
85/// through two independent means.
86/// Notes:
87/// * This method does not promise to return the most precise possible lattice
88/// value implied by A and B. It is allowed to return any lattice element
89/// which is at least as strong as *either* A or B (unless our facts
90/// conflict, see below).
91/// * Due to unreachable code, the intersection of two lattice values could be
92/// contradictory. If this happens, we return some valid lattice value so as
93/// not to confuse the rest of LVI. Ideally, we'd always return Undefined, but
94/// we do not make this guarantee. TODO: This would be a useful enhancement.
95static ValueLatticeElement intersect(const ValueLatticeElement &A,
96 const ValueLatticeElement &B) {
97 // Undefined is the strongest state. It means the value is known to be along
98 // an unreachable path.
99 if (A.isUnknown())
100 return A;
101 if (B.isUnknown())
102 return B;
103
104 // If we gave up for one, but got a useable fact from the other, use it.
105 if (A.isOverdefined())
106 return B;
107 if (B.isOverdefined())
108 return A;
109
110 // Can't get any more precise than constants.
111 if (hasSingleValue(A))
112 return A;
113 if (hasSingleValue(B))
114 return B;
115
116 // Could be either constant range or not constant here.
117 if (!A.isConstantRange() || !B.isConstantRange()) {
118 // TODO: Arbitrary choice, could be improved
119 return A;
120 }
121
122 // Intersect two constant ranges
123 ConstantRange Range =
124 A.getConstantRange().intersectWith(B.getConstantRange());
125 // Note: An empty range is implicitly converted to unknown or undef depending
126 // on MayIncludeUndef internally.
127 return ValueLatticeElement::getRange(
128 std::move(Range), /*MayIncludeUndef=*/A.isConstantRangeIncludingUndef() ||
129 B.isConstantRangeIncludingUndef());
130}
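// Illustrative sketch (not part of the original source; values are made up):
// intersecting a range learned from one source with a range learned from
// another keeps the stronger combined fact.
//   ValueLatticeElement A = ValueLatticeElement::getRange(
//       ConstantRange(APInt(32, 0), APInt(32, 100)));   // [0, 100)
//   ValueLatticeElement B = ValueLatticeElement::getRange(
//       ConstantRange(APInt(32, 50), APInt(32, 200)));  // [50, 200)
//   ValueLatticeElement C = intersect(A, B);            // range [50, 100)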
131
132//===----------------------------------------------------------------------===//
133// LazyValueInfoCache Decl
134//===----------------------------------------------------------------------===//
135
136namespace {
137 /// A callback value handle updates the cache when values are erased.
138 class LazyValueInfoCache;
139 struct LVIValueHandle final : public CallbackVH {
140 LazyValueInfoCache *Parent;
141
142 LVIValueHandle(Value *V, LazyValueInfoCache *P = nullptr)
143 : CallbackVH(V), Parent(P) { }
144
145 void deleted() override;
146 void allUsesReplacedWith(Value *V) override {
147 deleted();
148 }
149 };
150} // end anonymous namespace
151
152namespace {
153 using NonNullPointerSet = SmallDenseSet<AssertingVH<Value>, 2>;
154
155 /// This is the cache kept by LazyValueInfo which
156 /// maintains information about queries across the clients' queries.
157 class LazyValueInfoCache {
158 /// This is all of the cached information for one basic block. It contains
159 /// the per-value lattice elements, as well as a separate set for
160 /// overdefined values to reduce memory usage. Additionally pointers
161 /// dereferenced in the block are cached for nullability queries.
162 struct BlockCacheEntry {
163 SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
164 SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
165 // std::nullopt indicates that the nonnull pointers for this basic block
166 // have not been computed yet.
167 std::optional<NonNullPointerSet> NonNullPointers;
168 };
169
170 /// Cached information per basic block.
171 DenseMap<PoisoningVH<BasicBlock>, std::unique_ptr<BlockCacheEntry>>
172 BlockCache;
173 /// Set of value handles used to erase values from the cache on deletion.
174 DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;
175
176 const BlockCacheEntry *getBlockEntry(BasicBlock *BB) const {
177 auto It = BlockCache.find_as(BB);
178 if (It == BlockCache.end())
179 return nullptr;
180 return It->second.get();
181 }
182
183 BlockCacheEntry *getOrCreateBlockEntry(BasicBlock *BB) {
184 auto It = BlockCache.find_as(BB);
185 if (It == BlockCache.end())
186 It = BlockCache.insert({ BB, std::make_unique<BlockCacheEntry>() })
187 .first;
188
189 return It->second.get();
190 }
191
192 void addValueHandle(Value *Val) {
193 auto HandleIt = ValueHandles.find_as(Val);
194 if (HandleIt == ValueHandles.end())
195 ValueHandles.insert({ Val, this });
196 }
197
198 public:
199 void insertResult(Value *Val, BasicBlock *BB,
200 const ValueLatticeElement &Result) {
201 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
202
203 // Insert over-defined values into their own cache to reduce memory
204 // overhead.
205 if (Result.isOverdefined())
206 Entry->OverDefined.insert(Val);
207 else
208 Entry->LatticeElements.insert({ Val, Result });
209
210 addValueHandle(Val);
211 }
212
213 std::optional<ValueLatticeElement>
214 getCachedValueInfo(Value *V, BasicBlock *BB) const {
215 const BlockCacheEntry *Entry = getBlockEntry(BB);
216 if (!Entry)
217 return std::nullopt;
218
219 if (Entry->OverDefined.count(V))
220 return ValueLatticeElement::getOverdefined();
221
222 auto LatticeIt = Entry->LatticeElements.find_as(V);
223 if (LatticeIt == Entry->LatticeElements.end())
224 return std::nullopt;
225
226 return LatticeIt->second;
227 }
228
229 bool isNonNullAtEndOfBlock(
230 Value *V, BasicBlock *BB,
231 function_ref<NonNullPointerSet(BasicBlock *)> InitFn) {
232 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
233 if (!Entry->NonNullPointers) {
234 Entry->NonNullPointers = InitFn(BB);
235 for (Value *V : *Entry->NonNullPointers)
236 addValueHandle(V);
237 }
238
239 return Entry->NonNullPointers->count(V);
240 }
241
242 /// clear - Empty the cache.
243 void clear() {
244 BlockCache.clear();
245 ValueHandles.clear();
246 }
247
248 /// Inform the cache that a given value has been deleted.
249 void eraseValue(Value *V);
250
251 /// This is part of the update interface to inform the cache
252 /// that a block has been deleted.
253 void eraseBlock(BasicBlock *BB);
254
255 /// Updates the cache to remove any influence an overdefined value in
256 /// OldSucc might have (unless also overdefined in NewSucc). This just
257 /// flushes elements from the cache and does not add any.
258 void threadEdgeImpl(BasicBlock *OldSucc,BasicBlock *NewSucc);
259 };
260}
261
262void LazyValueInfoCache::eraseValue(Value *V) {
263 for (auto &Pair : BlockCache) {
264 Pair.second->LatticeElements.erase(V);
265 Pair.second->OverDefined.erase(V);
266 if (Pair.second->NonNullPointers)
267 Pair.second->NonNullPointers->erase(V);
268 }
269
270 auto HandleIt = ValueHandles.find_as(V);
271 if (HandleIt != ValueHandles.end())
272 ValueHandles.erase(HandleIt);
273}
274
275void LVIValueHandle::deleted() {
276 // This erasure deallocates *this, so it MUST happen after we're done
277 // using any and all members of *this.
278 Parent->eraseValue(*this);
279}
280
281void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
282 BlockCache.erase(BB);
283}
284
285void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
286 BasicBlock *NewSucc) {
287 // When an edge in the graph has been threaded, values that we could not
288 // determine a value for before (i.e. were marked overdefined) may be
289 // possible to solve now. We do NOT try to proactively update these values.
290 // Instead, we clear their entries from the cache, and allow lazy updating to
291 // recompute them when needed.
292
293 // The updating process is fairly simple: we need to drop cached info
294 // for all values that were marked overdefined in OldSucc, and for those same
295 // values in any successor of OldSucc (except NewSucc) in which they were
296 // also marked overdefined.
297 std::vector<BasicBlock*> worklist;
298 worklist.push_back(OldSucc);
299
300 const BlockCacheEntry *Entry = getBlockEntry(OldSucc);
301 if (!Entry || Entry->OverDefined.empty())
302 return; // Nothing to process here.
303 SmallVector<Value *, 4> ValsToClear(Entry->OverDefined.begin(),
304 Entry->OverDefined.end());
305
306 // Use a worklist to perform a depth-first search of OldSucc's successors.
307 // NOTE: We do not need a visited list since any blocks we have already
308 // visited will have had their overdefined markers cleared already, and we
309 // thus won't loop to their successors.
310 while (!worklist.empty()) {
311 BasicBlock *ToUpdate = worklist.back();
312 worklist.pop_back();
313
314 // Skip blocks only accessible through NewSucc.
315 if (ToUpdate == NewSucc) continue;
316
317 // If a value was marked overdefined in OldSucc, and is here too...
318 auto OI = BlockCache.find_as(ToUpdate);
319 if (OI == BlockCache.end() || OI->second->OverDefined.empty())
320 continue;
321 auto &ValueSet = OI->second->OverDefined;
322
323 bool changed = false;
324 for (Value *V : ValsToClear) {
325 if (!ValueSet.erase(V))
326 continue;
327
328 // If we removed anything, then we potentially need to update
329 // blocks successors too.
330 changed = true;
331 }
332
333 if (!changed) continue;
334
335 llvm::append_range(worklist, successors(ToUpdate));
336 }
337}
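// Illustrative walk-through (hypothetical blocks, for exposition only): if %v
// was cached as overdefined in OldSucc and again in OldSucc's successor %bb2,
// threading an edge around OldSucc erases both stale entries; the next query
// recomputes %v lazily and may now find a tighter range on the updated CFG.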
338
339
340namespace {
341/// An assembly annotator class to print LazyValueCache information in
342/// comments.
343class LazyValueInfoImpl;
344class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter {
345 LazyValueInfoImpl *LVIImpl;
346 // While analyzing which blocks we can solve values for, we need the dominator
347 // information.
348 DominatorTree &DT;
349
350public:
351 LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree)
352 : LVIImpl(L), DT(DTree) {}
353
354 void emitBasicBlockStartAnnot(const BasicBlock *BB,
355 formatted_raw_ostream &OS) override;
356
357 void emitInstructionAnnot(const Instruction *I,
358 formatted_raw_ostream &OS) override;
359};
360}
361namespace {
362// The actual implementation of the lazy analysis and update. Note that the
363// inheritance from LazyValueInfoCache is intended to be temporary while
364// splitting the code and then transitioning to a has-a relationship.
365class LazyValueInfoImpl {
366
367 /// Cached results from previous queries
368 LazyValueInfoCache TheCache;
369
370 /// This stack holds the state of the value solver during a query.
371 /// It basically emulates the callstack of the naive
372 /// recursive value lookup process.
373 SmallVector<std::pair<BasicBlock*, Value*>, 8> BlockValueStack;
374
375 /// Keeps track of which block-value pairs are in BlockValueStack.
376 DenseSet<std::pair<BasicBlock*, Value*>> BlockValueSet;
377
378 /// Push BV onto BlockValueStack unless it's already in there.
379 /// Returns true on success.
380 bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
381 if (!BlockValueSet.insert(BV).second)
382 return false; // It's already in the stack.
383
384 LLVM_DEBUG(dbgs() << "PUSH: " << *BV.second << " in "
385 << BV.first->getName() << "\n");
386 BlockValueStack.push_back(BV);
387 return true;
388 }
389
390 AssumptionCache *AC; ///< A pointer to the cache of @llvm.assume calls.
391 const DataLayout &DL; ///< A mandatory DataLayout
392
393 /// Declaration of the llvm.experimental.guard() intrinsic,
394 /// if it exists in the module.
395 Function *GuardDecl;
396
397 std::optional<ValueLatticeElement> getBlockValue(Value *Val, BasicBlock *BB,
398 Instruction *CxtI);
399 std::optional<ValueLatticeElement> getEdgeValue(Value *V, BasicBlock *F,
400 BasicBlock *T,
401 Instruction *CxtI = nullptr);
402
403 // These methods process one work item and may add more. A false value
404 // returned means that the work item was not completely processed and must
405 // be revisited after going through the new items.
406 bool solveBlockValue(Value *Val, BasicBlock *BB);
407 std::optional<ValueLatticeElement> solveBlockValueImpl(Value *Val,
408 BasicBlock *BB);
409 std::optional<ValueLatticeElement> solveBlockValueNonLocal(Value *Val,
410 BasicBlock *BB);
411 std::optional<ValueLatticeElement> solveBlockValuePHINode(PHINode *PN,
412 BasicBlock *BB);
413 std::optional<ValueLatticeElement> solveBlockValueSelect(SelectInst *S,
414 BasicBlock *BB);
415 std::optional<ConstantRange> getRangeFor(Value *V, Instruction *CxtI,
416 BasicBlock *BB);
417 std::optional<ValueLatticeElement> solveBlockValueBinaryOpImpl(
418 Instruction *I, BasicBlock *BB,
419 std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
420 OpFn);
421 std::optional<ValueLatticeElement>
422 solveBlockValueBinaryOp(BinaryOperator *BBI, BasicBlock *BB);
423 std::optional<ValueLatticeElement> solveBlockValueCast(CastInst *CI,
424 BasicBlock *BB);
425 std::optional<ValueLatticeElement>
426 solveBlockValueOverflowIntrinsic(WithOverflowInst *WO, BasicBlock *BB);
427 std::optional<ValueLatticeElement> solveBlockValueIntrinsic(IntrinsicInst *II,
428 BasicBlock *BB);
429 std::optional<ValueLatticeElement>
430 solveBlockValueExtractValue(ExtractValueInst *EVI, BasicBlock *BB);
431 bool isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB);
432 void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
433 ValueLatticeElement &BBLV,
434 Instruction *BBI);
435
436 void solve();
437
438public:
439 /// This is the query interface to determine the lattice value for the
440 /// specified Value* at the context instruction (if specified) or at the
441 /// start of the block.
442 ValueLatticeElement getValueInBlock(Value *V, BasicBlock *BB,
443 Instruction *CxtI = nullptr);
444
445 /// This is the query interface to determine the lattice value for the
446 /// specified Value* at the specified instruction using only information
447 /// from assumes/guards and range metadata. Unlike getValueInBlock(), no
448 /// recursive query is performed.
449 ValueLatticeElement getValueAt(Value *V, Instruction *CxtI);
450
451 /// This is the query interface to determine the lattice
452 /// value for the specified Value* that is true on the specified edge.
453 ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB,
454 BasicBlock *ToBB,
455 Instruction *CxtI = nullptr);
456
457 /// Completely flush all previously computed values.
458 void clear() {
459 TheCache.clear();
460 }
461
462 /// Printing the LazyValueInfo Analysis.
463 void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
464 LazyValueInfoAnnotatedWriter Writer(this, DTree);
465 F.print(OS, &Writer);
466 }
467
468 /// This is part of the update interface to inform the cache
469 /// that a block has been deleted.
470 void eraseBlock(BasicBlock *BB) {
471 TheCache.eraseBlock(BB);
472 }
473
474 /// This is the update interface to inform the cache that an edge from
475 /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
476 void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);
477
478 LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
479 Function *GuardDecl)
480 : AC(AC), DL(DL), GuardDecl(GuardDecl) {}
481};
482} // end anonymous namespace
483
484
485void LazyValueInfoImpl::solve() {
486 SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack(
487 BlockValueStack.begin(), BlockValueStack.end());
488
489 unsigned processedCount = 0;
490 while (!BlockValueStack.empty()) {
491 processedCount++;
492 // Abort if we have to process too many values to get a result for this one.
493 // Because of the design of the overdefined cache currently being per-block
494 // to avoid naming-related issues (IE it wants to try to give different
495 // results for the same name in different blocks), overdefined results don't
496 // get cached globally, which in turn means we will often try to rediscover
497 // the same overdefined result again and again. Once something like
498 // PredicateInfo is used in LVI or CVP, we should be able to make the
499 // overdefined cache global, and remove this throttle.
500 if (processedCount > MaxProcessedPerValue) {
501 LLVM_DEBUG(
502 dbgs() << "Giving up on stack because we are getting too deep\n");
503 // Fill in the original values
504 while (!StartingStack.empty()) {
505 std::pair<BasicBlock *, Value *> &e = StartingStack.back();
506 TheCache.insertResult(e.second, e.first,
507 ValueLatticeElement::getOverdefined());
508 StartingStack.pop_back();
509 }
510 BlockValueSet.clear();
511 BlockValueStack.clear();
512 return;
513 }
514 std::pair<BasicBlock *, Value *> e = BlockValueStack.back();
515 assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");
516
517 if (solveBlockValue(e.second, e.first)) {
518 // The work item was completely processed.
519 assert(BlockValueStack.back() == e && "Nothing should have been pushed!");
520#ifndef NDEBUG
521 std::optional<ValueLatticeElement> BBLV =
522 TheCache.getCachedValueInfo(e.second, e.first);
523 assert(BBLV && "Result should be in cache!");
524 LLVM_DEBUG(
525 dbgs() << "POP " << *e.second << " in " << e.first->getName() << " = "
526 << *BBLV << "\n");
527#endif
528
529 BlockValueStack.pop_back();
530 BlockValueSet.erase(e);
531 } else {
532 // More work needs to be done before revisiting.
533 assert(BlockValueStack.back() != e && "Stack should have been pushed!");
534 }
535 }
536}
537
538std::optional<ValueLatticeElement>
539LazyValueInfoImpl::getBlockValue(Value *Val, BasicBlock *BB,
540 Instruction *CxtI) {
541 // If already a constant, there is nothing to compute.
542 if (Constant *VC = dyn_cast<Constant>(Val))
543 return ValueLatticeElement::get(VC);
544
545 if (std::optional<ValueLatticeElement> OptLatticeVal =
546 TheCache.getCachedValueInfo(Val, BB)) {
547 intersectAssumeOrGuardBlockValueConstantRange(Val, *OptLatticeVal, CxtI);
548 return OptLatticeVal;
549 }
550
551 // We have hit a cycle, assume overdefined.
552 if (!pushBlockValue({ BB, Val }))
553 return ValueLatticeElement::getOverdefined();
554
555 // Yet to be resolved.
556 return std::nullopt;
557}
558
559static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) {
560 switch (BBI->getOpcode()) {
561 default: break;
562 case Instruction::Load:
563 case Instruction::Call:
564 case Instruction::Invoke:
565 if (MDNode *Ranges = BBI->getMetadata(LLVMContext::MD_range))
566 if (isa<IntegerType>(BBI->getType())) {
567 return ValueLatticeElement::getRange(
568 getConstantRangeFromMetadata(*Ranges));
569 }
570 break;
571 };
572 // Nothing known - will be intersected with other facts
573 return ValueLatticeElement::getOverdefined();
574}
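// Illustrative IR (hypothetical, not from this file): a call or load carrying
// range metadata, e.g.
//   %x = load i8, ptr %p, !range !0
//   !0 = !{i8 0, i8 10}
// is mapped by the helper above to the lattice value constantrange<0, 10>.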
575
576bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) {
577 assert(!isa<Constant>(Val) && "Value should not be constant");
578 assert(!TheCache.getCachedValueInfo(Val, BB) &&
579 "Value should not be in cache");
580
581 // Hold off inserting this value into the Cache in case we have to return
582 // false and come back later.
583 std::optional<ValueLatticeElement> Res = solveBlockValueImpl(Val, BB);
584 if (!Res)
585 // Work pushed, will revisit
586 return false;
587
588 TheCache.insertResult(Val, BB, *Res);
589 return true;
590}
591
592std::optional<ValueLatticeElement>
593LazyValueInfoImpl::solveBlockValueImpl(Value *Val, BasicBlock *BB) {
594 Instruction *BBI = dyn_cast<Instruction>(Val);
595 if (!BBI || BBI->getParent() != BB)
596 return solveBlockValueNonLocal(Val, BB);
597
598 if (PHINode *PN = dyn_cast<PHINode>(BBI))
599 return solveBlockValuePHINode(PN, BB);
600
601 if (auto *SI = dyn_cast<SelectInst>(BBI))
602 return solveBlockValueSelect(SI, BB);
603
604 // If this value is a nonnull pointer, record its range and bail out. Note
605 // that for all other pointer typed values, we terminate the search at the
606 // definition. We could easily extend this to look through geps, bitcasts,
607 // and the like to prove non-nullness, but it's not clear that's worth it
608 // compile time wise. The context-insensitive value walk done inside
609 // isKnownNonZero gets most of the profitable cases at much less expense.
610 // This does mean that we have a sensitivity to where the defining
611 // instruction is placed, even if it could legally be hoisted much higher.
612 // That is unfortunate.
613 PointerType *PT = dyn_cast<PointerType>(BBI->getType());
614 if (PT && isKnownNonZero(BBI, DL))
615 return ValueLatticeElement::getNot(ConstantPointerNull::get(PT));
616
617 if (BBI->getType()->isIntegerTy()) {
618 if (auto *CI = dyn_cast<CastInst>(BBI))
619 return solveBlockValueCast(CI, BB);
620
621 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI))
622 return solveBlockValueBinaryOp(BO, BB);
623
624 if (auto *EVI = dyn_cast<ExtractValueInst>(BBI))
625 return solveBlockValueExtractValue(EVI, BB);
626
627 if (auto *II = dyn_cast<IntrinsicInst>(BBI))
628 return solveBlockValueIntrinsic(II, BB);
629 }
630
631 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
632 << "' - unknown inst def found.\n");
633 return getFromRangeMetadata(BBI);
634}
635
636static void AddNonNullPointer(Value *Ptr, NonNullPointerSet &PtrSet) {
637 // TODO: Use NullPointerIsDefined instead.
638 if (Ptr->getType()->getPointerAddressSpace() == 0)
639 PtrSet.insert(getUnderlyingObject(Ptr));
640}
641
642static void AddNonNullPointersByInstruction(
643 Instruction *I, NonNullPointerSet &PtrSet) {
644 if (LoadInst *L = dyn_cast<LoadInst>(I)) {
645 AddNonNullPointer(L->getPointerOperand(), PtrSet);
646 } else if (StoreInst *S = dyn_cast<StoreInst>(I)) {
647 AddNonNullPointer(S->getPointerOperand(), PtrSet);
648 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
649 if (MI->isVolatile()) return;
650
651 // FIXME: check whether it has a valuerange that excludes zero?
652 ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
653 if (!Len || Len->isZero()) return;
654
655 AddNonNullPointer(MI->getRawDest(), PtrSet);
656 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
657 AddNonNullPointer(MTI->getRawSource(), PtrSet);
658 }
659}
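// Illustrative IR (hypothetical, not from this file): a block containing
//   store i32 0, ptr %p
// records %p (in address space 0) in the block's NonNullPointerSet, since a
// store through null would be undefined behavior; isNonNullAtEndOfBlock below
// consumes this set.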
660
661bool LazyValueInfoImpl::isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB) {
662 if (NullPointerIsDefined(BB->getParent(),
663 Val->getType()->getPointerAddressSpace()))
664 return false;
665
666 Val = Val->stripInBoundsOffsets();
667 return TheCache.isNonNullAtEndOfBlock(Val, BB, [](BasicBlock *BB) {
668 NonNullPointerSet NonNullPointers;
669 for (Instruction &I : *BB)
670 AddNonNullPointersByInstruction(&I, NonNullPointers);
671 return NonNullPointers;
672 });
673}
674
675std::optional<ValueLatticeElement>
676LazyValueInfoImpl::solveBlockValueNonLocal(Value *Val, BasicBlock *BB) {
677 ValueLatticeElement Result; // Start Undefined.
678
679 // If this is the entry block, we must be asking about an argument. The
680 // value is overdefined.
681 if (BB->isEntryBlock()) {
682 assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
683 return ValueLatticeElement::getOverdefined();
684 }
685
686 // Loop over all of our predecessors, merging what we know from them into
687 // result. If we encounter an unexplored predecessor, we eagerly explore it
688 // in a depth first manner. In practice, this has the effect of discovering
689 // paths we can't analyze eagerly without spending compile times analyzing
690 // other paths. This heuristic benefits from the fact that predecessors are
691 // frequently arranged such that dominating ones come first and we quickly
692 // find a path to function entry. TODO: We should consider explicitly
693 // canonicalizing to make this true rather than relying on this happy
694 // accident.
695 for (BasicBlock *Pred : predecessors(BB)) {
696 std::optional<ValueLatticeElement> EdgeResult = getEdgeValue(Val, Pred, BB);
697 if (!EdgeResult)
698 // Explore that input, then return here
699 return std::nullopt;
700
701 Result.mergeIn(*EdgeResult);
702
703 // If we hit overdefined, exit early. The BlockVals entry is already set
704 // to overdefined.
705 if (Result.isOverdefined()) {
706 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
707 << "' - overdefined because of pred '"
708 << Pred->getName() << "' (non local).\n");
709 return Result;
710 }
711 }
712
713 // Return the merged value, which is more precise than 'overdefined'.
714 assert(!Result.isOverdefined());
715 return Result;
716}
717
718std::optional<ValueLatticeElement>
719LazyValueInfoImpl::solveBlockValuePHINode(PHINode *PN, BasicBlock *BB) {
720 ValueLatticeElement Result; // Start Undefined.
721
722 // Loop over all of our predecessors, merging what we know from them into
723 // result. See the comment about the chosen traversal order in
724 // solveBlockValueNonLocal; the same reasoning applies here.
725 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
726 BasicBlock *PhiBB = PN->getIncomingBlock(i);
727 Value *PhiVal = PN->getIncomingValue(i);
728 // Note that we can provide PN as the context value to getEdgeValue, even
729 // though the results will be cached, because PN is the value being used as
730 // the cache key in the caller.
731 std::optional<ValueLatticeElement> EdgeResult =
732 getEdgeValue(PhiVal, PhiBB, BB, PN);
733 if (!EdgeResult)
734 // Explore that input, then return here
735 return std::nullopt;
736
737 Result.mergeIn(*EdgeResult);
738
739 // If we hit overdefined, exit early. The BlockVals entry is already set
740 // to overdefined.
741 if (Result.isOverdefined()) {
742 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
743 << "' - overdefined because of pred (local).\n");
744
745 return Result;
746 }
747 }
748
749 // Return the merged value, which is more precise than 'overdefined'.
750 assert(!Result.isOverdefined() && "Possible PHI in entry block?");
751 return Result;
752}
753
754static ValueLatticeElement getValueFromCondition(Value *Val, Value *Cond,
755 bool isTrueDest = true);
756
757// If we can determine a constraint on the value given conditions assumed by
758// the program, intersect those constraints with BBLV
759void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
760 Value *Val, ValueLatticeElement &BBLV, Instruction *BBI) {
761 BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
762 if (!BBI)
763 return;
764
765 BasicBlock *BB = BBI->getParent();
766 for (auto &AssumeVH : AC->assumptionsFor(Val)) {
767 if (!AssumeVH)
768 continue;
769
770 // Only check assumes in the block of the context instruction. Other
771 // assumes will have already been taken into account when the value was
772 // propagated from predecessor blocks.
773 auto *I = cast<CallInst>(AssumeVH);
774 if (I->getParent() != BB || !isValidAssumeForContext(I, BBI))
775 continue;
776
777 BBLV = intersect(BBLV, getValueFromCondition(Val, I->getArgOperand(0)));
778 }
779
780 // If guards are not used in the module, don't spend time looking for them
781 if (GuardDecl && !GuardDecl->use_empty() &&
782 BBI->getIterator() != BB->begin()) {
783 for (Instruction &I : make_range(std::next(BBI->getIterator().getReverse()),
784 BB->rend())) {
785 Value *Cond = nullptr;
786 if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(Cond))))
787 BBLV = intersect(BBLV, getValueFromCondition(Val, Cond));
788 }
789 }
790
791 if (BBLV.isOverdefined()) {
792 // Check whether we're checking at the terminator, and the pointer has
793 // been dereferenced in this block.
794 PointerType *PTy = dyn_cast<PointerType>(Val->getType());
795 if (PTy && BB->getTerminator() == BBI &&
796 isNonNullAtEndOfBlock(Val, BB))
797 BBLV = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));
798 }
799}
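// Illustrative IR (hypothetical, not from this file): given
//   %c = icmp ult i32 %x, 100
//   call void @llvm.assume(i1 %c)
//   %y = add i32 %x, 1
// a query for %x at %y intersects the cached block value of %x with [0, 100),
// the range implied by the assume's condition.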
800
801static ConstantRange getConstantRangeOrFull(const ValueLatticeElement &Val,
802 Type *Ty, const DataLayout &DL) {
803 if (Val.isConstantRange())
804 return Val.getConstantRange();
805 return ConstantRange::getFull(DL.getTypeSizeInBits(Ty));
806}
807
808std::optional<ValueLatticeElement>
809LazyValueInfoImpl::solveBlockValueSelect(SelectInst *SI, BasicBlock *BB) {
810 // Recurse on our inputs if needed
811 std::optional<ValueLatticeElement> OptTrueVal =
812 getBlockValue(SI->getTrueValue(), BB, SI);
813 if (!OptTrueVal)
814 return std::nullopt;
815 ValueLatticeElement &TrueVal = *OptTrueVal;
816
817 std::optional<ValueLatticeElement> OptFalseVal =
818 getBlockValue(SI->getFalseValue(), BB, SI);
819 if (!OptFalseVal)
820 return std::nullopt;
821 ValueLatticeElement &FalseVal = *OptFalseVal;
822
823 if (TrueVal.isConstantRange() || FalseVal.isConstantRange()) {
824 const ConstantRange &TrueCR =
825 getConstantRangeOrFull(TrueVal, SI->getType(), DL);
826 const ConstantRange &FalseCR =
827 getConstantRangeOrFull(FalseVal, SI->getType(), DL);
828 Value *LHS = nullptr;
829 Value *RHS = nullptr;
830 SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
831 // Is this a min specifically of our two inputs? (Avoid the risk of
832 // ValueTracking getting smarter looking back past our immediate inputs.)
833 if (SelectPatternResult::isMinOrMax(SPR.Flavor) &&
834 ((LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) ||
835 (RHS == SI->getTrueValue() && LHS == SI->getFalseValue()))) {
836 ConstantRange ResultCR = [&]() {
837 switch (SPR.Flavor) {
838 default:
839 llvm_unreachable("unexpected minmax type!");
840 case SPF_SMIN: /// Signed minimum
841 return TrueCR.smin(FalseCR);
842 case SPF_UMIN: /// Unsigned minimum
843 return TrueCR.umin(FalseCR);
844 case SPF_SMAX: /// Signed maximum
845 return TrueCR.smax(FalseCR);
846 case SPF_UMAX: /// Unsigned maximum
847 return TrueCR.umax(FalseCR);
848 };
849 }();
850 return ValueLatticeElement::getRange(
851 ResultCR, TrueVal.isConstantRangeIncludingUndef() ||
852 FalseVal.isConstantRangeIncludingUndef());
853 }
854
855 if (SPR.Flavor == SPF_ABS) {
856 if (LHS == SI->getTrueValue())
857 return ValueLatticeElement::getRange(
858 TrueCR.abs(), TrueVal.isConstantRangeIncludingUndef());
859 if (LHS == SI->getFalseValue())
860 return ValueLatticeElement::getRange(
861 FalseCR.abs(), FalseVal.isConstantRangeIncludingUndef());
862 }
863
864 if (SPR.Flavor == SPF_NABS) {
865 ConstantRange Zero(APInt::getZero(TrueCR.getBitWidth()));
866 if (LHS == SI->getTrueValue())
867 return ValueLatticeElement::getRange(
868 Zero.sub(TrueCR.abs()), FalseVal.isConstantRangeIncludingUndef());
869 if (LHS == SI->getFalseValue())
870 return ValueLatticeElement::getRange(
871 Zero.sub(FalseCR.abs()), FalseVal.isConstantRangeIncludingUndef());
872 }
873 }
874
875 // Can we constrain the facts about the true and false values by using the
876 // condition itself? This shows up with idioms like e.g. select(a > 5, a, 5).
877 // TODO: We could potentially refine an overdefined true value above.
878 Value *Cond = SI->getCondition();
879 // If the value is undef, a different value may be chosen in
880 // the select condition.
881 if (isGuaranteedNotToBeUndefOrPoison(Cond, AC)) {
882 TrueVal = intersect(TrueVal,
883 getValueFromCondition(SI->getTrueValue(), Cond, true));
884 FalseVal = intersect(
885 FalseVal, getValueFromCondition(SI->getFalseValue(), Cond, false));
886 }
887
888 ValueLatticeElement Result = TrueVal;
889 Result.mergeIn(FalseVal);
890 return Result;
891}
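// Illustrative IR (hypothetical, not from this file): for the clamp idiom
//   %c = icmp ult i32 %x, 10
//   %s = select i1 %c, i32 %x, i32 10
// the true arm is constrained by the condition to [0, 10), the false arm is
// the constant 10, and the merged result for %s is the range [0, 11).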
892
893std::optional<ConstantRange>
894LazyValueInfoImpl::getRangeFor(Value *V, Instruction *CxtI, BasicBlock *BB) {
895 std::optional<ValueLatticeElement> OptVal = getBlockValue(V, BB, CxtI);
896 if (!OptVal)
897 return std::nullopt;
898 return getConstantRangeOrFull(*OptVal, V->getType(), DL);
899}
900
901std::optional<ValueLatticeElement>
902LazyValueInfoImpl::solveBlockValueCast(CastInst *CI, BasicBlock *BB) {
903 // Without knowing how wide the input is, we can't analyze it in any useful
904 // way.
905 if (!CI->getOperand(0)->getType()->isSized())
906 return ValueLatticeElement::getOverdefined();
907
908 // Filter out casts we don't know how to reason about before attempting to
909 // recurse on our operand. This can cut a long search short if we know we're
910 // not going to be able to get any useful information anyway.
911 switch (CI->getOpcode()) {
912 case Instruction::Trunc:
913 case Instruction::SExt:
914 case Instruction::ZExt:
915 case Instruction::BitCast:
916 break;
917 default:
918 // Unhandled instructions are overdefined.
919 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
920 << "' - overdefined (unknown cast).\n");
921 return ValueLatticeElement::getOverdefined();
922 }
923
924 // Figure out the range of the LHS. If that fails, we still apply the
925 // transfer rule on the full set since we may be able to locally infer
926 // interesting facts.
927 std::optional<ConstantRange> LHSRes = getRangeFor(CI->getOperand(0), CI, BB);
928 if (!LHSRes)
929 // More work to do before applying this transfer rule.
930 return std::nullopt;
931 const ConstantRange &LHSRange = *LHSRes;
932
933 const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth();
934
935 // NOTE: We're currently limited by the set of operations that ConstantRange
936 // can evaluate symbolically. Enhancing that set will allows us to analyze
937 // more definitions.
938 return ValueLatticeElement::getRange(LHSRange.castOp(CI->getOpcode(),
939 ResultBitWidth));
940}
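// Illustrative example (hypothetical values, not from this file): if %x is
// known to lie in [0, 300), then for "%t = trunc i32 %x to i8" the call to
// ConstantRange::castOp yields the full i8 range (300 values cannot fit in 8
// bits), whereas an input range of [0, 100) would map to the i8 range [0, 100).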
941
942std::optional<ValueLatticeElement>
943LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
944 Instruction *I, BasicBlock *BB,
945 std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
946 OpFn) {
947 // Figure out the ranges of the operands. If that fails, use a
948 // conservative range, but apply the transfer rule anyways. This
949 // lets us pick up facts from expressions like "and i32 (call i32
950 // @foo()), 32"
951 std::optional<ConstantRange> LHSRes = getRangeFor(I->getOperand(0), I, BB);
952 std::optional<ConstantRange> RHSRes = getRangeFor(I->getOperand(1), I, BB);
953 if (!LHSRes || !RHSRes)
954 // More work to do before applying this transfer rule.
955 return std::nullopt;
956
957 const ConstantRange &LHSRange = *LHSRes;
958 const ConstantRange &RHSRange = *RHSRes;
959 return ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange));
960}
961
962std::optional<ValueLatticeElement>
963LazyValueInfoImpl::solveBlockValueBinaryOp(BinaryOperator *BO, BasicBlock *BB) {
964 assert(BO->getOperand(0)->getType()->isSized() &&
965 "all operands to binary operators are sized");
966 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO)) {
967 unsigned NoWrapKind = 0;
968 if (OBO->hasNoUnsignedWrap())
969 NoWrapKind |= OverflowingBinaryOperator::NoUnsignedWrap;
970 if (OBO->hasNoSignedWrap())
971 NoWrapKind |= OverflowingBinaryOperator::NoSignedWrap;
972
973 return solveBlockValueBinaryOpImpl(
974 BO, BB,
975 [BO, NoWrapKind](const ConstantRange &CR1, const ConstantRange &CR2) {
976 return CR1.overflowingBinaryOp(BO->getOpcode(), CR2, NoWrapKind);
977 });
978 }
979
980 return solveBlockValueBinaryOpImpl(
981 BO, BB, [BO](const ConstantRange &CR1, const ConstantRange &CR2) {
982 return CR1.binaryOp(BO->getOpcode(), CR2);
983 });
984}
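// Illustrative example (hypothetical values, not from this file): if %a is in
// [0, 10) and %b is in [0, 5), then for "%c = add i32 %a, %b" the lambda above
// evaluates ConstantRange::binaryOp(Add, ...) and %c gets the range [0, 14);
// when an operand range is unknown, the conservative full range is used.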
985
986std::optional<ValueLatticeElement>
987LazyValueInfoImpl::solveBlockValueOverflowIntrinsic(WithOverflowInst *WO,
988 BasicBlock *BB) {
989 return solveBlockValueBinaryOpImpl(
990 WO, BB, [WO](const ConstantRange &CR1, const ConstantRange &CR2) {
991 return CR1.binaryOp(WO->getBinaryOp(), CR2);
992 });
993}
994
995std::optional<ValueLatticeElement>
996LazyValueInfoImpl::solveBlockValueIntrinsic(IntrinsicInst *II, BasicBlock *BB) {
997 ValueLatticeElement MetadataVal = getFromRangeMetadata(II);
998 if (!ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
999 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1000 << "' - unknown intrinsic.\n");
1001 return MetadataVal;
1002 }
1003
1004 SmallVector<ConstantRange, 2> OpRanges;
1005 for (Value *Op : II->args()) {
1006 std::optional<ConstantRange> Range = getRangeFor(Op, II, BB);
1007 if (!Range)
1008 return std::nullopt;
1009 OpRanges.push_back(*Range);
1010 }
1011
1012 return intersect(ValueLatticeElement::getRange(ConstantRange::intrinsic(
1013 II->getIntrinsicID(), OpRanges)),
1014 MetadataVal);
1015}
1016
1017std::optional<ValueLatticeElement>
1018LazyValueInfoImpl::solveBlockValueExtractValue(ExtractValueInst *EVI,
1019 BasicBlock *BB) {
1020 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1021 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 0)
1022 return solveBlockValueOverflowIntrinsic(WO, BB);
1023
1024 // Handle extractvalue of insertvalue to allow further simplification
1025 // based on replaced with.overflow intrinsics.
1026 if (Value *V = simplifyExtractValueInst(
1027 EVI->getAggregateOperand(), EVI->getIndices(),
1028 EVI->getModule()->getDataLayout()))
1029 return getBlockValue(V, BB, EVI);
1030
1031 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1032 << "' - overdefined (unknown extractvalue).\n");
1033 return ValueLatticeElement::getOverdefined();
1034}
1035
1036static bool matchICmpOperand(APInt &Offset, Value *LHS, Value *Val,
1037 ICmpInst::Predicate Pred) {
1038 if (LHS == Val)
1039 return true;
1040
1041 // Handle range checking idiom produced by InstCombine. We will subtract the
1042 // offset from the allowed range for RHS in this case.
1043 const APInt *C;
1044 if (match(LHS, m_Add(m_Specific(Val), m_APInt(C)))) {
1045 Offset = *C;
1046 return true;
1047 }
1048
1049 // Handle the symmetric case. This appears in saturation patterns like
1050 // (x == 16) ? 16 : (x + 1).
1051 if (match(Val, m_Add(m_Specific(LHS), m_APInt(C)))) {
1052 Offset = -*C;
1053 return true;
1054 }
1055
1056 // If (x | y) < C, then (x < C) && (y < C).
1057 if (match(LHS, m_c_Or(m_Specific(Val), m_Value())) &&
1058 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE))
1059 return true;
1060
1061 // If (x & y) > C, then (x > C) && (y > C).
1062 if (match(LHS, m_c_And(m_Specific(Val), m_Value())) &&
1063 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE))
1064 return true;
1065
1066 return false;
1067}
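// Illustrative IR (hypothetical, not from this file): the InstCombine range
// check idiom
//   %add = add i32 %x, -10
//   %c = icmp ult i32 %add, 5
// matches with Offset = -10; on the true edge %add lies in [0, 5), so %x lies
// in the shifted range [10, 15) after the subtract in
// getValueFromSimpleICmpCondition below.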
1068
1069/// Get value range for a "(Val + Offset) Pred RHS" condition.
1070static ValueLatticeElement getValueFromSimpleICmpCondition(
1071 CmpInst::Predicate Pred, Value *RHS, const APInt &Offset) {
1072 ConstantRange RHSRange(RHS->getType()->getIntegerBitWidth(),
1073 /*isFullSet=*/true);
1074 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS))
1075 RHSRange = ConstantRange(CI->getValue());
1076 else if (Instruction *I = dyn_cast<Instruction>(RHS))
1077 if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
1078 RHSRange = getConstantRangeFromMetadata(*Ranges);
1079
1080 ConstantRange TrueValues =
1081 ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
1082 return ValueLatticeElement::getRange(TrueValues.subtract(Offset));
1083}
1084
1085static ValueLatticeElement getValueFromICmpCondition(Value *Val, ICmpInst *ICI,
1086 bool isTrueDest) {
1087 Value *LHS = ICI->getOperand(0);
1088 Value *RHS = ICI->getOperand(1);
1089
1090 // Get the predicate that must hold along the considered edge.
1091 CmpInst::Predicate EdgePred =
1092 isTrueDest ? ICI->getPredicate() : ICI->getInversePredicate();
1093
1094 if (isa<Constant>(RHS)) {
1095 if (ICI->isEquality() && LHS == Val) {
1096 if (EdgePred == ICmpInst::ICMP_EQ)
1097 return ValueLatticeElement::get(cast<Constant>(RHS));
1098 else if (!isa<UndefValue>(RHS))
1099 return ValueLatticeElement::getNot(cast<Constant>(RHS));
1100 }
1101 }
1102
1103 Type *Ty = Val->getType();
1104 if (!Ty->isIntegerTy())
1105 return ValueLatticeElement::getOverdefined();
1106
1107 unsigned BitWidth = Ty->getScalarSizeInBits();
1108 APInt Offset(BitWidth, 0);
1109 if (matchICmpOperand(Offset, LHS, Val, EdgePred))
1110 return getValueFromSimpleICmpCondition(EdgePred, RHS, Offset);
1111
1112 CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(EdgePred);
1113 if (matchICmpOperand(Offset, RHS, Val, SwappedPred))
1114 return getValueFromSimpleICmpCondition(SwappedPred, LHS, Offset);
1115
1116 const APInt *Mask, *C;
1117 if (match(LHS, m_And(m_Specific(Val), m_APInt(Mask))) &&
1118 match(RHS, m_APInt(C))) {
1119 // If (Val & Mask) == C then all the masked bits are known and we can
1120 // compute a value range based on that.
1121 if (EdgePred == ICmpInst::ICMP_EQ) {
1122 KnownBits Known;
1123 Known.Zero = ~*C & *Mask;
1124 Known.One = *C & *Mask;
1125 return ValueLatticeElement::getRange(
1126 ConstantRange::fromKnownBits(Known, /*IsSigned*/ false));
1127 }
1128 // If (Val & Mask) != 0 then the value must be larger than the lowest set
1129 // bit of Mask.
1130 if (EdgePred == ICmpInst::ICMP_NE && !Mask->isZero() && C->isZero()) {
1131 return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
1132 APInt::getOneBitSet(BitWidth, Mask->countr_zero()),
1133 APInt::getZero(BitWidth)));
1134 }
1135 }
1136
1137 // If (X urem Modulus) >= C, then X >= C.
1138 // If trunc X >= C, then X >= C.
1139 // TODO: An upper bound could be computed as well.
1140 if (match(LHS, m_CombineOr(m_URem(m_Specific(Val), m_Value()),
1141 m_Trunc(m_Specific(Val)))) &&
1142 match(RHS, m_APInt(C))) {
1143 // Use the icmp region so we don't have to deal with different predicates.
1144 ConstantRange CR = ConstantRange::makeExactICmpRegion(EdgePred, *C);
1145 if (!CR.isEmptySet())
1146 return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
1147 CR.getUnsignedMin(), APInt(BitWidth, 0)));
1148 }
1149
1150 return ValueLatticeElement::getOverdefined();
1151}
1152
1153// Handle conditions of the form
1154// extractvalue(op.with.overflow(%x, C), 1).
1155static ValueLatticeElement getValueFromOverflowCondition(
1156 Value *Val, WithOverflowInst *WO, bool IsTrueDest) {
1157 // TODO: This only works with a constant RHS for now. We could also compute
1158 // the range of the RHS, but this doesn't fit into the current structure of
1159 // the edge value calculation.
1160 const APInt *C;
1161 if (WO->getLHS() != Val || !match(WO->getRHS(), m_APInt(C)))
1162 return ValueLatticeElement::getOverdefined();
1163
1164 // Calculate the possible values of %x for which no overflow occurs.
1165 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
1166 WO->getBinaryOp(), *C, WO->getNoWrapKind());
1167
1168 // If overflow is false, %x is constrained to NWR. If overflow is true, %x is
1169 // constrained to its inverse (all values that might cause overflow).
1170 if (IsTrueDest)
1171 NWR = NWR.inverse();
1172 return ValueLatticeElement::getRange(NWR);
1173}
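// Illustrative IR (hypothetical, not from this file): for
//   %wo = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 1)
//   %ov = extractvalue { i32, i1 } %wo, 1
// the no-wrap region of "add nuw %x, 1" is [0, 0xFFFFFFFF); on the edge where
// %ov is false %x is constrained to that region, and on the edge where %ov is
// true to its inverse, the single value 0xFFFFFFFF.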
1174
1175// Tracks a Value * condition and whether we're interested in it or its inverse
1176typedef PointerIntPair<Value *, 1, bool> CondValue;
1177
1178static std::optional<ValueLatticeElement> getValueFromConditionImpl(
1179 Value *Val, CondValue CondVal, bool isRevisit,
1180 SmallDenseMap<CondValue, ValueLatticeElement> &Visited,
1181 SmallVectorImpl<CondValue> &Worklist) {
1182
1183 Value *Cond = CondVal.getPointer();
1184 bool isTrueDest = CondVal.getInt();
1185 if (!isRevisit) {
1186 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond))
1187 return getValueFromICmpCondition(Val, ICI, isTrueDest);
1188
1189 if (auto *EVI = dyn_cast<ExtractValueInst>(Cond))
1190 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1191 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1)
1192 return getValueFromOverflowCondition(Val, WO, isTrueDest);
1193 }
1194
1195 Value *N;
1196 if (match(Cond, m_Not(m_Value(N)))) {
1197 CondValue NKey(N, !isTrueDest);
1198 auto NV = Visited.find(NKey);
1199 if (NV == Visited.end()) {
1200 Worklist.push_back(NKey);
1201 return std::nullopt;
1202 }
1203 return NV->second;
1204 }
1205
1206 Value *L, *R;
1207 bool IsAnd;
1208 if (match(Cond, m_LogicalAnd(m_Value(L), m_Value(R))))
1209 IsAnd = true;
1210 else if (match(Cond, m_LogicalOr(m_Value(L), m_Value(R))))
1211 IsAnd = false;
1212 else
1213 return ValueLatticeElement::getOverdefined();
1214
1215 auto LV = Visited.find(CondValue(L, isTrueDest));
1216 auto RV = Visited.find(CondValue(R, isTrueDest));
1217
1218 // if (L && R) -> intersect L and R
1219 // if (!(L || R)) -> intersect !L and !R
1220 // if (L || R) -> union L and R
1221 // if (!(L && R)) -> union !L and !R
1222 if ((isTrueDest ^ IsAnd) && (LV != Visited.end())) {
1223 ValueLatticeElement V = LV->second;
1224 if (V.isOverdefined())
1225 return V;
1226 if (RV != Visited.end()) {
1227 V.mergeIn(RV->second);
1228 return V;
1229 }
1230 }
1231
1232 if (LV == Visited.end() || RV == Visited.end()) {
1233 assert(!isRevisit);
1234 if (LV == Visited.end())
1235 Worklist.push_back(CondValue(L, isTrueDest));
1236 if (RV == Visited.end())
1237 Worklist.push_back(CondValue(R, isTrueDest));
1238 return std::nullopt;
1239 }
1240
1241 return intersect(LV->second, RV->second);
1242}
1243
1244static ValueLatticeElement getValueFromCondition(Value *Val, Value *Cond,
1245 bool isTrueDest) {
1246 assert(Cond && "precondition");
1247 SmallDenseMap<CondValue, ValueLatticeElement> Visited;
1248 SmallVector<CondValue> Worklist;
1249
1250 CondValue CondKey(Cond, isTrueDest);
1251 Worklist.push_back(CondKey);
1252 do {
1253 CondValue CurrentCond = Worklist.back();
1254 // Insert an Overdefined placeholder into the set to prevent
1255 // infinite recursion if the IR contains uses that are not
1256 // dominated by their def, as in this example:
1257 // "%tmp3 = or i1 undef, %tmp4"
1258 // "%tmp4 = or i1 undef, %tmp3"
1259 auto Iter =
1260 Visited.try_emplace(CurrentCond, ValueLatticeElement::getOverdefined());
1261 bool isRevisit = !Iter.second;
1262 std::optional<ValueLatticeElement> Result = getValueFromConditionImpl(
1263 Val, CurrentCond, isRevisit, Visited, Worklist);
1264 if (Result) {
1265 Visited[CurrentCond] = *Result;
1266 Worklist.pop_back();
1267 }
1268 } while (!Worklist.empty());
1269
1270 auto Result = Visited.find(CondKey);
1271 assert(Result != Visited.end());
1272 return Result->second;
1273}
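// Illustrative IR (hypothetical, not from this file): for the combined
// condition
//   %c1 = icmp sgt i32 %x, 0
//   %c2 = icmp slt i32 %x, 10
//   %c = and i1 %c1, %c2
// the worklist visits %c1 and %c2 first; on the edge where %c is true their
// lattice values are intersected, constraining %x to [1, 10).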
1274
1275// Return true if Usr has Op as an operand, otherwise false.
1276static bool usesOperand(User *Usr, Value *Op) {
1277 return is_contained(Usr->operands(), Op);
1278}
1279
1280// Return true if the instruction type of Val is supported by
1281// constantFoldUser(). Currently CastInst, BinaryOperator and FreezeInst only.
1282// Call this before calling constantFoldUser() to find out if it's even worth
1283// attempting to call it.
1284static bool isOperationFoldable(User *Usr) {
1285 return isa<CastInst>(Usr) || isa<BinaryOperator>(Usr) || isa<FreezeInst>(Usr);
1286}
1287
1288// Check if Usr can be simplified to an integer constant when the value of one
1289// of its operands Op is an integer constant OpConstVal. If so, return it as an
1290// lattice value range with a single element or otherwise return an overdefined
1291// lattice value.
1292static ValueLatticeElement constantFoldUser(User *Usr, Value *Op,
1293 const APInt &OpConstVal,
1294 const DataLayout &DL) {
1295 assert(isOperationFoldable(Usr) && "Precondition");
1296 Constant* OpConst = Constant::getIntegerValue(Op->getType(), OpConstVal);
1297 // Check if Usr can be simplified to a constant.
1298 if (auto *CI = dyn_cast<CastInst>(Usr)) {
1299 assert(CI->getOperand(0) == Op && "Operand 0 isn't Op");
1300 if (auto *C = dyn_cast_or_null<ConstantInt>(
1301 simplifyCastInst(CI->getOpcode(), OpConst,
1302 CI->getDestTy(), DL))) {
1303 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1304 }
1305 } else if (auto *BO = dyn_cast<BinaryOperator>(Usr)) {
1306 bool Op0Match = BO->getOperand(0) == Op;
1307 bool Op1Match = BO->getOperand(1) == Op;
1308 assert((Op0Match || Op1Match) &&
1309 "Operand 0 nor Operand 1 isn't a match");
1310 Value *LHS = Op0Match ? OpConst : BO->getOperand(0);
1311 Value *RHS = Op1Match ? OpConst : BO->getOperand(1);
1312 if (auto *C = dyn_cast_or_null<ConstantInt>(
1313 simplifyBinOp(BO->getOpcode(), LHS, RHS, DL))) {
1314 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1315 }
1316 } else if (isa<FreezeInst>(Usr)) {
1317 assert(cast<FreezeInst>(Usr)->getOperand(0) == Op && "Operand 0 isn't Op");
1318 return ValueLatticeElement::getRange(ConstantRange(OpConstVal));
1319 }
1320 return ValueLatticeElement::getOverdefined();
1321}
1322
1323/// Compute the value of Val on the edge BBFrom -> BBTo. Returns false if
1324/// Val is not constrained on the edge. Result is unspecified if return value
1325/// is false.
1326static std::optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
1327 BasicBlock *BBFrom,
1328 BasicBlock *BBTo) {
1329 // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
1330 // know that v != 0.
1331 if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
1332 // If this is a conditional branch and only one successor goes to BBTo, then
1333 // we may be able to infer something from the condition.
1334 if (BI->isConditional() &&
1335 BI->getSuccessor(0) != BI->getSuccessor(1)) {
1336 bool isTrueDest = BI->getSuccessor(0) == BBTo;
1337 assert(BI->getSuccessor(!isTrueDest) == BBTo &&
1338 "BBTo isn't a successor of BBFrom");
1339 Value *Condition = BI->getCondition();
1340
1341 // If V is the condition of the branch itself, then we know exactly what
1342 // it is.
1343 if (Condition == Val)
1344 return ValueLatticeElement::get(ConstantInt::get(
1345 Type::getInt1Ty(Val->getContext()), isTrueDest));
1346
1347 // If the condition of the branch is an equality comparison, we may be
1348 // able to infer the value.
1349 ValueLatticeElement Result = getValueFromCondition(Val, Condition,
1350 isTrueDest);
1351 if (!Result.isOverdefined())
1352 return Result;
1353
1354 if (User *Usr = dyn_cast<User>(Val)) {
1355 assert(Result.isOverdefined() && "Result isn't overdefined");
1356 // Check with isOperationFoldable() first to avoid linearly iterating
1357 // over the operands unnecessarily which can be expensive for
1358 // instructions with many operands.
1359 if (isa<IntegerType>(Usr->getType()) && isOperationFoldable(Usr)) {
1360 const DataLayout &DL = BBTo->getModule()->getDataLayout();
1361 if (usesOperand(Usr, Condition)) {
1362 // If Val has Condition as an operand and Val can be folded into a
1363 // constant with either Condition == true or Condition == false,
1364 // propagate the constant.
1365 // eg.
1366 // ; %Val is true on the edge to %then.
1367 // %Val = and i1 %Condition, true.
1368 // br %Condition, label %then, label %else
1369 APInt ConditionVal(1, isTrueDest ? 1 : 0);
1370 Result = constantFoldUser(Usr, Condition, ConditionVal, DL);
1371 } else {
1372 // If one of Val's operand has an inferred value, we may be able to
1373 // infer the value of Val.
1374 // eg.
1375 // ; %Val is 94 on the edge to %then.
1376 // %Val = add i8 %Op, 1
1377 // %Condition = icmp eq i8 %Op, 93
1378 // br i1 %Condition, label %then, label %else
1379 for (unsigned i = 0; i < Usr->getNumOperands(); ++i) {
1380 Value *Op = Usr->getOperand(i);
1381 ValueLatticeElement OpLatticeVal =
1382 getValueFromCondition(Op, Condition, isTrueDest);
1383 if (std::optional<APInt> OpConst =
1384 OpLatticeVal.asConstantInteger()) {
1385 Result = constantFoldUser(Usr, Op, *OpConst, DL);
1386 break;
1387 }
1388 }
1389 }
1390 }
1391 }
1392 if (!Result.isOverdefined())
1393 return Result;
1394 }
1395 }
1396
1397 // If the edge was formed by a switch on the value, then we may know exactly
1398 // what it is.
1399 if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
1400 Value *Condition = SI->getCondition();
1401 if (!isa<IntegerType>(Val->getType()))
1402 return std::nullopt;
1403 bool ValUsesConditionAndMayBeFoldable = false;
1404 if (Condition != Val) {
1405 // Check if Val has Condition as an operand.
1406 if (User *Usr = dyn_cast<User>(Val))
1407 ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) &&
1408 usesOperand(Usr, Condition);
1409 if (!ValUsesConditionAndMayBeFoldable)
1410 return std::nullopt;
1411 }
1412 assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
1413 "Condition != Val nor Val doesn't use Condition");
1414
1415 bool DefaultCase = SI->getDefaultDest() == BBTo;
1416 unsigned BitWidth = Val->getType()->getIntegerBitWidth();
1417 ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);
1418
1419 for (auto Case : SI->cases()) {
1420 APInt CaseValue = Case.getCaseValue()->getValue();
1421 ConstantRange EdgeVal(CaseValue);
1422 if (ValUsesConditionAndMayBeFoldable) {
1423 User *Usr = cast<User>(Val);
1424 const DataLayout &DL = BBTo->getModule()->getDataLayout();
1425 ValueLatticeElement EdgeLatticeVal =
1426 constantFoldUser(Usr, Condition, CaseValue, DL);
1427 if (EdgeLatticeVal.isOverdefined())
1428 return std::nullopt;
1429 EdgeVal = EdgeLatticeVal.getConstantRange();
1430 }
1431 if (DefaultCase) {
1432 // It is possible that the default destination is the destination of
1433 // some cases. We cannot perform difference for those cases.
1434 // We know Condition != CaseValue in BBTo. In some cases we can use
1435 // this to infer Val == f(Condition) is != f(CaseValue). For now, we
1436 // only do this when f is identity (i.e. Val == Condition), but we
1437 // should be able to do this for any injective f.
1438 if (Case.getCaseSuccessor() != BBTo && Condition == Val)
1439 EdgesVals = EdgesVals.difference(EdgeVal);
1440 } else if (Case.getCaseSuccessor() == BBTo)
1441 EdgesVals = EdgesVals.unionWith(EdgeVal);
1442 }
1443 return ValueLatticeElement::getRange(std::move(EdgesVals));
1444 }
1445 return std::nullopt;
1446}
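// Illustrative IR (hypothetical, not from this file): for
//   switch i32 %x, label %default [ i32 1, label %bb1
//                                   i32 3, label %bb1 ]
// the edge to %bb1 unions the case values {1} and {3} into [1, 4) (the
// tightest ConstantRange covering both), while the edge to %default starts
// from the full set and subtracts handled case values where possible.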
1447
1448/// Compute the value of Val on the edge BBFrom -> BBTo or the value at
1449/// the basic block if the edge does not constrain Val.
1450std::optional<ValueLatticeElement>
1451LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom,
1452 BasicBlock *BBTo, Instruction *CxtI) {
1453 // If already a constant, there is nothing to compute.
1454 if (Constant *VC = dyn_cast<Constant>(Val))
1455 return ValueLatticeElement::get(VC);
1456
1457 ValueLatticeElement LocalResult =
1458 getEdgeValueLocal(Val, BBFrom, BBTo)
1459 .value_or(ValueLatticeElement::getOverdefined());
1460 if (hasSingleValue(LocalResult))
1461 // Can't get any more precise here
1462 return LocalResult;
1463
1464 std::optional<ValueLatticeElement> OptInBlock =
1465 getBlockValue(Val, BBFrom, BBFrom->getTerminator());
1466 if (!OptInBlock)
1467 return std::nullopt;
1468 ValueLatticeElement &InBlock = *OptInBlock;
1469
1470 // We can use the context instruction (generically the ultimate instruction
1471 // the calling pass is trying to simplify) here, even though the result of
1472 // this function is generally cached when called from the solve* functions
1473 // (and that cached result might be used with queries using a different
1474 // context instruction), because when this function is called from the solve*
1475 // functions, the context instruction is not provided. When called from
1476 // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided,
1477 // but then the result is not cached.
1478 intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock, CxtI);
1479
1480 return intersect(LocalResult, InBlock);
1481}
1482
1483ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
1484 Instruction *CxtI) {
1485 LLVM_DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
1486 << BB->getName() << "'\n");
1487
1488 assert(BlockValueStack.empty() && BlockValueSet.empty());
1489 std::optional<ValueLatticeElement> OptResult = getBlockValue(V, BB, CxtI);
1490 if (!OptResult) {
1491 solve();
1492 OptResult = getBlockValue(V, BB, CxtI);
1493 assert(OptResult && "Value not available after solving");
1494 }
1495
1496 ValueLatticeElement Result = *OptResult;
1497 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1498 return Result;
1499}
1500
1501ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
1502 LLVM_DEBUG(dbgs() << "LVI Getting value " << *V << " at '" << CxtI->getName()
1503 << "'\n");
1504
1505 if (auto *C = dyn_cast<Constant>(V))
1506 return ValueLatticeElement::get(C);
1507
1508 ValueLatticeElement Result = ValueLatticeElement::getOverdefined();
1509 if (auto *I = dyn_cast<Instruction>(V))
1510 Result = getFromRangeMetadata(I);
1511 intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
1512
1513 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1514 return Result;
1515}
1516
1517ValueLatticeElement LazyValueInfoImpl::
1518getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
1519 Instruction *CxtI) {
1520 LLVM_DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
1521 << FromBB->getName() << "' to '" << ToBB->getName()
1522 << "'\n");
1523
1524 std::optional<ValueLatticeElement> Result =
1525 getEdgeValue(V, FromBB, ToBB, CxtI);
1526 if (!Result) {
1527 solve();
1528 Result = getEdgeValue(V, FromBB, ToBB, CxtI);
1529 assert(Result && "More work to do after problem solved?");
1530 }
1531
1532 LLVM_DEBUG(dbgs() << " Result = " << *Result << "\n");
1533 return *Result;
1534}
1535
1536void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1537 BasicBlock *NewSucc) {
1538 TheCache.threadEdgeImpl(OldSucc, NewSucc);
1539}
1540
1541//===----------------------------------------------------------------------===//
1542// LazyValueInfo Impl
1543//===----------------------------------------------------------------------===//
1544
1545/// This lazily constructs the LazyValueInfoImpl.
1546static LazyValueInfoImpl &getImpl(void *&PImpl, AssumptionCache *AC,
1547 const Module *M) {
1548 if (!PImpl) {
1549 assert(M && "getCache() called with a null Module");
1550 const DataLayout &DL = M->getDataLayout();
1551 Function *GuardDecl = M->getFunction(
1552 Intrinsic::getName(Intrinsic::experimental_guard));
1553 PImpl = new LazyValueInfoImpl(AC, DL, GuardDecl);
1554 }
1555 return *static_cast<LazyValueInfoImpl*>(PImpl);
1556}
1557
1558bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
1559 Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1560 Info.TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1561
1562 if (Info.PImpl)
1563 getImpl(Info.PImpl, Info.AC, F.getParent()).clear();
1564
1565 // Fully lazy.
1566 return false;
1567}
1568
1569void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1570 AU.setPreservesAll();
1571 AU.addRequired<AssumptionCacheTracker>();
1572 AU.addRequired<TargetLibraryInfoWrapperPass>();
1573}
1574
1575LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }
1576
1577LazyValueInfo::~LazyValueInfo() { releaseMemory(); }
1578
1579void LazyValueInfo::releaseMemory() {
1580 // If the cache was allocated, free it.
1581 if (PImpl) {
1582 delete &getImpl(PImpl, AC, nullptr);
1583 PImpl = nullptr;
1584 }
1585}
1586
1587bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA,
1588 FunctionAnalysisManager::Invalidator &Inv) {
1589 // We need to invalidate if we have either failed to preserve this analysis'
1590 // result directly or if any of its dependencies have been invalidated.
1591 auto PAC = PA.getChecker<LazyValueAnalysis>();
1592 if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()))
1593 return true;
1594
1595 return false;
1596}
1597
1598void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }
1599
1600LazyValueInfo LazyValueAnalysis::run(Function &F,
1601 FunctionAnalysisManager &FAM) {
1602 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
1603 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
1604
1605 return LazyValueInfo(&AC, &F.getParent()->getDataLayout(), &TLI);
1606}
1607
1608/// Returns true if we can statically tell that this value will never be a
1609/// "useful" constant. In practice, this means we've got something like an
1610/// alloca or a malloc call for which a comparison against a constant can
1611/// only be guarding dead code. Note that we are potentially giving up some
1612/// precision in dead code (a constant result) in favour of avoiding an
1613/// expensive search for an easily answered common query.
1614static bool isKnownNonConstant(Value *V) {
1615 V = V->stripPointerCasts();
1616 // The return val of alloc cannot be a Constant.
1617 if (isa<AllocaInst>(V))
1618 return true;
1619 return false;
1620}
1621
1622Constant *LazyValueInfo::getConstant(Value *V, Instruction *CxtI) {
1623 // Bail out early if V is known not to be a Constant.
1624 if (isKnownNonConstant(V))
1625 return nullptr;
1626
1627 BasicBlock *BB = CxtI->getParent();
1628 ValueLatticeElement Result =
1629 getImpl(PImpl, AC, BB->getModule()).getValueInBlock(V, BB, CxtI);
1630
1631 if (Result.isConstant())
1632 return Result.getConstant();
1633 if (Result.isConstantRange()) {
1634 const ConstantRange &CR = Result.getConstantRange();
1635 if (const APInt *SingleVal = CR.getSingleElement())
1636 return ConstantInt::get(V->getContext(), *SingleVal);
1637 }
1638 return nullptr;
1639}
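// Usage sketch (hypothetical caller, not from this file): a transform holding
// a LazyValueInfo &LVI can materialize a known constant at a program point,
// e.g.
//   if (Constant *C = LVI.getConstant(V, InsertPt))
//     V->replaceAllUsesWith(C);
// Both a constant lattice value and a single-element constant range (such as
// [7, 8), which folds to i32 7) produce a non-null result here; LVI, V and
// InsertPt are placeholder names.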
1640
1641ConstantRange LazyValueInfo::getConstantRange(Value *V, Instruction *CxtI,
1642 bool UndefAllowed) {
1643 assert(V->getType()->isIntegerTy());
1644 unsigned Width = V->getType()->getIntegerBitWidth();
1645 BasicBlock *BB = CxtI->getParent();
1646 ValueLatticeElement Result =
1647 getImpl(PImpl, AC, BB->getModule()).getValueInBlock(V, BB, CxtI);
1648 if (Result.isUnknown())
1649 return ConstantRange::getEmpty(Width);
1650 if (Result.isConstantRange(UndefAllowed))
1651 return Result.getConstantRange(UndefAllowed);
1652 // We represent ConstantInt constants as constant ranges but other kinds
1653 // of integer constants, i.e. ConstantExpr, will be tagged as constants.
1654 assert(!(Result.isConstant() && isa<ConstantInt>(Result.getConstant())) &&
1655 "ConstantInt value must be represented as constantrange");
1656 return ConstantRange::getFull(Width);
1657}
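// Usage sketch (hypothetical caller, not from this file): range queries are
// the usual entry point for range-based simplification, e.g.
//   ConstantRange CR = LVI.getConstantRange(Op, &I, /*UndefAllowed=*/false);
//   if (CR.isAllNonNegative())
//     ... treat a signed operation on Op as unsigned ...
// An unknown (unreachable) value yields the empty set above, while an
// overdefined value yields the full range for the integer width; LVI, Op and
// I are placeholder names.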
1658
1659ConstantRange LazyValueInfo::getConstantRangeAtUse(const Use &U,
1660 bool UndefAllowed) {
1661 Value *V = U.get();
1662 ConstantRange CR =
1663 getConstantRange(V, cast<Instruction>(U.getUser()), UndefAllowed);
1664
1665 // Check whether the only (possibly transitive) use of the value is in a
1666 // position where V can be constrained by a select or branch condition.
1667 const Use *CurrU = &U;
1668 // TODO: Increase limit?
1669 const unsigned MaxUsesToInspect = 3;
1670 for (unsigned I = 0; I < MaxUsesToInspect; ++I) {
1671 std::optional<ValueLatticeElement> CondVal;
1672 auto *CurrI = cast<Instruction>(CurrU->getUser());
1673 if (auto *SI = dyn_cast<SelectInst>(CurrI)) {
1674 // If the value is undef, a different value may be chosen in
1675 // the select condition and at use.
1676 if (!isGuaranteedNotToBeUndefOrPoison(SI->getCondition(), AC))
1677 break;
1678 if (CurrU->getOperandNo() == 1)
1679 CondVal = getValueFromCondition(V, SI->getCondition(), true);
1680 else if (CurrU->getOperandNo() == 2)
1681 CondVal = getValueFromCondition(V, SI->getCondition(), false);
1682 } else if (auto *PHI = dyn_cast<PHINode>(CurrI)) {
1683 // TODO: Use non-local query?
1684 CondVal =
1685 getEdgeValueLocal(V, PHI->getIncomingBlock(*CurrU), PHI->getParent());
1686 }
1687 if (CondVal && CondVal->isConstantRange())
1688 CR = CR.intersectWith(CondVal->getConstantRange());
1689
1690 // Only follow one-use chain, to allow direct intersection of conditions.
1691 // If there are multiple uses, we would have to intersect with the union of
1692 // all conditions at different uses.
1693 // Stop walking if we hit a non-speculatable instruction. Even if the
1694 // result is only used under a specific condition, executing the
1695 // instruction itself may cause side effects or UB already.
1696 // This also disallows looking through phi nodes: If the phi node is part
1697 // of a cycle, we might end up reasoning about values from different cycle
1698 // iterations (PR60629).
1699 if (!CurrI->hasOneUse() || !isSafeToSpeculativelyExecute(CurrI))
1700 break;
1701 CurrU = &*CurrI->use_begin();
1702 }
1703 return CR;
1704}
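// Illustrative example (not part of the original source) for the use-site
// walk above. Given IR such as
//   %cmp = icmp ult i32 %x, 10
//   %sel = select i1 %cmp, i32 %x, i32 0
// a getConstantRangeAtUse() query on the use of %x as the select's true
// operand intersects the block-level range of %x with the range [0, 10)
// implied by %cmp being true, even if %x is unconstrained in the block
// itself.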
1705
1706/// Determine whether the specified value is known to be a
1707/// constant on the specified edge. Return null if not.
1708Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
1709 BasicBlock *ToBB,
1710 Instruction *CxtI) {
1711 Module *M = FromBB->getModule();
1712 ValueLatticeElement Result =
1713 getImpl(PImpl, AC, M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1714
1715 if (Result.isConstant())
1716 return Result.getConstant();
1717 if (Result.isConstantRange()) {
1718 const ConstantRange &CR = Result.getConstantRange();
1719 if (const APInt *SingleVal = CR.getSingleElement())
1720 return ConstantInt::get(V->getContext(), *SingleVal);
1721 }
1722 return nullptr;
1723}
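// Illustrative example (not part of the original source): an edge guarded by
// an equality test makes the value a known constant on that edge. For IR like
//   %c = icmp eq i32 %x, 42
//   br i1 %c, label %then, label %else
// a getConstantOnEdge(%x, <branch block>, %then) query returns i32 42,
// because the edge constrains %x to the single-element range [42, 43).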
1724
1725ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
1726 BasicBlock *FromBB,
1727 BasicBlock *ToBB,
1728 Instruction *CxtI) {
1729 unsigned Width = V->getType()->getIntegerBitWidth();
1730 Module *M = FromBB->getModule();
1731 ValueLatticeElement Result =
1732 getImpl(PImpl, AC, M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1733
1734 if (Result.isUnknown())
1735 return ConstantRange::getEmpty(Width);
1736 if (Result.isConstantRange())
1737 return Result.getConstantRange();
1738 // We represent ConstantInt constants as constant ranges but other kinds
1739 // of integer constants, i.e. ConstantExpr, will be tagged as constants.
1740 assert(!(Result.isConstant() && isa<ConstantInt>(Result.getConstant())) &&
1741 "ConstantInt value must be represented as constantrange");
1742 return ConstantRange::getFull(Width);
1743}
1744
1745static LazyValueInfo::Tristate
1746getPredicateResult(unsigned Pred, Constant *C, const ValueLatticeElement &Val,
1747 const DataLayout &DL, TargetLibraryInfo *TLI) {
1748 // If we know the value is a constant, evaluate the conditional.
1749 Constant *Res = nullptr;
1750 if (Val.isConstant()) {
1751 Res = ConstantFoldCompareInstOperands(Pred, Val.getConstant(), C, DL, TLI);
1752 if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
1753 return ResCI->isZero() ? LazyValueInfo::False : LazyValueInfo::True;
1754 return LazyValueInfo::Unknown;
1755 }
1756
1757 if (Val.isConstantRange()) {
1758 ConstantInt *CI = dyn_cast<ConstantInt>(C);
1759 if (!CI) return LazyValueInfo::Unknown;
1760
1761 const ConstantRange &CR = Val.getConstantRange();
1762 if (Pred == ICmpInst::ICMP_EQ) {
1763 if (!CR.contains(CI->getValue()))
1764 return LazyValueInfo::False;
1765
1766 if (CR.isSingleElement())
1767 return LazyValueInfo::True;
1768 } else if (Pred == ICmpInst::ICMP_NE) {
1769 if (!CR.contains(CI->getValue()))
1770 return LazyValueInfo::True;
1771
1772 if (CR.isSingleElement())
1773 return LazyValueInfo::False;
1774 } else {
1775 // Handle more complex predicates.
1776 ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(
1777 (ICmpInst::Predicate)Pred, CI->getValue());
1778 if (TrueValues.contains(CR))
1779 return LazyValueInfo::True;
1780 if (TrueValues.inverse().contains(CR))
1781 return LazyValueInfo::False;
1782 }
1783 return LazyValueInfo::Unknown;
1784 }
1785
1786 if (Val.isNotConstant()) {
1787 // If this is an equality comparison, we can try to fold it knowing that
1788 // "V != C1".
1789 if (Pred == ICmpInst::ICMP_EQ) {
1790 // !C1 == C -> false iff C1 == C.
1791 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
1792 Val.getNotConstant(), C, DL,
1793 TLI);
1794 if (Res->isNullValue())
1795 return LazyValueInfo::False;
1796 } else if (Pred == ICmpInst::ICMP_NE) {
1797 // !C1 != C -> true iff C1 == C.
1798 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
1799 Val.getNotConstant(), C, DL,
1800 TLI);
1801 if (Res->isNullValue())
1802 return LazyValueInfo::True;
1803 }
1804 return LazyValueInfo::Unknown;
1805 }
1806
1807 return LazyValueInfo::Unknown;
1808}
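// Worked example (not part of the original source) for the constant-range
// case above: with Val = constantrange<0, 10>,
//   "icmp ult Val, 10"  -> makeExactICmpRegion gives [0, 10), which contains
//                          the whole range, so the result is True;
//   "icmp eq Val, 20"   -> 20 is outside the range, so the result is False;
//   "icmp eq Val, 5"    -> 5 is inside but the range is not a single element,
//                          so the result is Unknown.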
1809
1810/// Determine whether the specified value comparison with a constant is known to
1811/// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
1812LazyValueInfo::Tristate
1813LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
1814 BasicBlock *FromBB, BasicBlock *ToBB,
1815 Instruction *CxtI) {
1816 Module *M = FromBB->getModule();
1817 ValueLatticeElement Result =
1818 getImpl(PImpl, AC, M).getValueOnEdge(V, FromBB, ToBB, CxtI);
1819
1820 return getPredicateResult(Pred, C, Result, M->getDataLayout(), TLI);
1821}
1822
1823LazyValueInfo::Tristate
1824LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
1825 Instruction *CxtI, bool UseBlockValue) {
1826 // Whether a value is or is not NonNull is a common predicate being queried. If
1827 // isKnownNonZero can tell us the result of the predicate, we can
1828 // return it quickly. But this is only a fastpath, and falling
1829 // through would still be correct.
1830 Module *M = CxtI->getModule();
1831 const DataLayout &DL = M->getDataLayout();
1832 if (V->getType()->isPointerTy() && C->isNullValue() &&
1833 isKnownNonZero(V->stripPointerCastsSameRepresentation(), DL)) {
1834 if (Pred == ICmpInst::ICMP_EQ)
1835 return LazyValueInfo::False;
1836 else if (Pred == ICmpInst::ICMP_NE)
1837 return LazyValueInfo::True;
1838 }
1839
1840 ValueLatticeElement Result = UseBlockValue
1841 ? getImpl(PImpl, AC, M).getValueInBlock(V, CxtI->getParent(), CxtI)
1842 : getImpl(PImpl, AC, M).getValueAt(V, CxtI);
1843 Tristate Ret = getPredicateResult(Pred, C, Result, DL, TLI);
1844 if (Ret != Unknown)
1845 return Ret;
1846
1847 // Note: The following bit of code is somewhat distinct from the rest of LVI;
1848 // LVI as a whole tries to compute a lattice value which is conservatively
1849 // correct at a given location. In this case, we have a predicate which we
1850 // weren't able to prove about the merged result, and we're pushing that
1851 // predicate back along each incoming edge to see if we can prove it
1852 // separately for each input. As a motivating example, consider:
1853 // bb1:
1854 // %v1 = ... ; constantrange<1, 5>
1855 // br label %merge
1856 // bb2:
1857 // %v2 = ... ; constantrange<10, 20>
1858 // br label %merge
1859 // merge:
1860 // %phi = phi [%v1, %v2] ; constantrange<1,20>
1861 // %pred = icmp eq i32 %phi, 8
1862 // We can't tell from the lattice value for '%phi' that '%pred' is false
1863 // along each path, but by checking the predicate over each input separately,
1864 // we can.
1865 // We limit the search to one step backwards from the current BB and value.
1866 // We could consider extending this to search further backwards through the
1867 // CFG and/or value graph, but there are non-obvious compile time vs quality
1868 // tradeoffs.
1869 BasicBlock *BB = CxtI->getParent();
1870
1871 // Function entry or an unreachable block. Bail to avoid confusing
1872 // analysis below.
1873 pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
1874 if (PI == PE)
1875 return Unknown;
1876
1877 // If V is a PHI node in the same block as the context, we need to ask
1878 // questions about the predicate as applied to the incoming value along
1879 // each edge. This is useful for eliminating cases where the predicate is
1880 // known along all incoming edges.
1881 if (auto *PHI = dyn_cast<PHINode>(V))
1882 if (PHI->getParent() == BB) {
1883 Tristate Baseline = Unknown;
1884 for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
1885 Value *Incoming = PHI->getIncomingValue(i);
1886 BasicBlock *PredBB = PHI->getIncomingBlock(i);
1887 // Note that PredBB may be BB itself.
1888 Tristate Result =
1889 getPredicateOnEdge(Pred, Incoming, C, PredBB, BB, CxtI);
1890
1891 // Keep going as long as we've seen a consistent known result for
1892 // all inputs.
1893 Baseline = (i == 0) ? Result /* First iteration */
1894 : (Baseline == Result ? Baseline
1895 : Unknown); /* All others */
1896 if (Baseline == Unknown)
1897 break;
1898 }
1899 if (Baseline != Unknown)
1900 return Baseline;
1901 }
1902
1903 // For a comparison where V is defined outside this block, it's possible
1904 // that we've branched on it before. Look to see if the value is known
1905 // on all incoming edges.
1906 if (!isa<Instruction>(V) || cast<Instruction>(V)->getParent() != BB) {
1907 // For predecessor edge, determine if the comparison is true or false
1908 // on that edge. If they're all true or all false, we can conclude
1909 // the value of the comparison in this block.
1910 Tristate Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1911 if (Baseline != Unknown) {
1912 // Check that all remaining incoming values match the first one.
1913 while (++PI != PE) {
1914 Tristate Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1915 if (Ret != Baseline)
1916 break;
1917 }
1918 // If we terminated early, then one of the values didn't match.
1919 if (PI == PE) {
1920 return Baseline;
1921 }
1922 }
1923 }
1924
1925 return Unknown;
1926}
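// Illustrative note (not part of the original source): in the %phi example
// above, the PHI handling asks getPredicateOnEdge for each incoming value.
// On the edge from bb1, %v1 has range [1, 5), and on the edge from bb2, %v2
// has range [10, 20); neither range contains 8, so both edges report False
// and the merged answer for '%pred' is False even though the merged lattice
// value constantrange<1, 20> could not decide it.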
1927
1928LazyValueInfo::Tristate LazyValueInfo::getPredicateAt(unsigned P, Value *LHS,
1929 Value *RHS,
1930 Instruction *CxtI,
1931 bool UseBlockValue) {
1932 CmpInst::Predicate Pred = (CmpInst::Predicate)P;
1933
1934 if (auto *C = dyn_cast<Constant>(RHS))
1935 return getPredicateAt(P, LHS, C, CxtI, UseBlockValue);
1936 if (auto *C = dyn_cast<Constant>(LHS))
1937 return getPredicateAt(CmpInst::getSwappedPredicate(Pred), RHS, C, CxtI,
1938 UseBlockValue);
1939
1940 // Got two non-Constant values. Try to determine the comparison results based
1941 // on the block values of the two operands, e.g. because they have
1942 // non-overlapping ranges.
1943 if (UseBlockValue) {
1944 Module *M = CxtI->getModule();
1945 ValueLatticeElement L =
1946 getImpl(PImpl, AC, M).getValueInBlock(LHS, CxtI->getParent(), CxtI);
1947 if (L.isOverdefined())
1948 return LazyValueInfo::Unknown;
1949
1950 ValueLatticeElement R =
1951 getImpl(PImpl, AC, M).getValueInBlock(RHS, CxtI->getParent(), CxtI);
1952 Type *Ty = CmpInst::makeCmpResultType(LHS->getType());
1953 if (Constant *Res = L.getCompare((CmpInst::Predicate)P, Ty, R,
1954 M->getDataLayout())) {
1955 if (Res->isNullValue())
1956 return LazyValueInfo::False;
1957 if (Res->isOneValue())
1958 return LazyValueInfo::True;
1959 }
1960 }
1961 return LazyValueInfo::Unknown;
1962}
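// Illustrative example (not part of the original source) of the block-value
// comparison above: if LHS is known to be in [0, 10) and RHS in [10, 20) at
// CxtI, then for "icmp ult LHS, RHS" getCompare() folds the comparison to
// true and the query returns True, even though neither operand is a
// Constant.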
1963
1964void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1965 BasicBlock *NewSucc) {
1966 if (PImpl) {
1967 getImpl(PImpl, AC, PredBB->getModule())
1968 .threadEdge(PredBB, OldSucc, NewSucc);
1969 }
1970}
1971
1972void LazyValueInfo::eraseBlock(BasicBlock *BB) {
1973 if (PImpl) {
1974 getImpl(PImpl, AC, BB->getModule()).eraseBlock(BB);
1975 }
1976}
1977
1978void LazyValueInfo::clear(const Module *M) {
1979 if (PImpl) {
1980 getImpl(PImpl, AC, M).clear();
1981 }
1982}
1983
1984void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
1985 if (PImpl) {
1986 getImpl(PImpl, AC, F.getParent()).printLVI(F, DTree, OS);
1987 }
1988}
1989
1990// Print the LVI for the function arguments at the start of each basic block.
1991void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
1992 const BasicBlock *BB, formatted_raw_ostream &OS) {
1993 // Find if there are lattice values defined for arguments of the function.
1994 auto *F = BB->getParent();
1995 for (const auto &Arg : F->args()) {
1996 ValueLatticeElement Result = LVIImpl->getValueInBlock(
1997 const_cast<Argument *>(&Arg), const_cast<BasicBlock *>(BB));
1998 if (Result.isUnknown())
1999 continue;
2000 OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
2001 }
2002}
2003
2004// This function prints the LVI analysis for the instruction I at the beginning
2005// of various basic blocks. It relies on calculated values that are stored in
2006 // the LazyValueInfoCache, and in the absence of cached values, recalculates the
2007 // LazyValueInfo for `I`, and prints that info.
2008void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
2009 const Instruction *I, formatted_raw_ostream &OS) {
2010
2011 auto *ParentBB = I->getParent();
2012 SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI;
2013 // We can generate (solve) LVI values only for blocks that are dominated by
2014 // `I`'s parent. However, to avoid generating LVI for all dominated blocks,
2015 // which would contain redundant/uninteresting information, we print LVI only
2016 // for blocks that may use this LVI information (such as immediate successor
2017 // blocks, and blocks that contain uses of `I`).
2018 auto printResult = [&](const BasicBlock *BB) {
2019 if (!BlocksContainingLVI.insert(BB).second)
2020 return;
2021 ValueLatticeElement Result = LVIImpl->getValueInBlock(
2022 const_cast<Instruction *>(I), const_cast<BasicBlock *>(BB));
2023 OS << "; LatticeVal for: '" << *I << "' in BB: '";
2024 BB->printAsOperand(OS, false);
2025 OS << "' is: " << Result << "\n";
2026 };
2027
2028 printResult(ParentBB);
2029 // Print the LVI analysis results for the immediate successor blocks, that
2030 // are dominated by `ParentBB`.
2031 for (const auto *BBSucc : successors(ParentBB))
2032 if (DT.dominates(ParentBB, BBSucc))
2033 printResult(BBSucc);
2034
2035 // Print LVI in blocks where `I` is used.
2036 for (const auto *U : I->users())
2037 if (auto *UseI = dyn_cast<Instruction>(U))
2038 if (!isa<PHINode>(UseI) || DT.dominates(ParentBB, UseI->getParent()))
2039 printResult(UseI->getParent());
2040
2041}
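// Illustrative output (not part of the original source): for an instruction
// such as '%add = add i32 %a, 1' whose value is known to lie in [1, 11), the
// annotator emits lines of the form
//   ; LatticeVal for: '  %add = add i32 %a, 1' in BB: '%entry' is: constantrange<1, 11>
// in the parent block, in dominated successors, and in blocks that use %add.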
2042
2043namespace {
2044// Printer class for LazyValueInfo results.
2045class LazyValueInfoPrinter : public FunctionPass {
2046public:
2047 static char ID; // Pass identification, replacement for typeid
2048 LazyValueInfoPrinter() : FunctionPass(ID) {
2049 initializeLazyValueInfoPrinterPass(*PassRegistry::getPassRegistry());
2050 }
2051
2052 void getAnalysisUsage(AnalysisUsage &AU) const override {
2053 AU.setPreservesAll();
2054 AU.addRequired<LazyValueInfoWrapperPass>();
2055 AU.addRequired<DominatorTreeWrapperPass>();
2056 }
2057
2058 // Get the mandatory dominator tree analysis and pass this in to the
2059 // LVIPrinter. We cannot rely on the LVI's DT, since it's optional.
2060 bool runOnFunction(Function &F) override {
2061 dbgs() << "LVI for function '" << F.getName() << "':\n";
2062 auto &LVI = getAnalysis<LazyValueInfoWrapperPass>().getLVI();
2063 auto &DTree = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2064 LVI.printLVI(F, DTree, dbgs());
2065 return false;
2066 }
2067};
2068}
2069
2070char LazyValueInfoPrinter::ID = 0;
2071INITIALIZE_PASS_BEGIN(LazyValueInfoPrinter, "print-lazy-value-info",
2072 "Lazy Value Info Printer Pass", false, false)
2073INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2074INITIALIZE_PASS_END(LazyValueInfoPrinter, "print-lazy-value-info",
2075 "Lazy Value Info Printer Pass", false, false)