LazyValueInfo.cpp
1//===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interface for lazy computation of value constraint
10// information.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/DenseSet.h"
16#include "llvm/ADT/STLExtras.h"
26#include "llvm/IR/CFG.h"
28#include "llvm/IR/Constants.h"
29#include "llvm/IR/DataLayout.h"
30#include "llvm/IR/Dominators.h"
31#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Intrinsics.h"
35#include "llvm/IR/LLVMContext.h"
36#include "llvm/IR/Module.h"
38#include "llvm/IR/ValueHandle.h"
40#include "llvm/Support/Debug.h"
44#include <optional>
45using namespace llvm;
46using namespace PatternMatch;
47
48#define DEBUG_TYPE "lazy-value-info"
49
50// This is the number of worklist items we will process to try to discover an
51// answer for a given value.
52static const unsigned MaxProcessedPerValue = 500;
53
57 "Lazy Value Information Analysis", false, true)
61 "Lazy Value Information Analysis", false, true)
62
static cl::opt<bool> PerPredRanges(
    "lvi-per-pred-ranges", cl::Hidden, cl::init(false),
    cl::desc("Enable tracking of ranges for a value in a block for "
             "each block predecessor (default = false)"));
67
namespace llvm {
FunctionPass *createLazyValueInfoPass() {
  return new LazyValueInfoWrapperPass();
}
} // namespace llvm
73
74AnalysisKey LazyValueAnalysis::Key;
75
76/// Returns true if this lattice value represents at most one possible value.
77/// This is as precise as any lattice value can get while still representing
78/// reachable code.
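/// For example, the constant range [5, 6) and a non-integer constant such as
/// the float 1.0 each represent exactly one value, whereas a wider range or an
/// overdefined element does not.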
static bool hasSingleValue(const ValueLatticeElement &Val) {
  if (Val.isConstantRange() &&
      Val.getConstantRange().isSingleElement())
    // Integer constants are single element ranges
    return true;
  if (Val.isConstant())
    // Non integer constants
    return true;
  return false;
}
89
90//===----------------------------------------------------------------------===//
91// LazyValueInfoCache Decl
92//===----------------------------------------------------------------------===//
93
94namespace {
95 /// A callback value handle updates the cache when values are erased.
96 class LazyValueInfoCache;
97 struct LVIValueHandle final : public CallbackVH {
98 LazyValueInfoCache *Parent;
99
100 LVIValueHandle(Value *V, LazyValueInfoCache *P = nullptr)
101 : CallbackVH(V), Parent(P) { }
102
103 void deleted() override;
104 void allUsesReplacedWith(Value *V) override {
105 deleted();
106 }
107 };
108} // end anonymous namespace
109
110namespace {
111using NonNullPointerSet = SmallDenseSet<AssertingVH<Value>, 2>;
using BBLatticeElementMap =
    SmallDenseMap<AssertingVH<BasicBlock>, ValueLatticeElement, 4>;
using PredecessorValueLatticeMap =
    SmallDenseMap<AssertingVH<Value>, BBLatticeElementMap, 2>;
116
117/// This is the cache kept by LazyValueInfo which
118/// maintains information about queries across the clients' queries.
119class LazyValueInfoCache {
120 /// This is all of the cached information for one basic block. It contains
121 /// the per-value lattice elements, as well as a separate set for
122 /// overdefined values to reduce memory usage. Additionally pointers
123 /// dereferenced in the block are cached for nullability queries.
124 struct BlockCacheEntry {
125 SmallDenseMap<AssertingVH<Value>, ValueLatticeElement, 4> LatticeElements;
126 SmallDenseSet<AssertingVH<Value>, 4> OverDefined;
    // std::nullopt indicates that the nonnull pointers for this basic block
    // have not been computed yet.
129 std::optional<NonNullPointerSet> NonNullPointers;
130 // This is an extension of the above LatticeElements, caching, for each
131 // Value, a ValueLatticeElement, for each predecessor of the BB tracked by
132 // this entry.
133 std::optional<PredecessorValueLatticeMap> PredecessorLatticeElements;
134 };
135
136 /// Cached information per basic block.
137 DenseMap<PoisoningVH<BasicBlock>, std::unique_ptr<BlockCacheEntry>>
138 BlockCache;
139 /// Set of value handles used to erase values from the cache on deletion.
140 DenseSet<LVIValueHandle, DenseMapInfo<Value *>> ValueHandles;
141
142 const BlockCacheEntry *getBlockEntry(BasicBlock *BB) const {
143 auto It = BlockCache.find_as(BB);
144 if (It == BlockCache.end())
145 return nullptr;
146 return It->second.get();
147 }
148
149 BlockCacheEntry *getOrCreateBlockEntry(BasicBlock *BB) {
150 auto It = BlockCache.find_as(BB);
151 if (It == BlockCache.end()) {
152 std::unique_ptr<BlockCacheEntry> BCE =
153 std::make_unique<BlockCacheEntry>();
154 if (PerPredRanges)
155 BCE->PredecessorLatticeElements =
156 std::make_optional<PredecessorValueLatticeMap>();
157 It = BlockCache.insert({BB, std::move(BCE)}).first;
158 }
159
160 return It->second.get();
161 }
162
163 void addValueHandle(Value *Val) {
164 auto HandleIt = ValueHandles.find_as(Val);
165 if (HandleIt == ValueHandles.end())
166 ValueHandles.insert({Val, this});
167 }
168
169public:
170 void insertResult(Value *Val, BasicBlock *BB,
171 const ValueLatticeElement &Result) {
172 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
173
174 // Insert over-defined values into their own cache to reduce memory
175 // overhead.
176 if (Result.isOverdefined())
177 Entry->OverDefined.insert(Val);
178 else
179 Entry->LatticeElements.insert({Val, Result});
180
181 addValueHandle(Val);
182 }
183
184 void insertPredecessorResults(Value *Val, BasicBlock *BB,
185 BBLatticeElementMap &PredLatticeElements) {
186 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
187
188 Entry->PredecessorLatticeElements->insert({Val, PredLatticeElements});
189
190 addValueHandle(Val);
191 }
192
193 std::optional<BBLatticeElementMap>
194 getCachedPredecessorInfo(Value *V, BasicBlock *BB) const {
195 const BlockCacheEntry *Entry = getBlockEntry(BB);
196 if (!Entry)
197 return std::nullopt;
198
199 auto LatticeIt = Entry->PredecessorLatticeElements->find_as(V);
200 if (LatticeIt == Entry->PredecessorLatticeElements->end())
201 return std::nullopt;
202
203 return LatticeIt->second;
204 }
205
206 std::optional<ValueLatticeElement> getCachedValueInfo(Value *V,
207 BasicBlock *BB) const {
208 const BlockCacheEntry *Entry = getBlockEntry(BB);
209 if (!Entry)
210 return std::nullopt;
211
    if (Entry->OverDefined.count(V))
      return ValueLatticeElement::getOverdefined();

215 auto LatticeIt = Entry->LatticeElements.find_as(V);
216 if (LatticeIt == Entry->LatticeElements.end())
217 return std::nullopt;
218
219 return LatticeIt->second;
220 }
221
222 bool
223 isNonNullAtEndOfBlock(Value *V, BasicBlock *BB,
224 function_ref<NonNullPointerSet(BasicBlock *)> InitFn) {
225 BlockCacheEntry *Entry = getOrCreateBlockEntry(BB);
226 if (!Entry->NonNullPointers) {
227 Entry->NonNullPointers = InitFn(BB);
228 for (Value *V : *Entry->NonNullPointers)
229 addValueHandle(V);
230 }
231
232 return Entry->NonNullPointers->count(V);
233 }
234
235 /// clear - Empty the cache.
236 void clear() {
237 BlockCache.clear();
238 ValueHandles.clear();
239 }
240
241 /// Inform the cache that a given value has been deleted.
242 void eraseValue(Value *V);
243
244 /// This is part of the update interface to inform the cache
245 /// that a block has been deleted.
246 void eraseBlock(BasicBlock *BB);
247
248 /// Updates the cache to remove any influence an overdefined value in
249 /// OldSucc might have (unless also overdefined in NewSucc). This just
250 /// flushes elements from the cache and does not add any.
251 void threadEdgeImpl(BasicBlock *OldSucc, BasicBlock *NewSucc);
252};
253} // namespace
254
255void LazyValueInfoCache::eraseValue(Value *V) {
256 for (auto &Pair : BlockCache) {
257 Pair.second->LatticeElements.erase(V);
258 Pair.second->OverDefined.erase(V);
259 if (Pair.second->NonNullPointers)
260 Pair.second->NonNullPointers->erase(V);
261 if (PerPredRanges)
262 Pair.second->PredecessorLatticeElements->erase(V);
263 }
264
265 auto HandleIt = ValueHandles.find_as(V);
266 if (HandleIt != ValueHandles.end())
267 ValueHandles.erase(HandleIt);
268}
269
270void LVIValueHandle::deleted() {
271 // This erasure deallocates *this, so it MUST happen after we're done
272 // using any and all members of *this.
273 Parent->eraseValue(*this);
274}
275
276void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
277 // Clear all when a BB is removed.
278 if (PerPredRanges)
279 for (auto &Pair : BlockCache)
280 Pair.second->PredecessorLatticeElements->clear();
281 BlockCache.erase(BB);
282}
283
284void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
285 BasicBlock *NewSucc) {
286 // When an edge in the graph has been threaded, values that we could not
287 // determine a value for before (i.e. were marked overdefined) may be
288 // possible to solve now. We do NOT try to proactively update these values.
289 // Instead, we clear their entries from the cache, and allow lazy updating to
290 // recompute them when needed.
291
292 // The updating process is fairly simple: we need to drop cached info
293 // for all values that were marked overdefined in OldSucc, and for those same
294 // values in any successor of OldSucc (except NewSucc) in which they were
295 // also marked overdefined.
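  // Illustrative sketch (hypothetical CFG): after threading PredBB -> OldSucc
  // to PredBB -> NewSucc, a value that was overdefined in OldSucc may become
  // solvable, so its overdefined entries in OldSucc and, transitively, in
  // OldSucc's successors (except NewSucc) are dropped and recomputed lazily.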
296 std::vector<BasicBlock*> worklist;
297 worklist.push_back(OldSucc);
298
299 const BlockCacheEntry *Entry = getBlockEntry(OldSucc);
300 if (!Entry || Entry->OverDefined.empty())
301 return; // Nothing to process here.
302 SmallVector<Value *, 4> ValsToClear(Entry->OverDefined.begin(),
303 Entry->OverDefined.end());
304
305 // Use a worklist to perform a depth-first search of OldSucc's successors.
306 // NOTE: We do not need a visited list since any blocks we have already
307 // visited will have had their overdefined markers cleared already, and we
308 // thus won't loop to their successors.
309 while (!worklist.empty()) {
310 BasicBlock *ToUpdate = worklist.back();
311 worklist.pop_back();
312
313 // Skip blocks only accessible through NewSucc.
314 if (ToUpdate == NewSucc) continue;
315
316 // If a value was marked overdefined in OldSucc, and is here too...
317 auto OI = BlockCache.find_as(ToUpdate);
318 if (OI == BlockCache.end() || OI->second->OverDefined.empty())
319 continue;
320 auto &ValueSet = OI->second->OverDefined;
321
322 bool changed = false;
323 for (Value *V : ValsToClear) {
324 if (!ValueSet.erase(V))
325 continue;
326
327 // If we removed anything, then we potentially need to update
328 // blocks successors too.
329 changed = true;
330 }
331
332 if (!changed) continue;
333
334 llvm::append_range(worklist, successors(ToUpdate));
335 }
336}
337
338namespace llvm {
339namespace {
340/// An assembly annotator class to print LazyValueCache information in
341/// comments.
342class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter {
343 LazyValueInfoImpl *LVIImpl;
344 // While analyzing which blocks we can solve values for, we need the dominator
345 // information.
346 DominatorTree &DT;
347
348public:
349 LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree)
350 : LVIImpl(L), DT(DTree) {}
351
352 void emitBasicBlockStartAnnot(const BasicBlock *BB,
353 formatted_raw_ostream &OS) override;
354
355 void emitInstructionAnnot(const Instruction *I,
356 formatted_raw_ostream &OS) override;
357};
358} // namespace
359// The actual implementation of the lazy analysis and update.
class LazyValueInfoImpl {
362 /// Cached results from previous queries
363 LazyValueInfoCache TheCache;
364
365 /// This stack holds the state of the value solver during a query.
366 /// It basically emulates the callstack of the naive
367 /// recursive value lookup process.
  SmallVector<std::pair<BasicBlock *, Value *>, 8> BlockValueStack;
370 /// Keeps track of which block-value pairs are in BlockValueStack.
  DenseSet<std::pair<BasicBlock *, Value *>> BlockValueSet;
373 /// Push BV onto BlockValueStack unless it's already in there.
374 /// Returns true on success.
375 bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
376 if (!BlockValueSet.insert(BV).second)
377 return false; // It's already in the stack.
378
379 LLVM_DEBUG(dbgs() << "PUSH: " << *BV.second << " in "
380 << BV.first->getName() << "\n");
381 BlockValueStack.push_back(BV);
382 return true;
383 }
384
385 AssumptionCache *AC; ///< A pointer to the cache of @llvm.assume calls.
386 const DataLayout &DL; ///< A mandatory DataLayout
387
388 /// Declaration of the llvm.experimental.guard() intrinsic,
389 /// if it exists in the module.
390 Function *GuardDecl;
391
392 std::optional<ValueLatticeElement> getBlockValue(Value *Val, BasicBlock *BB,
393 Instruction *CxtI);
394 std::optional<ValueLatticeElement> getEdgeValue(Value *V, BasicBlock *F,
395 BasicBlock *T,
396 Instruction *CxtI = nullptr);
397
398 // These methods process one work item and may add more. A false value
399 // returned means that the work item was not completely processed and must
400 // be revisited after going through the new items.
401 bool solveBlockValue(Value *Val, BasicBlock *BB);
402 std::optional<ValueLatticeElement> solveBlockValueImpl(Value *Val,
403 BasicBlock *BB);
404 std::optional<ValueLatticeElement> solveBlockValueNonLocal(Value *Val,
405 BasicBlock *BB);
406 std::optional<ValueLatticeElement> solveBlockValuePHINode(PHINode *PN,
407 BasicBlock *BB);
408 std::optional<ValueLatticeElement> solveBlockValueSelect(SelectInst *S,
409 BasicBlock *BB);
410 std::optional<ConstantRange> getRangeFor(Value *V, Instruction *CxtI,
411 BasicBlock *BB);
  std::optional<ValueLatticeElement> solveBlockValueBinaryOpImpl(
      Instruction *I, BasicBlock *BB,
      std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
          OpFn);
416 std::optional<ValueLatticeElement>
417 solveBlockValueBinaryOp(BinaryOperator *BBI, BasicBlock *BB);
418 std::optional<ValueLatticeElement> solveBlockValueCast(CastInst *CI,
419 BasicBlock *BB);
420 std::optional<ValueLatticeElement>
421 solveBlockValueOverflowIntrinsic(WithOverflowInst *WO, BasicBlock *BB);
422 std::optional<ValueLatticeElement> solveBlockValueIntrinsic(IntrinsicInst *II,
423 BasicBlock *BB);
424 std::optional<ValueLatticeElement>
425 solveBlockValueInsertElement(InsertElementInst *IEI, BasicBlock *BB);
426 std::optional<ValueLatticeElement>
427 solveBlockValueExtractValue(ExtractValueInst *EVI, BasicBlock *BB);
428 bool isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB);
  void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
                                                     ValueLatticeElement &BBLV,
                                                     Instruction *BBI);
432
433 void solve();
434
435 // For the following methods, if UseBlockValue is true, the function may
436 // push additional values to the worklist and return nullopt. If
437 // UseBlockValue is false, it will never return nullopt.
438
439 std::optional<ValueLatticeElement>
440 getValueFromSimpleICmpCondition(CmpInst::Predicate Pred, Value *RHS,
441 const APInt &Offset, Instruction *CxtI,
442 bool UseBlockValue);
443
444 std::optional<ValueLatticeElement>
445 getValueFromICmpCondition(Value *Val, ICmpInst *ICI, bool isTrueDest,
446 bool UseBlockValue);
447 ValueLatticeElement getValueFromTrunc(Value *Val, TruncInst *Trunc,
448 bool IsTrueDest);
449
450 std::optional<ValueLatticeElement>
451 getValueFromCondition(Value *Val, Value *Cond, bool IsTrueDest,
452 bool UseBlockValue, unsigned Depth = 0);
453
454 std::optional<ValueLatticeElement> getEdgeValueLocal(Value *Val,
455 BasicBlock *BBFrom,
456 BasicBlock *BBTo,
457 bool UseBlockValue);
458
459public:
460 /// This is the query interface to determine the lattice value for the
461 /// specified Value* at the context instruction (if specified) or at the
462 /// start of the block.
  ValueLatticeElement getValueInBlock(BasicBlock *BB, Value *V,
                                      Instruction *CxtI = nullptr);
465
466 /// This is the query interface to determine the lattice value for the
467 /// specified Value* at the specified instruction using only information
468 /// from assumes/guards and range metadata. Unlike getValueInBlock(), no
469 /// recursive query is performed.
  ValueLatticeElement getValueAt(Value *V, Instruction *CxtI);
472 /// This is the query interface to determine the lattice
473 /// value for the specified Value* that is true on the specified edge.
  ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB,
                                     BasicBlock *ToBB,
                                     Instruction *CxtI = nullptr);

  ValueLatticeElement getValueAtUse(const Use &U);
480 /// Complete flush all previously computed values
481 void clear() {
482 TheCache.clear();
483 }
484
  /// Printing the LazyValueInfo Analysis.
  void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
    LazyValueInfoAnnotatedWriter Writer(this, DTree);
    F.print(OS, &Writer);
  }
490
491 /// This is part of the update interface to remove information related to this
492 /// value from the cache.
493 void forgetValue(Value *V) { TheCache.eraseValue(V); }
494
495 /// This is part of the update interface to inform the cache
496 /// that a block has been deleted.
  void eraseBlock(BasicBlock *BB) {
    TheCache.eraseBlock(BB);
  }
500
501 /// This is the update interface to inform the cache that an edge from
502 /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
503 void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);
504
  LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
                    Function *GuardDecl)
      : AC(AC), DL(DL), GuardDecl(GuardDecl) {}
508};
509} // namespace llvm
510
511void LazyValueInfoImpl::solve() {
  SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack =
      BlockValueStack;
514
515 unsigned processedCount = 0;
516 while (!BlockValueStack.empty()) {
517 processedCount++;
518 // Abort if we have to process too many values to get a result for this one.
519 // Because of the design of the overdefined cache currently being per-block
    // to avoid naming-related issues (i.e. it wants to try to give different
521 // results for the same name in different blocks), overdefined results don't
522 // get cached globally, which in turn means we will often try to rediscover
523 // the same overdefined result again and again. Once something like
524 // PredicateInfo is used in LVI or CVP, we should be able to make the
525 // overdefined cache global, and remove this throttle.
    if (processedCount > MaxProcessedPerValue) {
      LLVM_DEBUG(
          dbgs() << "Giving up on stack because we are getting too deep\n");
      // Fill in the original values
      while (!StartingStack.empty()) {
        std::pair<BasicBlock *, Value *> &e = StartingStack.back();
        TheCache.insertResult(e.second, e.first,
                              ValueLatticeElement::getOverdefined());
        StartingStack.pop_back();
      }
536 BlockValueSet.clear();
537 BlockValueStack.clear();
538 return;
539 }
540 std::pair<BasicBlock *, Value *> e = BlockValueStack.back();
541 assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");
542 unsigned StackSize = BlockValueStack.size();
543 (void) StackSize;
544
545 if (solveBlockValue(e.second, e.first)) {
546 // The work item was completely processed.
547 assert(BlockValueStack.size() == StackSize &&
548 BlockValueStack.back() == e && "Nothing should have been pushed!");
549#ifndef NDEBUG
550 std::optional<ValueLatticeElement> BBLV =
551 TheCache.getCachedValueInfo(e.second, e.first);
552 assert(BBLV && "Result should be in cache!");
554 dbgs() << "POP " << *e.second << " in " << e.first->getName() << " = "
555 << *BBLV << "\n");
556#endif
557
558 BlockValueStack.pop_back();
559 BlockValueSet.erase(e);
560 } else {
561 // More work needs to be done before revisiting.
562 assert(BlockValueStack.size() == StackSize + 1 &&
563 "Exactly one element should have been pushed!");
564 }
565 }
566}
567
568std::optional<ValueLatticeElement>
569LazyValueInfoImpl::getBlockValue(Value *Val, BasicBlock *BB,
570 Instruction *CxtI) {
571 // If already a constant, there is nothing to compute.
572 if (Constant *VC = dyn_cast<Constant>(Val))
573 return ValueLatticeElement::get(VC);
574
575 if (std::optional<ValueLatticeElement> OptLatticeVal =
576 TheCache.getCachedValueInfo(Val, BB)) {
577 intersectAssumeOrGuardBlockValueConstantRange(Val, *OptLatticeVal, CxtI);
578 return OptLatticeVal;
579 }
580
581 // We have hit a cycle, assume overdefined.
582 if (!pushBlockValue({ BB, Val }))
    return ValueLatticeElement::getOverdefined();

585 // Yet to be resolved.
586 return std::nullopt;
587}

static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) {
  switch (BBI->getOpcode()) {
  default:
    break;
  case Instruction::Call:
  case Instruction::Invoke:
    if (std::optional<ConstantRange> Range = cast<CallBase>(BBI)->getRange())
      return ValueLatticeElement::getRange(*Range);
    [[fallthrough]];
  case Instruction::Load:
    if (MDNode *Ranges = BBI->getMetadata(LLVMContext::MD_range))
      if (isa<IntegerType>(BBI->getType())) {
        return ValueLatticeElement::getRange(
            getConstantRangeFromMetadata(*Ranges));
      }
    break;
  };
  // Nothing known - will be intersected with other facts
  return ValueLatticeElement::getOverdefined();
}
609
610bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) {
611 assert(!isa<Constant>(Val) && "Value should not be constant");
612 assert(!TheCache.getCachedValueInfo(Val, BB) &&
613 "Value should not be in cache");
614
615 // Hold off inserting this value into the Cache in case we have to return
616 // false and come back later.
617 std::optional<ValueLatticeElement> Res = solveBlockValueImpl(Val, BB);
618 if (!Res)
619 // Work pushed, will revisit
620 return false;
621
622 TheCache.insertResult(Val, BB, *Res);
623 return true;
624}
625
626std::optional<ValueLatticeElement>
627LazyValueInfoImpl::solveBlockValueImpl(Value *Val, BasicBlock *BB) {
  Instruction *BBI = dyn_cast<Instruction>(Val);
  if (!BBI || BBI->getParent() != BB)
630 return solveBlockValueNonLocal(Val, BB);
631
632 if (PHINode *PN = dyn_cast<PHINode>(BBI))
633 return solveBlockValuePHINode(PN, BB);
634
635 if (auto *SI = dyn_cast<SelectInst>(BBI))
636 return solveBlockValueSelect(SI, BB);
637
  // If this value is a nonnull pointer, record its range and bail out. Note
639 // that for all other pointer typed values, we terminate the search at the
640 // definition. We could easily extend this to look through geps, bitcasts,
641 // and the like to prove non-nullness, but it's not clear that's worth it
642 // compile time wise. The context-insensitive value walk done inside
643 // isKnownNonZero gets most of the profitable cases at much less expense.
644 // This does mean that we have a sensitivity to where the defining
645 // instruction is placed, even if it could legally be hoisted much higher.
646 // That is unfortunate.
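  // For example (illustrative IR):
  //   %p = getelementptr inbounds i8, ptr nonnull %q, i64 4
  // isKnownNonZero() can already prove %p non-null from its operands, so the
  // search stops here with a "not null" lattice value instead of walking
  // further through the GEP chain.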
  PointerType *PT = dyn_cast<PointerType>(BBI->getType());
  if (PT && isKnownNonZero(BBI, DL))
    return ValueLatticeElement::getNot(ConstantPointerNull::get(PT));

651 if (BBI->getType()->isIntOrIntVectorTy()) {
652 if (auto *CI = dyn_cast<CastInst>(BBI))
653 return solveBlockValueCast(CI, BB);
654
655 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI))
656 return solveBlockValueBinaryOp(BO, BB);
657
658 if (auto *IEI = dyn_cast<InsertElementInst>(BBI))
659 return solveBlockValueInsertElement(IEI, BB);
660
661 if (auto *EVI = dyn_cast<ExtractValueInst>(BBI))
662 return solveBlockValueExtractValue(EVI, BB);
663
664 if (auto *II = dyn_cast<IntrinsicInst>(BBI))
665 return solveBlockValueIntrinsic(II, BB);
666 }
667
668 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
669 << "' - unknown inst def found.\n");
670 return getFromRangeMetadata(BBI);
671}
672
673static void AddNonNullPointer(Value *Ptr, NonNullPointerSet &PtrSet,
674 bool IsDereferenced = true) {
675 // TODO: Use NullPointerIsDefined instead.
676 if (Ptr->getType()->getPointerAddressSpace() == 0)
677 PtrSet.insert(IsDereferenced ? getUnderlyingObject(Ptr)
678 : Ptr->stripInBoundsOffsets());
679}
680
static void AddNonNullPointersByInstruction(
    Instruction *I, NonNullPointerSet &PtrSet) {
683 if (LoadInst *L = dyn_cast<LoadInst>(I)) {
684 AddNonNullPointer(L->getPointerOperand(), PtrSet);
685 } else if (StoreInst *S = dyn_cast<StoreInst>(I)) {
686 AddNonNullPointer(S->getPointerOperand(), PtrSet);
687 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
688 if (MI->isVolatile()) return;
689
690 // FIXME: check whether it has a valuerange that excludes zero?
691 ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
692 if (!Len || Len->isZero()) return;
693
694 AddNonNullPointer(MI->getRawDest(), PtrSet);
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
      AddNonNullPointer(MTI->getRawSource(), PtrSet);
697 } else if (auto *CB = dyn_cast<CallBase>(I)) {
698 for (auto &U : CB->args()) {
699 if (U->getType()->isPointerTy() &&
700 CB->paramHasNonNullAttr(CB->getArgOperandNo(&U),
701 /*AllowUndefOrPoison=*/false))
702 AddNonNullPointer(U.get(), PtrSet, /*IsDereferenced=*/false);
703 }
704 }
705}
706
707bool LazyValueInfoImpl::isNonNullAtEndOfBlock(Value *Val, BasicBlock *BB) {
  if (NullPointerIsDefined(BB->getParent(),
                           Val->getType()->getPointerAddressSpace()))
    return false;
711
712 Val = Val->stripInBoundsOffsets();
713 return TheCache.isNonNullAtEndOfBlock(Val, BB, [](BasicBlock *BB) {
714 NonNullPointerSet NonNullPointers;
715 for (Instruction &I : *BB)
716 AddNonNullPointersByInstruction(&I, NonNullPointers);
717 return NonNullPointers;
718 });
719}
720
721std::optional<ValueLatticeElement>
722LazyValueInfoImpl::solveBlockValueNonLocal(Value *Val, BasicBlock *BB) {
723 ValueLatticeElement Result; // Start Undefined.
724
725 // If this is the entry block, we must be asking about an argument.
726 if (BB->isEntryBlock()) {
727 assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
728 if (std::optional<ConstantRange> Range = cast<Argument>(Val)->getRange())
      return ValueLatticeElement::getRange(*Range);
    return ValueLatticeElement::getOverdefined();
  }
732
733 // Loop over all of our predecessors, merging what we know from them into
734 // result. If we encounter an unexplored predecessor, we eagerly explore it
735 // in a depth first manner. In practice, this has the effect of discovering
736 // paths we can't analyze eagerly without spending compile times analyzing
737 // other paths. This heuristic benefits from the fact that predecessors are
738 // frequently arranged such that dominating ones come first and we quickly
739 // find a path to function entry. TODO: We should consider explicitly
740 // canonicalizing to make this true rather than relying on this happy
741 // accident.
742 std::optional<BBLatticeElementMap> PredLatticeElements;
743 if (PerPredRanges)
744 PredLatticeElements = std::make_optional<BBLatticeElementMap>();
745 for (BasicBlock *Pred : predecessors(BB)) {
746 // Skip self loops.
747 if (Pred == BB)
748 continue;
749 std::optional<ValueLatticeElement> EdgeResult = getEdgeValue(Val, Pred, BB);
750 if (!EdgeResult)
751 // Explore that input, then return here
752 return std::nullopt;
753
754 Result.mergeIn(*EdgeResult);
755
756 // If we hit overdefined, exit early. The BlockVals entry is already set
757 // to overdefined.
758 if (Result.isOverdefined()) {
759 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
760 << "' - overdefined because of pred '"
761 << Pred->getName() << "' (non local).\n");
762 return Result;
763 }
764 if (PerPredRanges)
765 PredLatticeElements->insert({Pred, *EdgeResult});
766 }
767
768 if (PerPredRanges)
769 TheCache.insertPredecessorResults(Val, BB, *PredLatticeElements);
770
771 // Return the merged value, which is more precise than 'overdefined'.
772 assert(!Result.isOverdefined());
773 return Result;
774}
775
776std::optional<ValueLatticeElement>
777LazyValueInfoImpl::solveBlockValuePHINode(PHINode *PN, BasicBlock *BB) {
778 ValueLatticeElement Result; // Start Undefined.
779
780 // Loop over all of our predecessors, merging what we know from them into
781 // result. See the comment about the chosen traversal order in
782 // solveBlockValueNonLocal; the same reasoning applies here.
783 std::optional<BBLatticeElementMap> PredLatticeElements;
784 if (PerPredRanges)
785 PredLatticeElements = std::make_optional<BBLatticeElementMap>();
786 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
787 BasicBlock *PhiBB = PN->getIncomingBlock(i);
788 Value *PhiVal = PN->getIncomingValue(i);
789 // Note that we can provide PN as the context value to getEdgeValue, even
790 // though the results will be cached, because PN is the value being used as
791 // the cache key in the caller.
792 std::optional<ValueLatticeElement> EdgeResult =
793 getEdgeValue(PhiVal, PhiBB, BB, PN);
794 if (!EdgeResult)
795 // Explore that input, then return here
796 return std::nullopt;
797
798 Result.mergeIn(*EdgeResult);
799
800 // If we hit overdefined, exit early. The BlockVals entry is already set
801 // to overdefined.
802 if (Result.isOverdefined()) {
803 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
804 << "' - overdefined because of pred (local).\n");
805
806 return Result;
807 }
808
809 if (PerPredRanges)
810 PredLatticeElements->insert({PhiBB, *EdgeResult});
811 }
812
813 if (PerPredRanges)
814 TheCache.insertPredecessorResults(PN, BB, *PredLatticeElements);
815
816 // Return the merged value, which is more precise than 'overdefined'.
817 assert(!Result.isOverdefined() && "Possible PHI in entry block?");
818 return Result;
819}
820
821// If we can determine a constraint on the value given conditions assumed by
822// the program, intersect those constraints with BBLV
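// For example (illustrative IR), given
//   %c = icmp ult i32 %x, 10
//   call void @llvm.assume(i1 %c)
// a query for %x at a context instruction reached after the assume can
// intersect its lattice value with the range [0, 10).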
823void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
824 Value *Val, ValueLatticeElement &BBLV, Instruction *BBI) {
825 BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
826 if (!BBI)
827 return;
828
829 BasicBlock *BB = BBI->getParent();
830 for (auto &AssumeVH : AC->assumptionsFor(Val)) {
831 if (!AssumeVH)
832 continue;
833
834 // Only check assumes in the block of the context instruction. Other
835 // assumes will have already been taken into account when the value was
836 // propagated from predecessor blocks.
837 auto *I = cast<AssumeInst>(AssumeVH);
838
839 if (I->getParent() != BB || !isValidAssumeForContext(I, BBI))
840 continue;
841
842 if (AssumeVH.Index != AssumptionCache::ExprResultIdx) {
843 if (RetainedKnowledge RK = getKnowledgeFromBundle(
844 *I, I->bundle_op_info_begin()[AssumeVH.Index])) {
845 if (RK.WasOn != Val)
846 continue;
847 switch (RK.AttrKind) {
848 case Attribute::NonNull:
          BBLV = BBLV.intersect(ValueLatticeElement::getNot(
              Constant::getNullValue(RK.WasOn->getType())));
851 break;
852
853 case Attribute::Dereferenceable:
854 if (auto *CI = dyn_cast<ConstantInt>(RK.IRArgValue);
855 CI && !CI->isZero())
            BBLV = BBLV.intersect(ValueLatticeElement::getNot(
                Constant::getNullValue(RK.IRArgValue->getType())));
858 break;
859
860 default:
861 break;
862 }
863 }
864 } else {
865 BBLV = BBLV.intersect(*getValueFromCondition(Val, I->getArgOperand(0),
866 /*IsTrueDest*/ true,
867 /*UseBlockValue*/ false));
868 }
869 }
870
871 // If guards are not used in the module, don't spend time looking for them
872 if (GuardDecl && !GuardDecl->use_empty() &&
873 BBI->getIterator() != BB->begin()) {
874 for (Instruction &I :
875 make_range(std::next(BBI->getIterator().getReverse()), BB->rend())) {
      Value *Cond = nullptr;
      if (!match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(Cond))))
        continue;
      BBLV = BBLV.intersect(*getValueFromCondition(Val, Cond,
879 /*IsTrueDest*/ true,
880 /*UseBlockValue*/ false));
881 }
882 }
883
884 if (BBLV.isOverdefined()) {
885 // Check whether we're checking at the terminator, and the pointer has
886 // been dereferenced in this block.
    PointerType *PTy = dyn_cast<PointerType>(Val->getType());
    if (PTy && BB->getTerminator() == BBI &&
        isNonNullAtEndOfBlock(Val, BB))
      BBLV = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));
  }
892}
893
894std::optional<ValueLatticeElement>
895LazyValueInfoImpl::solveBlockValueSelect(SelectInst *SI, BasicBlock *BB) {
896 // Recurse on our inputs if needed
897 std::optional<ValueLatticeElement> OptTrueVal =
898 getBlockValue(SI->getTrueValue(), BB, SI);
899 if (!OptTrueVal)
900 return std::nullopt;
901 ValueLatticeElement &TrueVal = *OptTrueVal;
902
903 std::optional<ValueLatticeElement> OptFalseVal =
904 getBlockValue(SI->getFalseValue(), BB, SI);
905 if (!OptFalseVal)
906 return std::nullopt;
907 ValueLatticeElement &FalseVal = *OptFalseVal;
908
909 if (TrueVal.isConstantRange() || FalseVal.isConstantRange()) {
910 const ConstantRange &TrueCR = TrueVal.asConstantRange(SI->getType());
911 const ConstantRange &FalseCR = FalseVal.asConstantRange(SI->getType());
912 Value *LHS = nullptr;
913 Value *RHS = nullptr;
914 SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
915 // Is this a min specifically of our two inputs? (Avoid the risk of
916 // ValueTracking getting smarter looking back past our immediate inputs.)
    if (SelectPatternResult::isMinOrMax(SPR.Flavor) &&
        ((LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) ||
919 (RHS == SI->getTrueValue() && LHS == SI->getFalseValue()))) {
920 ConstantRange ResultCR = [&]() {
921 switch (SPR.Flavor) {
922 default:
923 llvm_unreachable("unexpected minmax type!");
924 case SPF_SMIN: /// Signed minimum
925 return TrueCR.smin(FalseCR);
926 case SPF_UMIN: /// Unsigned minimum
927 return TrueCR.umin(FalseCR);
928 case SPF_SMAX: /// Signed maximum
929 return TrueCR.smax(FalseCR);
930 case SPF_UMAX: /// Unsigned maximum
931 return TrueCR.umax(FalseCR);
932 };
933 }();
      return ValueLatticeElement::getRange(
          ResultCR, TrueVal.isConstantRangeIncludingUndef() ||
936 FalseVal.isConstantRangeIncludingUndef());
937 }
938
939 if (SPR.Flavor == SPF_ABS) {
940 if (LHS == SI->getTrueValue())
        return ValueLatticeElement::getRange(
            TrueCR.abs(), TrueVal.isConstantRangeIncludingUndef());
943 if (LHS == SI->getFalseValue())
        return ValueLatticeElement::getRange(
            FalseCR.abs(), FalseVal.isConstantRangeIncludingUndef());
946 }
947
948 if (SPR.Flavor == SPF_NABS) {
949 ConstantRange Zero(APInt::getZero(TrueCR.getBitWidth()));
950 if (LHS == SI->getTrueValue())
        return ValueLatticeElement::getRange(
            Zero.sub(TrueCR.abs()), FalseVal.isConstantRangeIncludingUndef());
953 if (LHS == SI->getFalseValue())
        return ValueLatticeElement::getRange(
            Zero.sub(FalseCR.abs()), FalseVal.isConstantRangeIncludingUndef());
956 }
957 }
958
959 // Can we constrain the facts about the true and false values by using the
960 // condition itself? This shows up with idioms like e.g. select(a > 5, a, 5).
961 // TODO: We could potentially refine an overdefined true value above.
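  // For example, for %s = select (icmp sgt i32 %a, 5), i32 %a, i32 5, the true
  // arm is constrained to [6, INT_MAX] and the false arm is exactly 5, so the
  // merged result for %s is known to be at least 5.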
962 Value *Cond = SI->getCondition();
963 // If the value is undef, a different value may be chosen in
964 // the select condition.
  if (isGuaranteedNotToBeUndef(Cond, AC)) {
    TrueVal =
967 TrueVal.intersect(*getValueFromCondition(SI->getTrueValue(), Cond,
968 /*IsTrueDest*/ true,
969 /*UseBlockValue*/ false));
970 FalseVal =
971 FalseVal.intersect(*getValueFromCondition(SI->getFalseValue(), Cond,
972 /*IsTrueDest*/ false,
973 /*UseBlockValue*/ false));
974 }
975
976 TrueVal.mergeIn(FalseVal);
977 return TrueVal;
978}
979
980std::optional<ConstantRange>
981LazyValueInfoImpl::getRangeFor(Value *V, Instruction *CxtI, BasicBlock *BB) {
982 std::optional<ValueLatticeElement> OptVal = getBlockValue(V, BB, CxtI);
983 if (!OptVal)
984 return std::nullopt;
985 return OptVal->asConstantRange(V->getType());
986}
987
988std::optional<ValueLatticeElement>
989LazyValueInfoImpl::solveBlockValueCast(CastInst *CI, BasicBlock *BB) {
990 // Filter out casts we don't know how to reason about before attempting to
991 // recurse on our operand. This can cut a long search short if we know we're
  // not going to be able to get any useful information anyway.
993 switch (CI->getOpcode()) {
994 case Instruction::Trunc:
995 case Instruction::SExt:
996 case Instruction::ZExt:
997 break;
998 default:
999 // Unhandled instructions are overdefined.
1000 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1001 << "' - overdefined (unknown cast).\n");
    return ValueLatticeElement::getOverdefined();
  }
1004
1005 // Figure out the range of the LHS. If that fails, we still apply the
1006 // transfer rule on the full set since we may be able to locally infer
1007 // interesting facts.
1008 std::optional<ConstantRange> LHSRes = getRangeFor(CI->getOperand(0), CI, BB);
1009 if (!LHSRes)
1010 // More work to do before applying this transfer rule.
1011 return std::nullopt;
1012 const ConstantRange &LHSRange = *LHSRes;
1013
1014 const unsigned ResultBitWidth = CI->getType()->getScalarSizeInBits();
1015
1016 // NOTE: We're currently limited by the set of operations that ConstantRange
  // can evaluate symbolically. Enhancing that set will allow us to analyze
1018 // more definitions.
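  // For example, if the operand of a zext from i8 to i32 is known to lie in
  // [10, 20), the zero-extended result also lies in [10, 20) as an i32 range,
  // while a sext would sign-extend the bounds instead.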
1019 ConstantRange Res = ConstantRange::getEmpty(ResultBitWidth);
1020 if (auto *Trunc = dyn_cast<TruncInst>(CI))
1021 Res = LHSRange.truncate(ResultBitWidth, Trunc->getNoWrapKind());
1022 else
1023 Res = LHSRange.castOp(CI->getOpcode(), ResultBitWidth);

  return ValueLatticeElement::getRange(Res);
}
1027
1028std::optional<ValueLatticeElement>
1029LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
1030 Instruction *I, BasicBlock *BB,
1031 std::function<ConstantRange(const ConstantRange &, const ConstantRange &)>
1032 OpFn) {
1033 Value *LHS = I->getOperand(0);
1034 Value *RHS = I->getOperand(1);
1035
1036 auto ThreadBinOpOverSelect =
1037 [&](Value *X, const ConstantRange &CRX, SelectInst *Y,
1038 bool XIsLHS) -> std::optional<ValueLatticeElement> {
1039 Value *Cond = Y->getCondition();
1040 // Only handle selects with constant values.
1041 Constant *TrueC = dyn_cast<Constant>(Y->getTrueValue());
1042 if (!TrueC)
1043 return std::nullopt;
1044 Constant *FalseC = dyn_cast<Constant>(Y->getFalseValue());
1045 if (!FalseC)
1046 return std::nullopt;
1048 return std::nullopt;
1049
1050 ConstantRange TrueX =
1051 CRX.intersectWith(getValueFromCondition(X, Cond, /*CondIsTrue=*/true,
1052 /*UseBlockValue=*/false)
1053 ->asConstantRange(X->getType()));
1054 ConstantRange FalseX =
1055 CRX.intersectWith(getValueFromCondition(X, Cond, /*CondIsTrue=*/false,
1056 /*UseBlockValue=*/false)
1057 ->asConstantRange(X->getType()));
1058 ConstantRange TrueY = TrueC->toConstantRange();
1059 ConstantRange FalseY = FalseC->toConstantRange();
1060
1061 if (XIsLHS)
      return ValueLatticeElement::getRange(
          OpFn(TrueX, TrueY).unionWith(OpFn(FalseX, FalseY)));
    return ValueLatticeElement::getRange(
        OpFn(TrueY, TrueX).unionWith(OpFn(FalseY, FalseX)));
1066 };
1067
1068 // Figure out the ranges of the operands. If that fails, use a
1069 // conservative range, but apply the transfer rule anyways. This
1070 // lets us pick up facts from expressions like "and i32 (call i32
1071 // @foo()), 32"
1072 std::optional<ConstantRange> LHSRes = getRangeFor(LHS, I, BB);
1073 if (!LHSRes)
1074 return std::nullopt;
1075
1076 // Try to thread binop over rhs select
1077 if (auto *SI = dyn_cast<SelectInst>(RHS)) {
1078 if (auto Res = ThreadBinOpOverSelect(LHS, *LHSRes, SI, /*XIsLHS=*/true))
1079 return *Res;
1080 }
1081
1082 std::optional<ConstantRange> RHSRes = getRangeFor(RHS, I, BB);
1083 if (!RHSRes)
1084 return std::nullopt;
1085
1086 // Try to thread binop over lhs select
1087 if (auto *SI = dyn_cast<SelectInst>(LHS)) {
1088 if (auto Res = ThreadBinOpOverSelect(RHS, *RHSRes, SI, /*XIsLHS=*/false))
1089 return *Res;
1090 }
1091
1092 const ConstantRange &LHSRange = *LHSRes;
1093 const ConstantRange &RHSRange = *RHSRes;
1094
1095 std::optional<ValueLatticeElement> MergedResult =
1096 ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange));
1097
1098 if (!PerPredRanges)
1099 return MergedResult;
1100
1101 std::optional<BBLatticeElementMap> PredLHS =
1102 TheCache.getCachedPredecessorInfo(LHS, BB);
1103 if (!PredLHS)
1104 return MergedResult;
1105 std::optional<BBLatticeElementMap> PredRHS =
1106 TheCache.getCachedPredecessorInfo(RHS, BB);
1107 if (!PredRHS)
1108 return MergedResult;
1109
1110 const BBLatticeElementMap &LHSPredMap = *PredLHS;
1111 const BBLatticeElementMap &RHSPredMap = *PredRHS;
1112
1113 BBLatticeElementMap PredLatticeElements;
1114 ValueLatticeElement OverallPredResult;
1115 for (auto *Pred : predecessors(BB)) {
1116 auto LHSIt = LHSPredMap.find_as(Pred);
1117 if (LHSIt == LHSPredMap.end())
1118 return MergedResult;
1119 const ValueLatticeElement &LHSFromPred = LHSIt->second;
1120 std::optional<ConstantRange> LHSFromPredRes =
1121 LHSFromPred.asConstantRange(LHS->getType());
1122 if (!LHSFromPredRes)
1123 return MergedResult;
1124
1125 auto RHSIt = RHSPredMap.find_as(Pred);
1126 if (RHSIt == RHSPredMap.end())
1127 return MergedResult;
1128 const ValueLatticeElement &RHSFromPred = RHSIt->second;
1129 std::optional<ConstantRange> RHSFromPredRes =
1130 RHSFromPred.asConstantRange(RHS->getType());
1131 if (!RHSFromPredRes)
1132 return MergedResult;
1133
1134 const ConstantRange &LHSFromPredRange = *LHSFromPredRes;
1135 const ConstantRange &RHSFromPredRange = *RHSFromPredRes;
1136 std::optional<ValueLatticeElement> PredResult =
1137 ValueLatticeElement::getRange(OpFn(LHSFromPredRange, RHSFromPredRange));
1138 if (!PredResult)
1139 return MergedResult;
1140 if (PredResult->isOverdefined()) {
1141 LLVM_DEBUG(
1142 dbgs() << " pred BB '" << Pred->getName() << "' for BB '"
1143 << BB->getName()
1144 << "' overdefined. Discarding all predecessor intervals.\n");
1145 return MergedResult;
1146 }
1147 PredLatticeElements.insert({Pred, *PredResult});
1148 OverallPredResult.mergeIn(*PredResult);
1149 }
1150
  // If this point is reached, all predecessors of both LHS and RHS have
  // constant ranges previously computed, so we can cache the result and use
  // OverallPredResult.
1154 TheCache.insertPredecessorResults(I, BB, PredLatticeElements);
1155
1156 LLVM_DEBUG(dbgs() << " Using predecessor intervals, evaluated " << *I
1157 << " to: " << OverallPredResult << ".\n");
1158
1159 if (!MergedResult)
1160 return OverallPredResult;
1161
1162 LLVM_DEBUG(dbgs() << " Intersecting intervals for " << *I << ": "
1163 << OverallPredResult << " and " << MergedResult << ".\n");
1164 return MergedResult->intersect(OverallPredResult);
1165}
1166
1167std::optional<ValueLatticeElement>
1168LazyValueInfoImpl::solveBlockValueBinaryOp(BinaryOperator *BO, BasicBlock *BB) {
1169 assert(BO->getOperand(0)->getType()->isSized() &&
1170 "all operands to binary operators are sized");
1171 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO)) {
1172 unsigned NoWrapKind = OBO->getNoWrapKind();
1173 return solveBlockValueBinaryOpImpl(
1174 BO, BB,
1175 [BO, NoWrapKind](const ConstantRange &CR1, const ConstantRange &CR2) {
1176 return CR1.overflowingBinaryOp(BO->getOpcode(), CR2, NoWrapKind);
1177 });
1178 }
1179
1180 return solveBlockValueBinaryOpImpl(
1181 BO, BB, [BO](const ConstantRange &CR1, const ConstantRange &CR2) {
1182 return CR1.binaryOp(BO->getOpcode(), CR2);
1183 });
1184}
1185
1186std::optional<ValueLatticeElement>
1187LazyValueInfoImpl::solveBlockValueOverflowIntrinsic(WithOverflowInst *WO,
1188 BasicBlock *BB) {
1189 return solveBlockValueBinaryOpImpl(
1190 WO, BB, [WO](const ConstantRange &CR1, const ConstantRange &CR2) {
1191 return CR1.binaryOp(WO->getBinaryOp(), CR2);
1192 });
1193}
1194
1195std::optional<ValueLatticeElement>
1196LazyValueInfoImpl::solveBlockValueIntrinsic(IntrinsicInst *II, BasicBlock *BB) {
1197 ValueLatticeElement MetadataVal = getFromRangeMetadata(II);
1198 if (!ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
1199 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1200 << "' - unknown intrinsic.\n");
1201 return MetadataVal;
1202 }
1203
  SmallVector<ConstantRange, 2> OpRanges;
  for (Value *Op : II->args()) {
1206 std::optional<ConstantRange> Range = getRangeFor(Op, II, BB);
1207 if (!Range)
1208 return std::nullopt;
1209 OpRanges.push_back(*Range);
1210 }
1211
  return ValueLatticeElement::getRange(
             ConstantRange::intrinsic(II->getIntrinsicID(), OpRanges))
1214 .intersect(MetadataVal);
1215}
1216
1217std::optional<ValueLatticeElement>
1218LazyValueInfoImpl::solveBlockValueInsertElement(InsertElementInst *IEI,
1219 BasicBlock *BB) {
1220 std::optional<ValueLatticeElement> OptEltVal =
1221 getBlockValue(IEI->getOperand(1), BB, IEI);
1222 if (!OptEltVal)
1223 return std::nullopt;
1224 ValueLatticeElement &Res = *OptEltVal;
1225
1226 std::optional<ValueLatticeElement> OptVecVal =
1227 getBlockValue(IEI->getOperand(0), BB, IEI);
1228 if (!OptVecVal)
1229 return std::nullopt;
1230
1231 // Bail out if the inserted element is a constant expression. Unlike other
1232 // ValueLattice types, these are not considered an implicit splat when a
1233 // vector type is used.
1234 // We could call ConstantFoldInsertElementInstruction here to handle these.
1235 if (OptEltVal->isConstant())
    return ValueLatticeElement::getOverdefined();

1238 Res.mergeIn(*OptVecVal);
1239 return Res;
1240}
1241
1242std::optional<ValueLatticeElement>
1243LazyValueInfoImpl::solveBlockValueExtractValue(ExtractValueInst *EVI,
1244 BasicBlock *BB) {
1245 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1246 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 0)
1247 return solveBlockValueOverflowIntrinsic(WO, BB);
1248
  // Handle extractvalue of insertvalue, which allows further simplification
  // after with.overflow intrinsics have been replaced by plain operations.
  if (Value *V = simplifyExtractValueInst(
          EVI->getAggregateOperand(), EVI->getIndices(),
1253 EVI->getDataLayout()))
1254 return getBlockValue(V, BB, EVI);
1255
1256 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1257 << "' - overdefined (unknown extractvalue).\n");
  return ValueLatticeElement::getOverdefined();
}
1260
static bool matchICmpOperand(APInt &Offset, Value *LHS, Value *Val,
                             ICmpInst::Predicate Pred) {
1263 if (LHS == Val)
1264 return true;
1265
1266 // Handle range checking idiom produced by InstCombine. We will subtract the
1267 // offset from the allowed range for RHS in this case.
1268 const APInt *C;
1269 if (match(LHS, m_AddLike(m_Specific(Val), m_APInt(C)))) {
1270 Offset = *C;
1271 return true;
1272 }
1273
1274 // Handle the symmetric case. This appears in saturation patterns like
1275 // (x == 16) ? 16 : (x + 1).
1276 if (match(Val, m_AddLike(m_Specific(LHS), m_APInt(C)))) {
1277 Offset = -*C;
1278 return true;
1279 }
1280
1281 // If (x | y) < C, then (x < C) && (y < C).
1282 if (match(LHS, m_c_Or(m_Specific(Val), m_Value())) &&
1283 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE))
1284 return true;
1285
1286 // If (x & y) > C, then (x > C) && (y > C).
1287 if (match(LHS, m_c_And(m_Specific(Val), m_Value())) &&
1288 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE))
1289 return true;
1290
1291 return false;
1292}
1293
1294/// Get value range for a "(Val + Offset) Pred RHS" condition.
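/// For example, for the condition (%x + 5) ult 10 taken as true, the allowed
/// region for (%x + 5) is [0, 10), so subtracting the offset constrains %x to
/// the wrapped range [-5, 5).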
1295std::optional<ValueLatticeElement>
1296LazyValueInfoImpl::getValueFromSimpleICmpCondition(CmpInst::Predicate Pred,
1297 Value *RHS,
1298 const APInt &Offset,
1299 Instruction *CxtI,
1300 bool UseBlockValue) {
1301 ConstantRange RHSRange(RHS->getType()->getScalarSizeInBits(),
1302 /*isFullSet=*/true);
1303 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
1304 RHSRange = ConstantRange(CI->getValue());
1305 } else if (UseBlockValue) {
1306 std::optional<ValueLatticeElement> R =
1307 getBlockValue(RHS, CxtI->getParent(), CxtI);
1308 if (!R)
1309 return std::nullopt;
1310 RHSRange = R->asConstantRange(RHS->getType());
1311 }
1312
1313 ConstantRange TrueValues =
      ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
  return ValueLatticeElement::getRange(TrueValues.subtract(Offset));
1316}
1317
1318static std::optional<ConstantRange>
getRangeViaSLT(CmpInst::Predicate Pred, APInt RHS,
               function_ref<std::optional<ConstantRange>(const APInt &)> Fn) {
1321 bool Invert = false;
1322 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
1323 Pred = ICmpInst::getInversePredicate(Pred);
1324 Invert = true;
1325 }
1326 if (Pred == ICmpInst::ICMP_SLE) {
1327 Pred = ICmpInst::ICMP_SLT;
1328 if (RHS.isMaxSignedValue())
1329 return std::nullopt; // Could also return full/empty here, if we wanted.
1330 ++RHS;
1331 }
1332 assert(Pred == ICmpInst::ICMP_SLT && "Must be signed predicate");
1333 if (auto CR = Fn(RHS))
1334 return Invert ? CR->inverse() : CR;
1335 return std::nullopt;
1336}
1337
1338/// Get value range for a "ctpop(Val) Pred RHS" condition.
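/// For example, for ctpop(%x) == 1 with i32 %x, the bounds below become
/// ValMin = 1 and ValMax = 0x80000000, so %x is constrained to the range
/// [1, 0x80000001), a superset of the powers of two.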
static ValueLatticeElement getValueFromICmpCtpop(ICmpInst::Predicate Pred,
                                                 Value *RHS) {
1341 unsigned BitWidth = RHS->getType()->getScalarSizeInBits();
1342
1343 auto *RHSConst = dyn_cast<ConstantInt>(RHS);
1344 if (!RHSConst)
    return ValueLatticeElement::getOverdefined();

1347 ConstantRange ResValRange =
1348 ConstantRange::makeExactICmpRegion(Pred, RHSConst->getValue());
1349
1350 unsigned ResMin = ResValRange.getUnsignedMin().getLimitedValue(BitWidth);
1351 unsigned ResMax = ResValRange.getUnsignedMax().getLimitedValue(BitWidth);
1352
1353 APInt ValMin = APInt::getLowBitsSet(BitWidth, ResMin);
1354 APInt ValMax = APInt::getHighBitsSet(BitWidth, ResMax);
  return ValueLatticeElement::getRange(
      ConstantRange::getNonEmpty(std::move(ValMin), ValMax + 1));
1357}
1358
1359std::optional<ValueLatticeElement> LazyValueInfoImpl::getValueFromICmpCondition(
1360 Value *Val, ICmpInst *ICI, bool isTrueDest, bool UseBlockValue) {
1361 Value *LHS = ICI->getOperand(0);
1362 Value *RHS = ICI->getOperand(1);
1363
1364 // Get the predicate that must hold along the considered edge.
1365 CmpInst::Predicate EdgePred =
1366 isTrueDest ? ICI->getPredicate() : ICI->getInversePredicate();
1367
1368 if (isa<Constant>(RHS)) {
1369 if (ICI->isEquality() && LHS == Val) {
1370 if (EdgePred == ICmpInst::ICMP_EQ)
        return ValueLatticeElement::get(cast<Constant>(RHS));
      else if (!isa<UndefValue>(RHS))
        return ValueLatticeElement::getNot(cast<Constant>(RHS));
    }
1375 }
1376
1377 Type *Ty = Val->getType();
1378 if (!Ty->isIntegerTy())
    return ValueLatticeElement::getOverdefined();

1381 unsigned BitWidth = Ty->getScalarSizeInBits();
1382 APInt Offset(BitWidth, 0);
1383 if (matchICmpOperand(Offset, LHS, Val, EdgePred))
1384 return getValueFromSimpleICmpCondition(EdgePred, RHS, Offset, ICI,
1385 UseBlockValue);
1386
1387 CmpInst::Predicate SwappedPred = CmpInst::getSwappedPredicate(EdgePred);
1388 if (matchICmpOperand(Offset, RHS, Val, SwappedPred))
1389 return getValueFromSimpleICmpCondition(SwappedPred, LHS, Offset, ICI,
1390 UseBlockValue);
1391
  if (match(LHS, m_Intrinsic<Intrinsic::ctpop>(m_Specific(Val))))
    return getValueFromICmpCtpop(EdgePred, RHS);
1394
1395 const APInt *Mask, *C;
1396 if (match(LHS, m_And(m_Specific(Val), m_APInt(Mask))) &&
1397 match(RHS, m_APInt(C))) {
1398 // If (Val & Mask) == C then all the masked bits are known and we can
1399 // compute a value range based on that.
1400 if (EdgePred == ICmpInst::ICMP_EQ) {
1401 KnownBits Known;
1402 Known.Zero = ~*C & *Mask;
1403 Known.One = *C & *Mask;
      return ValueLatticeElement::getRange(
          ConstantRange::fromKnownBits(Known, /*IsSigned*/ false));
1406 }
1407
1408 if (EdgePred == ICmpInst::ICMP_NE)
1411 }
1412
1413 // If (X urem Modulus) >= C, then X >= C.
1414 // If trunc X >= C, then X >= C.
1415 // TODO: An upper bound could be computed as well.
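  // For example, if (%x urem 8) uge 3 holds, then %x uge 3; only this lower
  // bound is used and the upper bound is left unconstrained.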
  if (match(LHS, m_CombineOr(m_URem(m_Specific(Val), m_Value()),
                             m_Trunc(m_Specific(Val)))) &&
1418 match(RHS, m_APInt(C))) {
1419 // Use the icmp region so we don't have to deal with different predicates.
1420 ConstantRange CR = ConstantRange::makeExactICmpRegion(EdgePred, *C);
1421 if (!CR.isEmptySet())
      return ValueLatticeElement::getRange(ConstantRange::getNonEmpty(
          CR.getUnsignedMin().zext(BitWidth), APInt(BitWidth, 0)));
1424 }
1425
1426 // Recognize:
1427 // icmp slt (ashr X, ShAmtC), C --> icmp slt X, C << ShAmtC
1428 // Preconditions: (C << ShAmtC) >> ShAmtC == C
1429 const APInt *ShAmtC;
1430 if (CmpInst::isSigned(EdgePred) &&
1431 match(LHS, m_AShr(m_Specific(Val), m_APInt(ShAmtC))) &&
1432 match(RHS, m_APInt(C))) {
1433 auto CR = getRangeViaSLT(
1434 EdgePred, *C, [&](const APInt &RHS) -> std::optional<ConstantRange> {
1435 APInt New = RHS << *ShAmtC;
1436 if ((New.ashr(*ShAmtC)) != RHS)
1437 return std::nullopt;
          return ConstantRange::getNonEmpty(
              APInt::getSignedMinValue(New.getBitWidth()), New);
1440 });
1441 if (CR)
      return ValueLatticeElement::getRange(*CR);
  }
1444
1445 // a - b or ptrtoint(a) - ptrtoint(b) ==/!= 0 if a ==/!= b
1446 Value *X, *Y;
1447 if (ICI->isEquality() && match(Val, m_Sub(m_Value(X), m_Value(Y)))) {
1448 // Peek through ptrtoints
1451 if ((X == LHS && Y == RHS) || (X == RHS && Y == LHS)) {
1452 Constant *NullVal = Constant::getNullValue(Val->getType());
1453 if (EdgePred == ICmpInst::ICMP_EQ)
1454 return ValueLatticeElement::get(NullVal);
1455 return ValueLatticeElement::getNot(NullVal);
1456 }
1457 }
1458
  return ValueLatticeElement::getOverdefined();
}
1461
1462ValueLatticeElement LazyValueInfoImpl::getValueFromTrunc(Value *Val,
1463 TruncInst *Trunc,
1464 bool IsTrueDest) {
1465 assert(Trunc->getType()->isIntOrIntVectorTy(1));
1466
1467 if (Trunc->getOperand(0) != Val)
    return ValueLatticeElement::getOverdefined();

1470 Type *Ty = Val->getType();
1471
1472 if (Trunc->hasNoUnsignedWrap()) {
1473 if (IsTrueDest)
1474 return ValueLatticeElement::get(ConstantInt::get(Ty, 1));
    return ValueLatticeElement::get(Constant::getNullValue(Ty));
  }
1477
1478 if (IsTrueDest)
    return ValueLatticeElement::getNot(Constant::getNullValue(Ty));
  return ValueLatticeElement::getOverdefined();
}
1482
1483// Handle conditions of the form
1484// extractvalue(op.with.overflow(%x, C), 1).
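// For example (illustrative IR), with
//   %s  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 1)
//   %ov = extractvalue { i32, i1 } %s, 1
// the edge on which %ov is false constrains %x to the no-wrap region
// [INT_MIN, INT_MAX), i.e. every value except INT_MAX.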
static ValueLatticeElement getValueFromOverflowCondition(
    Value *Val, WithOverflowInst *WO, bool IsTrueDest) {
1487 // TODO: This only works with a constant RHS for now. We could also compute
1488 // the range of the RHS, but this doesn't fit into the current structure of
1489 // the edge value calculation.
1490 const APInt *C;
1491 if (WO->getLHS() != Val || !match(WO->getRHS(), m_APInt(C)))
    return ValueLatticeElement::getOverdefined();

1494 // Calculate the possible values of %x for which no overflow occurs.
  ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
      WO->getBinaryOp(), *C, WO->getNoWrapKind());
1497
  // If overflow is false, %x is constrained to NWR. If overflow is true, %x is
  // constrained to its inverse (all values that might cause overflow).
1500 if (IsTrueDest)
1501 NWR = NWR.inverse();
  return ValueLatticeElement::getRange(NWR);
}
1504
1505std::optional<ValueLatticeElement>
1506LazyValueInfoImpl::getValueFromCondition(Value *Val, Value *Cond,
1507 bool IsTrueDest, bool UseBlockValue,
1508 unsigned Depth) {
1509 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond))
1510 return getValueFromICmpCondition(Val, ICI, IsTrueDest, UseBlockValue);
1511
1512 if (auto *Trunc = dyn_cast<TruncInst>(Cond))
1513 return getValueFromTrunc(Val, Trunc, IsTrueDest);
1514
1515 if (auto *EVI = dyn_cast<ExtractValueInst>(Cond))
1516 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1517 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1)
1518 return getValueFromOverflowCondition(Val, WO, IsTrueDest);
1519
  if (++Depth == MaxAnalysisRecursionDepth)
    return ValueLatticeElement::getOverdefined();

1523 Value *N;
1524 if (match(Cond, m_Not(m_Value(N))))
1525 return getValueFromCondition(Val, N, !IsTrueDest, UseBlockValue, Depth);
1526
1527 Value *L, *R;
1528 bool IsAnd;
1529 if (match(Cond, m_LogicalAnd(m_Value(L), m_Value(R))))
1530 IsAnd = true;
1531 else if (match(Cond, m_LogicalOr(m_Value(L), m_Value(R))))
1532 IsAnd = false;
1533 else
    return ValueLatticeElement::getOverdefined();

1536 std::optional<ValueLatticeElement> LV =
1537 getValueFromCondition(Val, L, IsTrueDest, UseBlockValue, Depth);
1538 if (!LV)
1539 return std::nullopt;
1540 std::optional<ValueLatticeElement> RV =
1541 getValueFromCondition(Val, R, IsTrueDest, UseBlockValue, Depth);
1542 if (!RV)
1543 return std::nullopt;
1544
1545 // if (L && R) -> intersect L and R
1546 // if (!(L || R)) -> intersect !L and !R
1547 // if (L || R) -> union L and R
1548 // if (!(L && R)) -> union !L and !R
1549 if (IsTrueDest ^ IsAnd) {
1550 LV->mergeIn(*RV);
1551 return *LV;
1552 }
1553
1554 return LV->intersect(*RV);
1555}
1556
1557// Return true if Usr has Op as an operand, otherwise false.
1558static bool usesOperand(User *Usr, Value *Op) {
1559 return is_contained(Usr->operands(), Op);
1560}
1561
1562// Return true if the instruction type of Val is supported by
1563// constantFoldUser(). Currently CastInst, BinaryOperator and FreezeInst only.
1564// Call this before calling constantFoldUser() to find out if it's even worth
1565// attempting to call it.
1566static bool isOperationFoldable(User *Usr) {
1567 return isa<CastInst>(Usr) || isa<BinaryOperator>(Usr) || isa<FreezeInst>(Usr);
1568}
1569
1570// Check if Usr can be simplified to an integer constant when the value of one
1571// of its operands Op is an integer constant OpConstVal. If so, return it as an
1572// lattice value range with a single element or otherwise return an overdefined
1573// lattice value.
static ValueLatticeElement constantFoldUser(User *Usr, Value *Op,
                                            const APInt &OpConstVal,
1576 const DataLayout &DL) {
1577 assert(isOperationFoldable(Usr) && "Precondition");
1578 Constant* OpConst = Constant::getIntegerValue(Op->getType(), OpConstVal);
1579 // Check if Usr can be simplified to a constant.
1580 if (auto *CI = dyn_cast<CastInst>(Usr)) {
1581 assert(CI->getOperand(0) == Op && "Operand 0 isn't Op");
1582 if (auto *C = dyn_cast_or_null<ConstantInt>(
1583 simplifyCastInst(CI->getOpcode(), OpConst,
1584 CI->getDestTy(), DL))) {
1585 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1586 }
1587 } else if (auto *BO = dyn_cast<BinaryOperator>(Usr)) {
1588 bool Op0Match = BO->getOperand(0) == Op;
1589 bool Op1Match = BO->getOperand(1) == Op;
1590 assert((Op0Match || Op1Match) &&
1591 "Operand 0 nor Operand 1 isn't a match");
1592 Value *LHS = Op0Match ? OpConst : BO->getOperand(0);
1593 Value *RHS = Op1Match ? OpConst : BO->getOperand(1);
1594 if (auto *C = dyn_cast_or_null<ConstantInt>(
1595 simplifyBinOp(BO->getOpcode(), LHS, RHS, DL))) {
1596 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1597 }
1598 } else if (isa<FreezeInst>(Usr)) {
1599 assert(cast<FreezeInst>(Usr)->getOperand(0) == Op && "Operand 0 isn't Op");
1600 return ValueLatticeElement::getRange(ConstantRange(OpConstVal));
1601 }
  return ValueLatticeElement::getOverdefined();
}
1604
1605/// Compute the value of Val on the edge BBFrom -> BBTo.
1606std::optional<ValueLatticeElement>
1607LazyValueInfoImpl::getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
1608 BasicBlock *BBTo, bool UseBlockValue) {
1609 // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
1610 // know that v != 0.
1611 if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
1612 // If this is a conditional branch and only one successor goes to BBTo, then
1613 // we may be able to infer something from the condition.
1614 if (BI->isConditional() &&
1615 BI->getSuccessor(0) != BI->getSuccessor(1)) {
1616 bool isTrueDest = BI->getSuccessor(0) == BBTo;
1617 assert(BI->getSuccessor(!isTrueDest) == BBTo &&
1618 "BBTo isn't a successor of BBFrom");
1619 Value *Condition = BI->getCondition();
1620
1621 // If V is the condition of the branch itself, then we know exactly what
1622 // it is.
1623 // NB: The condition on a `br` can't be a vector type.
1624 if (Condition == Val)
1625 return ValueLatticeElement::get(ConstantInt::get(
1626 Type::getInt1Ty(Val->getContext()), isTrueDest));
1627
1628 // If the condition of the branch is an equality comparison, we may be
1629 // able to infer the value.
1630 std::optional<ValueLatticeElement> Result =
1631 getValueFromCondition(Val, Condition, isTrueDest, UseBlockValue);
1632 if (!Result)
1633 return std::nullopt;
1634
1635 if (!Result->isOverdefined())
1636 return Result;
1637
1638 if (User *Usr = dyn_cast<User>(Val)) {
1639 assert(Result->isOverdefined() && "Result isn't overdefined");
1640 // Check with isOperationFoldable() first to avoid linearly iterating
1641 // over the operands unnecessarily which can be expensive for
1642 // instructions with many operands.
1643 if (isa<IntegerType>(Usr->getType()) && isOperationFoldable(Usr)) {
1644 const DataLayout &DL = BBTo->getDataLayout();
1645 if (usesOperand(Usr, Condition)) {
1646 // If Val has Condition as an operand and Val can be folded into a
1647 // constant with either Condition == true or Condition == false,
1648 // propagate the constant.
1649 // eg.
1650 // ; %Val is true on the edge to %then.
1651 // %Val = and i1 %Condition, true.
1652 // br %Condition, label %then, label %else
1653 APInt ConditionVal(1, isTrueDest ? 1 : 0);
1654 Result = constantFoldUser(Usr, Condition, ConditionVal, DL);
1655 } else if (isa<TruncInst, ZExtInst, SExtInst>(Usr)) {
1656 ValueLatticeElement OpLatticeVal =
1657 *getValueFromCondition(Usr->getOperand(0), Condition,
1658 isTrueDest, /*UseBlockValue*/ false);
1659
1660 if (OpLatticeVal.isConstantRange()) {
1661 const unsigned ResultBitWidth =
1662 Usr->getType()->getScalarSizeInBits();
1663 if (auto *Trunc = dyn_cast<TruncInst>(Usr))
1664 return ValueLatticeElement::getRange(
1665 OpLatticeVal.getConstantRange().truncate(
1666 ResultBitWidth, Trunc->getNoWrapKind()));
1667
1668 return ValueLatticeElement::getRange(
1669 OpLatticeVal.getConstantRange().castOp(
1670 cast<CastInst>(Usr)->getOpcode(), ResultBitWidth));
1671 }
1672 if (OpLatticeVal.isConstant()) {
1673 Constant *C = OpLatticeVal.getConstant();
1674 if (auto *CastC = ConstantFoldCastOperand(
1675 cast<CastInst>(Usr)->getOpcode(), C, Usr->getType(), DL))
1676 return ValueLatticeElement::get(CastC);
1677 }
1678 return ValueLatticeElement::getOverdefined();
1679 } else {
1680 // If one of Val's operand has an inferred value, we may be able to
1681 // infer the value of Val.
1682 // eg.
1683 // ; %Val is 94 on the edge to %then.
1684 // %Val = add i8 %Op, 1
1685 // %Condition = icmp eq i8 %Op, 93
1686 // br i1 %Condition, label %then, label %else
1687 for (unsigned i = 0; i < Usr->getNumOperands(); ++i) {
1688 Value *Op = Usr->getOperand(i);
1689 ValueLatticeElement OpLatticeVal = *getValueFromCondition(
1690 Op, Condition, isTrueDest, /*UseBlockValue*/ false);
1691 if (std::optional<APInt> OpConst =
1692 OpLatticeVal.asConstantInteger()) {
1693 Result = constantFoldUser(Usr, Op, *OpConst, DL);
1694 break;
1695 }
1696 }
1697 }
1698 }
1699 }
1700 if (!Result->isOverdefined())
1701 return Result;
1702 }
1703 }
1704
1705 // If the edge was formed by a switch on the value, then we may know exactly
1706 // what it is.
1707 if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
1708 Value *Condition = SI->getCondition();
1709 if (!isa<IntegerType>(Val->getType()))
1710 return ValueLatticeElement::getOverdefined();
1711 bool ValUsesConditionAndMayBeFoldable = false;
1712 if (Condition != Val) {
1713 // Check if Val has Condition as an operand.
1714 if (User *Usr = dyn_cast<User>(Val))
1715 ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) &&
1716 usesOperand(Usr, Condition);
1717 if (!ValUsesConditionAndMayBeFoldable)
1718 return ValueLatticeElement::getOverdefined();
1719 }
1720 assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
1721 "Condition != Val nor Val doesn't use Condition");
1722
1723 bool DefaultCase = SI->getDefaultDest() == BBTo;
1724 unsigned BitWidth = Val->getType()->getIntegerBitWidth();
1725 ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);
1726
1727 for (auto Case : SI->cases()) {
1728 APInt CaseValue = Case.getCaseValue()->getValue();
1729 ConstantRange EdgeVal(CaseValue);
1730 if (ValUsesConditionAndMayBeFoldable) {
1731 User *Usr = cast<User>(Val);
1732 const DataLayout &DL = BBTo->getDataLayout();
1733 ValueLatticeElement EdgeLatticeVal =
1734 constantFoldUser(Usr, Condition, CaseValue, DL);
1735 if (EdgeLatticeVal.isOverdefined())
1736 return ValueLatticeElement::getOverdefined();
1737 EdgeVal = EdgeLatticeVal.getConstantRange();
1738 }
1739 if (DefaultCase) {
1740 // It is possible that the default destination is the destination of
1741 // some cases. We cannot perform difference for those cases.
1742 // We know Condition != CaseValue in BBTo. In some cases we can use
1743 // this to infer Val == f(Condition) is != f(CaseValue). For now, we
1744 // only do this when f is identity (i.e. Val == Condition), but we
1745 // should be able to do this for any injective f.
1746 if (Case.getCaseSuccessor() != BBTo && Condition == Val)
1747 EdgesVals = EdgesVals.difference(EdgeVal);
1748 } else if (Case.getCaseSuccessor() == BBTo)
1749 EdgesVals = EdgesVals.unionWith(EdgeVal);
1750 }
1751 return ValueLatticeElement::getRange(std::move(EdgesVals));
1752 }
1753 return ValueLatticeElement::getOverdefined();
1754}
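// For example, for "switch i32 %v, label %other [ i32 1, label %bb
//                                                 i32 2, label %bb ]"
// the edge to %bb yields the range [1, 3) for %v, while the edge to the
// default destination %other yields the full range minus the case values.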
1755
1756/// Compute the value of Val on the edge BBFrom -> BBTo or the value at
1757/// the basic block if the edge does not constrain Val.
1758std::optional<ValueLatticeElement>
1759LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom,
1760 BasicBlock *BBTo, Instruction *CxtI) {
1761 // If already a constant, there is nothing to compute.
1762 if (Constant *VC = dyn_cast<Constant>(Val))
1763 return ValueLatticeElement::get(VC);
1764
1765 std::optional<ValueLatticeElement> LocalResult =
1766 getEdgeValueLocal(Val, BBFrom, BBTo, /*UseBlockValue*/ true);
1767 if (!LocalResult)
1768 return std::nullopt;
1769
1770 if (hasSingleValue(*LocalResult))
1771 // Can't get any more precise here
1772 return LocalResult;
1773
1774 std::optional<ValueLatticeElement> OptInBlock =
1775 getBlockValue(Val, BBFrom, BBFrom->getTerminator());
1776 if (!OptInBlock)
1777 return std::nullopt;
1778 ValueLatticeElement &InBlock = *OptInBlock;
1779
1780 // We can use the context instruction (generically the ultimate instruction
1781 // the calling pass is trying to simplify) here, even though the result of
1782 // this function is generally cached when called from the solve* functions
1783 // (and that cached result might be used with queries using a different
1784 // context instruction), because when this function is called from the solve*
1785 // functions, the context instruction is not provided. When called from
1786 // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided,
1787 // but then the result is not cached.
1788 intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock, CxtI);
1789
1790 return LocalResult->intersect(InBlock);
1791}
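// For example, if the branch condition restricts Val to [0, 10) on this edge
// while the block value of Val in BBFrom is [5, 20), the intersection [5, 10)
// is returned; edge information can only tighten the block-level result.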
1792
1793ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
1794 Instruction *CxtI) {
1795 LLVM_DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
1796 << BB->getName() << "'\n");
1797
1798 assert(BlockValueStack.empty() && BlockValueSet.empty());
1799 std::optional<ValueLatticeElement> OptResult = getBlockValue(V, BB, CxtI);
1800 if (!OptResult) {
1801 solve();
1802 OptResult = getBlockValue(V, BB, CxtI);
1803 assert(OptResult && "Value not available after solving");
1804 }
1805
1806 LLVM_DEBUG(dbgs() << " Result = " << *OptResult << "\n");
1807 return *OptResult;
1808}
1809
1810ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
1811 LLVM_DEBUG(dbgs() << "LVI Getting value " << *V << " at '" << CxtI->getName()
1812 << "'\n");
1813
1814 if (auto *C = dyn_cast<Constant>(V))
1815 return ValueLatticeElement::get(C);
1816
1817 ValueLatticeElement Result = ValueLatticeElement::getOverdefined();
1818 if (auto *I = dyn_cast<Instruction>(V))
1819 Result = getFromRangeMetadata(I);
1820 intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
1821
1822 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1823 return Result;
1824}
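// Note that, unlike getValueInBlock(), this query does not run the lazy
// solver; it only combines !range metadata on V with any assumptions or
// guards that apply at CxtI.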
1825
1826ValueLatticeElement LazyValueInfoImpl::
1827getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
1828 Instruction *CxtI) {
1829 LLVM_DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
1830 << FromBB->getName() << "' to '" << ToBB->getName()
1831 << "'\n");
1832
1833 std::optional<ValueLatticeElement> Result =
1834 getEdgeValue(V, FromBB, ToBB, CxtI);
1835 while (!Result) {
1836 // As the worklist only explicitly tracks block values (but not edge values)
1837 // we may have to call solve() multiple times, as the edge value calculation
1838 // may request additional block values.
1839 solve();
1840 Result = getEdgeValue(V, FromBB, ToBB, CxtI);
1841 }
1842
1843 LLVM_DEBUG(dbgs() << " Result = " << *Result << "\n");
1844 return *Result;
1845}
1846
1847ValueLatticeElement LazyValueInfoImpl::getValueAtUse(const Use &U) {
1848 Value *V = U.get();
1849 auto *CxtI = cast<Instruction>(U.getUser());
1850 ValueLatticeElement VL = getValueInBlock(V, CxtI->getParent(), CxtI);
1851
1852 // Check whether the only (possibly transitive) use of the value is in a
1853 // position where V can be constrained by a select or branch condition.
1854 const Use *CurrU = &U;
1855 // TODO: Increase limit?
1856 const unsigned MaxUsesToInspect = 3;
1857 for (unsigned I = 0; I < MaxUsesToInspect; ++I) {
1858 std::optional<ValueLatticeElement> CondVal;
1859 auto *CurrI = cast<Instruction>(CurrU->getUser());
1860 if (auto *SI = dyn_cast<SelectInst>(CurrI)) {
1861 // If the value is undef, a different value may be chosen in
1862 // the select condition and at use.
1863 if (!isGuaranteedNotToBeUndef(SI->getCondition(), AC))
1864 break;
1865 if (CurrU->getOperandNo() == 1)
1866 CondVal =
1867 *getValueFromCondition(V, SI->getCondition(), /*IsTrueDest*/ true,
1868 /*UseBlockValue*/ false);
1869 else if (CurrU->getOperandNo() == 2)
1870 CondVal =
1871 *getValueFromCondition(V, SI->getCondition(), /*IsTrueDest*/ false,
1872 /*UseBlockValue*/ false);
1873 } else if (auto *PHI = dyn_cast<PHINode>(CurrI)) {
1874 // TODO: Use non-local query?
1875 CondVal = *getEdgeValueLocal(V, PHI->getIncomingBlock(*CurrU),
1876 PHI->getParent(), /*UseBlockValue*/ false);
1877 }
1878 if (CondVal)
1879 VL = VL.intersect(*CondVal);
1880
1881 // Only follow one-use chain, to allow direct intersection of conditions.
1882 // If there are multiple uses, we would have to intersect with the union of
1883 // all conditions at different uses.
1884 // Stop walking if we hit a non-speculatable instruction. Even if the
1885 // result is only used under a specific condition, executing the
1886 // instruction itself may cause side effects or UB already.
1887 // This also disallows looking through phi nodes: If the phi node is part
1888 // of a cycle, we might end up reasoning about values from different cycle
1889 // iterations (PR60629).
1890 if (!CurrI->hasOneUse() ||
1891 !isSafeToSpeculativelyExecuteWithVariableReplaced(
1892 CurrI, /*IgnoreUBImplyingAttrs=*/false))
1893 break;
1894 CurrU = &*CurrI->use_begin();
1895 }
1896 return VL;
1897}
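// For example, for "%r = select i1 %c, i8 %v, i8 7", the use of %v in the
// true arm lets the block value of %v be intersected with whatever %c being
// true implies about %v (provided %c is not undef).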
1898
1899void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1900 BasicBlock *NewSucc) {
1901 TheCache.threadEdgeImpl(OldSucc, NewSucc);
1902}
1903
1904//===----------------------------------------------------------------------===//
1905// LazyValueInfo Impl
1906//===----------------------------------------------------------------------===//
1907
1908bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
1909 Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1910
1911 if (auto *Impl = Info.getImpl())
1912 Impl->clear();
1913
1914 // Fully lazy.
1915 return false;
1916}
1917
1918void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1919 AU.setPreservesAll();
1920 AU.addRequired<AssumptionCacheTracker>();
1921}
1922
1923
1924LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }
1925
1926/// This lazily constructs the LazyValueInfoImpl.
1927LazyValueInfoImpl &LazyValueInfo::getOrCreateImpl(const Module *M) {
1928 if (!PImpl) {
1929 assert(M && "getCache() called with a null Module");
1930 const DataLayout &DL = M->getDataLayout();
1931 Function *GuardDecl =
1932 Intrinsic::getDeclarationIfExists(M, Intrinsic::experimental_guard);
1933 PImpl = new LazyValueInfoImpl(AC, DL, GuardDecl);
1934 }
1935 return *PImpl;
1936}
1937
1938LazyValueInfoImpl *LazyValueInfo::getImpl() { return PImpl; }
1939
1940LazyValueInfo::~LazyValueInfo() { releaseMemory(); }
1941
1942void LazyValueInfo::releaseMemory() {
1943 // If the cache was allocated, free it.
1944 if (auto *Impl = getImpl()) {
1945 delete &*Impl;
1946 PImpl = nullptr;
1947 }
1948}
1949
1950bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA,
1951 FunctionAnalysisManager::Invalidator &Inv) {
1952 // We need to invalidate if we have either failed to preserve this analysis's
1953 // result directly or if any of its dependencies have been invalidated.
1954 auto PAC = PA.getChecker<LazyValueAnalysis>();
1955 if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()))
1956 return true;
1957
1958 return false;
1959}
1960
1961void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }
1962
1963LazyValueInfo LazyValueAnalysis::run(Function &F,
1964 FunctionAnalysisManager &FAM) {
1965 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
1966
1967 return LazyValueInfo(&AC, &F.getDataLayout());
1968}
1969
1970/// Returns true if we can statically tell that this value will never be a
1971/// "useful" constant. In practice, this means we've got something like an
1972/// alloca or a malloc call for which a comparison against a constant can
1973/// only be guarding dead code. Note that we are potentially giving up some
1974/// precision in dead code (a constant result) in favour of avoiding an
1975/// expensive search for an easily answered common query.
1976static bool isKnownNonConstant(Value *V) {
1977 V = V->stripPointerCasts();
1978 // The return val of alloc cannot be a Constant.
1979 if (isa<AllocaInst>(V))
1980 return true;
1981 return false;
1982}
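// For example, the address returned by "%p = alloca i32" never equals a
// meaningful constant, so a comparison of %p against a constant could only be
// guarding dead code and is not worth an expensive query.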
1983
1984Constant *LazyValueInfo::getConstant(Value *V, Instruction *CxtI) {
1985 // Bail out early if V is known not to be a Constant.
1986 if (isKnownNonConstant(V))
1987 return nullptr;
1988
1989 BasicBlock *BB = CxtI->getParent();
1990 ValueLatticeElement Result =
1991 getOrCreateImpl(BB->getModule()).getValueInBlock(V, BB, CxtI);
1992
1993 if (Result.isConstant())
1994 return Result.getConstant();
1995 if (Result.isConstantRange()) {
1996 const ConstantRange &CR = Result.getConstantRange();
1997 if (const APInt *SingleVal = CR.getSingleElement())
1998 return ConstantInt::get(V->getType(), *SingleVal);
1999 }
2000 return nullptr;
2001}
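// A client pass such as CorrelatedValuePropagation can use this to replace a
// value with a constant: e.g., when the block value of %x at the query point
// is the single-element range [7, 8), getConstant() returns the constant 7.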
2002
2003ConstantRange LazyValueInfo::getConstantRange(Value *V, Instruction *CxtI,
2004 bool UndefAllowed) {
2005 BasicBlock *BB = CxtI->getParent();
2006 ValueLatticeElement Result =
2007 getOrCreateImpl(BB->getModule()).getValueInBlock(V, BB, CxtI);
2008 return Result.asConstantRange(V->getType(), UndefAllowed);
2009}
2010
2011ConstantRange LazyValueInfo::getConstantRangeAtUse(const Use &U,
2012 bool UndefAllowed) {
2013 auto *Inst = cast<Instruction>(U.getUser());
2014 ValueLatticeElement Result =
2015 getOrCreateImpl(Inst->getModule()).getValueAtUse(U);
2016 return Result.asConstantRange(U->getType(), UndefAllowed);
2017}
2018
2019/// Determine whether the specified value is known to be a
2020/// constant on the specified edge. Return null if not.
2021Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
2022 BasicBlock *ToBB,
2023 Instruction *CxtI) {
2024 Module *M = FromBB->getModule();
2025 ValueLatticeElement Result =
2026 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
2027
2028 if (Result.isConstant())
2029 return Result.getConstant();
2030 if (Result.isConstantRange()) {
2031 const ConstantRange &CR = Result.getConstantRange();
2032 if (const APInt *SingleVal = CR.getSingleElement())
2033 return ConstantInt::get(V->getType(), *SingleVal);
2034 }
2035 return nullptr;
2036}
2037
2038ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
2039 BasicBlock *FromBB,
2040 BasicBlock *ToBB,
2041 Instruction *CxtI) {
2042 Module *M = FromBB->getModule();
2043 ValueLatticeElement Result =
2044 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
2045 // TODO: Should undef be allowed here?
2046 return Result.asConstantRange(V->getType(), /*UndefAllowed*/ true);
2047}
2048
2049static Constant *getPredicateResult(CmpInst::Predicate Pred, Constant *C,
2050 const ValueLatticeElement &Val,
2051 const DataLayout &DL) {
2052 // If we know the value is a constant, evaluate the conditional.
2053 if (Val.isConstant())
2054 return ConstantFoldCompareInstOperands(Pred, Val.getConstant(), C, DL);
2055
2056 Type *ResTy = CmpInst::makeCmpResultType(C->getType());
2057 if (Val.isConstantRange()) {
2058 const ConstantRange &CR = Val.getConstantRange();
2059 ConstantRange RHS = C->toConstantRange();
2060 if (CR.icmp(Pred, RHS))
2061 return ConstantInt::getTrue(ResTy);
2062 if (CR.icmp(CmpInst::getInversePredicate(Pred), RHS))
2063 return ConstantInt::getFalse(ResTy);
2064 return nullptr;
2065 }
2066
2067 if (Val.isNotConstant()) {
2068 // If this is an equality comparison, we can try to fold it knowing that
2069 // "V != C1".
2070 if (Pred == ICmpInst::ICMP_EQ) {
2071 // !C1 == C -> false iff C1 == C.
2072 Constant *Res = ConstantFoldCompareInstOperands(
2073 ICmpInst::ICMP_NE, Val.getNotConstant(), C, DL);
2074 if (Res && Res->isNullValue())
2075 return ConstantInt::getFalse(ResTy);
2076 } else if (Pred == ICmpInst::ICMP_NE) {
2077 // !C1 != C -> true iff C1 == C.
2078 Constant *Res = ConstantFoldCompareInstOperands(
2079 ICmpInst::ICMP_NE, Val.getNotConstant(), C, DL);
2080 if (Res && Res->isNullValue())
2081 return ConstantInt::getTrue(ResTy);
2082 }
2083 return nullptr;
2084 }
2085
2086 return nullptr;
2087}
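// For example, with Val = constantrange<0, 10>, Pred = ICMP_ULT and C = 10,
// CR.icmp() proves the predicate for every value in the range, so the query
// folds to the i1 constant true.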
2088
2089/// Determine whether the specified value comparison with a constant is known to
2090/// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
2091Constant *LazyValueInfo::getPredicateOnEdge(CmpInst::Predicate Pred, Value *V,
2092 Constant *C, BasicBlock *FromBB,
2093 BasicBlock *ToBB,
2094 Instruction *CxtI) {
2095 Module *M = FromBB->getModule();
2096 ValueLatticeElement Result =
2097 getOrCreateImpl(M).getValueOnEdge(V, FromBB, ToBB, CxtI);
2098
2099 return getPredicateResult(Pred, C, Result, M->getDataLayout());
2100}
2101
2102Constant *LazyValueInfo::getPredicateAt(CmpInst::Predicate Pred, Value *V,
2103 Constant *C, Instruction *CxtI,
2104 bool UseBlockValue) {
2105 // Whether a value is or is not NonNull is a common query. If
2106 // isKnownNonZero can tell us the result of the predicate, we can
2107 // return it quickly. But this is only a fastpath, and falling
2108 // through would still be correct.
2109 Module *M = CxtI->getModule();
2110 const DataLayout &DL = M->getDataLayout();
2111 if (V->getType()->isPointerTy() && C->isNullValue() &&
2112 isKnownNonZero(V->stripPointerCastsSameRepresentation(), DL)) {
2113 Type *ResTy = CmpInst::makeCmpResultType(C->getType());
2114 if (Pred == ICmpInst::ICMP_EQ)
2115 return ConstantInt::getFalse(ResTy);
2116 else if (Pred == ICmpInst::ICMP_NE)
2117 return ConstantInt::getTrue(ResTy);
2118 }
2119
2120 auto &Impl = getOrCreateImpl(M);
2121 ValueLatticeElement Result =
2122 UseBlockValue ? Impl.getValueInBlock(V, CxtI->getParent(), CxtI)
2123 : Impl.getValueAt(V, CxtI);
2124 Constant *Ret = getPredicateResult(Pred, C, Result, DL);
2125 if (Ret)
2126 return Ret;
2127
2128 // Note: The following bit of code is somewhat distinct from the rest of LVI;
2129 // LVI as a whole tries to compute a lattice value which is conservatively
2130 // correct at a given location. In this case, we have a predicate which we
2131 // weren't able to prove about the merged result, and we're pushing that
2132 // predicate back along each incoming edge to see if we can prove it
2133 // separately for each input. As a motivating example, consider:
2134 // bb1:
2135 // %v1 = ... ; constantrange<1, 5>
2136 // br label %merge
2137 // bb2:
2138 // %v2 = ... ; constantrange<10, 20>
2139 // br label %merge
2140 // merge:
2141 // %phi = phi [%v1, %v2] ; constantrange<1,20>
2142 // %pred = icmp eq i32 %phi, 8
2143 // We can't tell from the lattice value for '%phi' that '%pred' is false
2144 // along each path, but by checking the predicate over each input separately,
2145 // we can.
2146 // We limit the search to one step backwards from the current BB and value.
2147 // We could consider extending this to search further backwards through the
2148 // CFG and/or value graph, but there are non-obvious compile time vs quality
2149 // tradeoffs.
2150 BasicBlock *BB = CxtI->getParent();
2151
2152 // Function entry or an unreachable block. Bail to avoid confusing
2153 // analysis below.
2154 pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
2155 if (PI == PE)
2156 return nullptr;
2157
2158 // If V is a PHI node in the same block as the context, we need to ask
2159 // questions about the predicate as applied to the incoming value along
2160 // each edge. This is useful for eliminating cases where the predicate is
2161 // known along all incoming edges.
2162 if (auto *PHI = dyn_cast<PHINode>(V))
2163 if (PHI->getParent() == BB) {
2164 Constant *Baseline = nullptr;
2165 for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
2166 Value *Incoming = PHI->getIncomingValue(i);
2167 BasicBlock *PredBB = PHI->getIncomingBlock(i);
2168 // Note that PredBB may be BB itself.
2169 Constant *Result =
2170 getPredicateOnEdge(Pred, Incoming, C, PredBB, BB, CxtI);
2171
2172 // Keep going as long as we've seen a consistent known result for
2173 // all inputs.
2174 Baseline = (i == 0) ? Result /* First iteration */
2175 : (Baseline == Result ? Baseline
2176 : nullptr); /* All others */
2177 if (!Baseline)
2178 break;
2179 }
2180 if (Baseline)
2181 return Baseline;
2182 }
2183
2184 // For a comparison where the V is outside this block, it's possible
2185 // that we've branched on it before. Look to see if the value is known
2186 // on all incoming edges.
2187 if (!isa<Instruction>(V) || cast<Instruction>(V)->getParent() != BB) {
2188 // For predecessor edge, determine if the comparison is true or false
2189 // on that edge. If they're all true or all false, we can conclude
2190 // the value of the comparison in this block.
2191 Constant *Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
2192 if (Baseline) {
2193 // Check that all remaining incoming values match the first one.
2194 while (++PI != PE) {
2195 Constant *Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
2196 if (Ret != Baseline)
2197 break;
2198 }
2199 // If we terminated early, then one of the values didn't match.
2200 if (PI == PE) {
2201 return Baseline;
2202 }
2203 }
2204 }
2205
2206 return nullptr;
2207}
2208
2209Constant *LazyValueInfo::getPredicateAt(CmpInst::Predicate Pred, Value *LHS,
2210 Value *RHS, Instruction *CxtI,
2211 bool UseBlockValue) {
2212 if (auto *C = dyn_cast<Constant>(RHS))
2213 return getPredicateAt(Pred, LHS, C, CxtI, UseBlockValue);
2214 if (auto *C = dyn_cast<Constant>(LHS))
2215 return getPredicateAt(CmpInst::getSwappedPredicate(Pred), RHS, C, CxtI,
2216 UseBlockValue);
2217
2218 // Got two non-Constant values. Try to determine the comparison results based
2219 // on the block values of the two operands, e.g. because they have
2220 // non-overlapping ranges.
2221 if (UseBlockValue) {
2222 Module *M = CxtI->getModule();
2223 ValueLatticeElement L =
2224 getOrCreateImpl(M).getValueInBlock(LHS, CxtI->getParent(), CxtI);
2225 if (L.isOverdefined())
2226 return nullptr;
2227
2228 ValueLatticeElement R =
2229 getOrCreateImpl(M).getValueInBlock(RHS, CxtI->getParent(), CxtI);
2230 Type *Ty = CmpInst::makeCmpResultType(LHS->getType());
2231 return L.getCompare(Pred, Ty, R, M->getDataLayout());
2232 }
2233 return nullptr;
2234}
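// For example, if the block values are LHS = constantrange<0, 5> and
// RHS = constantrange<10, 20>, an ICMP_ULT query folds to true because the
// two ranges do not overlap.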
2235
2236void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
2237 BasicBlock *NewSucc) {
2238 if (auto *Impl = getImpl())
2239 Impl->threadEdge(PredBB, OldSucc, NewSucc);
2240}
2241
2242void LazyValueInfo::forgetValue(Value *V) {
2243 if (auto *Impl = getImpl())
2244 Impl->forgetValue(V);
2245}
2246
2247void LazyValueInfo::eraseBlock(BasicBlock *BB) {
2248 if (auto *Impl = getImpl())
2249 Impl->eraseBlock(BB);
2250}
2251
2252void LazyValueInfo::clear() {
2253 if (auto *Impl = getImpl())
2254 Impl->clear();
2255}
2256
2257void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
2258 if (auto *Impl = getImpl())
2259 Impl->printLVI(F, DTree, OS);
2260}
2261
2262// Print the LVI for the function arguments at the start of each basic block.
2263void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
2264 const BasicBlock *BB, formatted_raw_ostream &OS) {
2265 // Find if there are lattice values defined for arguments of the function.
2266 auto *F = BB->getParent();
2267 for (const auto &Arg : F->args()) {
2268 ValueLatticeElement Result = LVIImpl->getValueInBlock(
2269 const_cast<Argument *>(&Arg), const_cast<BasicBlock *>(BB));
2270 if (Result.isUnknown())
2271 continue;
2272 OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
2273 }
2274}
2275
2276// This function prints the LVI analysis for the instruction I at the beginning
2277// of various basic blocks. It relies on calculated values that are stored in
2278// the LazyValueInfoCache; in the absence of cached values, it recalculates the
2279// LazyValueInfo for `I` and prints that info.
2280void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
2281 const Instruction *I, formatted_raw_ostream &OS) {
2282
2283 auto *ParentBB = I->getParent();
2284 SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI;
2285 // We can generate (solve) LVI values only for blocks that are dominated by
2286 // I's parent. However, to avoid generating LVI for all dominated blocks,
2287 // which would mostly be redundant/uninteresting, we print LVI only for
2288 // blocks that may use this LVI information (such as immediate successor
2289 // blocks, and blocks that contain uses of `I`).
2290 auto printResult = [&](const BasicBlock *BB) {
2291 if (!BlocksContainingLVI.insert(BB).second)
2292 return;
2293 ValueLatticeElement Result = LVIImpl->getValueInBlock(
2294 const_cast<Instruction *>(I), const_cast<BasicBlock *>(BB));
2295 OS << "; LatticeVal for: '" << *I << "' in BB: '";
2296 BB->printAsOperand(OS, false);
2297 OS << "' is: " << Result << "\n";
2298 };
2299
2300 printResult(ParentBB);
2301 // Print the LVI analysis results for the immediate successor blocks, that
2302 // are dominated by `ParentBB`.
2303 for (const auto *BBSucc : successors(ParentBB))
2304 if (DT.dominates(ParentBB, BBSucc))
2305 printResult(BBSucc);
2306
2307 // Print LVI in blocks where `I` is used.
2308 for (const auto *U : I->users())
2309 if (auto *UseI = dyn_cast<Instruction>(U))
2310 if (!isa<PHINode>(UseI) || DT.dominates(ParentBB, UseI->getParent()))
2311 printResult(UseI->getParent());
2312
2313}
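// The emitted annotations take the form, e.g.:
//   ; LatticeVal for: '  %x = add i32 %a, 1' in BB: '%bb' is: constantrange<1, 11>
// with one line per (instruction, block) pair printed above.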
2314
2315PreservedAnalyses LazyValueInfoPrinterPass::run(Function &F,
2316 FunctionAnalysisManager &AM) {
2317 OS << "LVI for function '" << F.getName() << "':\n";
2318 auto &LVI = AM.getResult<LazyValueAnalysis>(F);
2319 auto &DTree = AM.getResult<DominatorTreeAnalysis>(F);
2320 LVI.printLVI(F, DTree, OS);
2321 return PreservedAnalyses::all();
2322}