LLVM 23.0.0git
SCCPSolver.cpp
Go to the documentation of this file.
1//===- SCCPSolver.cpp - SCCP Utility --------------------------- *- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// \file
10// This file implements the Sparse Conditional Constant Propagation (SCCP)
11// utility.
12//
13//===----------------------------------------------------------------------===//
14
16#include "llvm/ADT/SetVector.h"
24#include "llvm/IR/IRBuilder.h"
25#include "llvm/IR/InstVisitor.h"
27#include "llvm/IR/NoFolder.h"
30#include "llvm/Support/Debug.h"
34#include <cassert>
35#include <utility>
36#include <vector>
37
38using namespace llvm;
39using namespace PatternMatch;
40
41#define DEBUG_TYPE "sccp"
42
43// The maximum number of range extensions allowed for operations requiring
44// widening.
45static const unsigned MaxNumRangeExtensions = 10;
46
47/// Returns MergeOptions with MaxWidenSteps set to MaxNumRangeExtensions.
52
53namespace llvm {
54
56 return LV.isConstant() ||
58}
59
63
65 Constant *Const = getConstantOrNull(V);
66 if (!Const)
67 return false;
68 // Replacing `musttail` instructions with constant breaks `musttail` invariant
69 // unless the call itself can be removed.
70 // Calls with "clang.arc.attachedcall" implicitly use the return value and
71 // those uses cannot be updated with a constant.
73 if (CB && ((CB->isMustTailCall() && !wouldInstructionBeTriviallyDead(CB)) ||
76
77 // Don't zap returns of the callee
78 if (F)
80
81 LLVM_DEBUG(dbgs() << " Can\'t treat the result of call " << *CB
82 << " as a constant\n");
83 return false;
84 }
85
86 LLVM_DEBUG(dbgs() << " Constant: " << *Const << " = " << *V << '\n');
87
88 // Replaces all of the uses of a variable with uses of the constant.
89 V->replaceAllUsesWith(Const);
90 return true;
91}
92
93/// Helper for getting ranges from \p Solver. Instructions inserted during
94/// simplification are unavailable in the solver, so we return a full range for
95/// them.
97 const SmallPtrSetImpl<Value *> &InsertedValues) {
98 if (auto *Const = dyn_cast<Constant>(Op))
99 return Const->toConstantRange();
100 if (InsertedValues.contains(Op)) {
101 unsigned Bitwidth = Op->getType()->getScalarSizeInBits();
102 return ConstantRange::getFull(Bitwidth);
103 }
104 return Solver.getLatticeValueFor(Op).asConstantRange(Op->getType(),
105 /*UndefAllowed=*/false);
106}
107
108/// Try to use \p Inst's value range from \p Solver to infer the NUW flag.
109static bool refineInstruction(SCCPSolver &Solver,
110 const SmallPtrSetImpl<Value *> &InsertedValues,
111 Instruction &Inst) {
112 bool Changed = false;
113 auto GetRange = [&Solver, &InsertedValues](Value *Op) {
114 return getRange(Op, Solver, InsertedValues);
115 };
116
118 if (Inst.hasNoSignedWrap() && Inst.hasNoUnsignedWrap())
119 return false;
120
121 auto RangeA = GetRange(Inst.getOperand(0));
122 auto RangeB = GetRange(Inst.getOperand(1));
123 if (!Inst.hasNoUnsignedWrap()) {
125 Instruction::BinaryOps(Inst.getOpcode()), RangeB,
127 if (NUWRange.contains(RangeA)) {
129 Changed = true;
130 }
131 }
132 if (!Inst.hasNoSignedWrap()) {
134 Instruction::BinaryOps(Inst.getOpcode()), RangeB,
136 if (NSWRange.contains(RangeA)) {
137 Inst.setHasNoSignedWrap();
138 Changed = true;
139 }
140 }
141 } else if (isa<PossiblyNonNegInst>(Inst) && !Inst.hasNonNeg()) {
142 auto Range = GetRange(Inst.getOperand(0));
143 if (Range.isAllNonNegative()) {
144 Inst.setNonNeg();
145 Changed = true;
146 }
147 } else if (TruncInst *TI = dyn_cast<TruncInst>(&Inst)) {
148 if (TI->hasNoSignedWrap() && TI->hasNoUnsignedWrap())
149 return false;
150
151 auto Range = GetRange(Inst.getOperand(0));
152 uint64_t DestWidth = TI->getDestTy()->getScalarSizeInBits();
153 if (!TI->hasNoUnsignedWrap()) {
154 if (Range.getActiveBits() <= DestWidth) {
155 TI->setHasNoUnsignedWrap(true);
156 Changed = true;
157 }
158 }
159 if (!TI->hasNoSignedWrap()) {
160 if (Range.getMinSignedBits() <= DestWidth) {
161 TI->setHasNoSignedWrap(true);
162 Changed = true;
163 }
164 }
165 } else if (auto *GEP = dyn_cast<GetElementPtrInst>(&Inst)) {
166 if (GEP->hasNoUnsignedWrap() || !GEP->hasNoUnsignedSignedWrap())
167 return false;
168
169 if (all_of(GEP->indices(),
170 [&](Value *V) { return GetRange(V).isAllNonNegative(); })) {
171 GEP->setNoWrapFlags(GEP->getNoWrapFlags() |
173 Changed = true;
174 }
175 }
176
177 return Changed;
178}
179
180/// Try to replace signed instructions with their unsigned equivalent.
181static bool replaceSignedInst(SCCPSolver &Solver,
182 SmallPtrSetImpl<Value *> &InsertedValues,
183 Instruction &Inst) {
184 // Determine if a signed value is known to be >= 0.
185 auto isNonNegative = [&Solver, &InsertedValues](Value *V) {
186 return getRange(V, Solver, InsertedValues).isAllNonNegative();
187 };
188
189 Instruction *NewInst = nullptr;
190 switch (Inst.getOpcode()) {
191 case Instruction::SIToFP:
192 case Instruction::SExt: {
193 // If the source value is not negative, this is a zext/uitofp.
194 Value *Op0 = Inst.getOperand(0);
195 if (!isNonNegative(Op0))
196 return false;
197 NewInst = CastInst::Create(Inst.getOpcode() == Instruction::SExt
198 ? Instruction::ZExt
199 : Instruction::UIToFP,
200 Op0, Inst.getType(), "", Inst.getIterator());
201 NewInst->setNonNeg();
202 break;
203 }
204 case Instruction::AShr: {
205 // If the shifted value is not negative, this is a logical shift right.
206 Value *Op0 = Inst.getOperand(0);
207 if (!isNonNegative(Op0))
208 return false;
209 NewInst = BinaryOperator::CreateLShr(Op0, Inst.getOperand(1), "", Inst.getIterator());
210 NewInst->setIsExact(Inst.isExact());
211 break;
212 }
213 case Instruction::SDiv:
214 case Instruction::SRem: {
215 // If both operands are not negative, this is the same as udiv/urem.
216 Value *Op0 = Inst.getOperand(0), *Op1 = Inst.getOperand(1);
217 if (!isNonNegative(Op0) || !isNonNegative(Op1))
218 return false;
219 auto NewOpcode = Inst.getOpcode() == Instruction::SDiv ? Instruction::UDiv
220 : Instruction::URem;
221 NewInst = BinaryOperator::Create(NewOpcode, Op0, Op1, "", Inst.getIterator());
222 if (Inst.getOpcode() == Instruction::SDiv)
223 NewInst->setIsExact(Inst.isExact());
224 break;
225 }
226 default:
227 return false;
228 }
229
230 // Wire up the new instruction and update state.
231 assert(NewInst && "Expected replacement instruction");
232 NewInst->takeName(&Inst);
233 InsertedValues.insert(NewInst);
234 Inst.replaceAllUsesWith(NewInst);
235 NewInst->setDebugLoc(Inst.getDebugLoc());
236 Solver.removeLatticeValueFor(&Inst);
237 Inst.eraseFromParent();
238 return true;
239}
240
241/// Try to use \p Inst's value range from \p Solver to simplify it.
243 SmallPtrSetImpl<Value *> &InsertedValues,
244 Instruction &Inst) {
245 auto GetRange = [&Solver, &InsertedValues](Value *Op) {
246 return getRange(Op, Solver, InsertedValues);
247 };
248
249 Value *X;
250 const APInt *RHSC;
251 // Remove masking operations.
252 if (match(&Inst, m_And(m_Value(X), m_LowBitMask(RHSC)))) {
253 ConstantRange LRange = GetRange(X);
254 if (LRange.getUnsignedMax().ule(*RHSC))
255 return X;
256 }
257
258 // Check if we can simplify [us]cmp(X, Y) to X - Y.
259 if (auto *Cmp = dyn_cast<CmpIntrinsic>(&Inst)) {
260 Value *LHS = Cmp->getOperand(0);
261 Value *RHS = Cmp->getOperand(1);
262 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
263 // Bail out on 1-bit comparisons.
264 if (BitWidth == 1)
265 return nullptr;
266 ConstantRange LRange = GetRange(LHS);
267 if (LRange.isSizeLargerThan(3))
268 return nullptr;
269 ConstantRange RRange = GetRange(RHS);
270 if (RRange.isSizeLargerThan(3))
271 return nullptr;
272 ConstantRange RHSLower = RRange.sub(APInt(BitWidth, 1));
273 ConstantRange RHSUpper = RRange.add(APInt(BitWidth, 1));
275 Cmp->isSigned() ? CmpInst::ICMP_SLE : CmpInst::ICMP_ULE;
276 if (!RHSLower.icmp(Pred, LRange) || !LRange.icmp(Pred, RHSUpper))
277 return nullptr;
278
279 IRBuilder<NoFolder> Builder(&Inst);
280 Value *Sub = Builder.CreateSub(LHS, RHS, Inst.getName(), /*HasNUW=*/false,
281 /*HasNSW=*/Cmp->isSigned());
282 InsertedValues.insert(Sub);
283 if (Sub->getType() != Inst.getType()) {
284 Sub = Builder.CreateSExtOrTrunc(Sub, Inst.getType());
285 InsertedValues.insert(Sub);
286 }
287 return Sub;
288 }
289
290 // Relax range checks.
291 if (auto *ICmp = dyn_cast<ICmpInst>(&Inst)) {
292 Value *X;
293 auto MatchTwoInstructionExactRangeCheck =
294 [&]() -> std::optional<ConstantRange> {
295 const APInt *RHSC;
296 if (!match(ICmp->getOperand(1), m_APInt(RHSC)))
297 return std::nullopt;
298
299 Value *LHS = ICmp->getOperand(0);
300 ICmpInst::Predicate Pred = ICmp->getPredicate();
301 const APInt *Offset;
303 return ConstantRange::makeExactICmpRegion(Pred, *RHSC).sub(*Offset);
304 // Match icmp eq/ne X & NegPow2, C
305 if (ICmp->isEquality()) {
306 const APInt *Mask;
307 if (match(LHS, m_OneUse(m_And(m_Value(X), m_NegatedPower2(Mask)))) &&
308 RHSC->countr_zero() >= Mask->countr_zero()) {
309 ConstantRange CR(*RHSC, *RHSC - *Mask);
310 return Pred == ICmpInst::ICMP_EQ ? CR : CR.inverse();
311 }
312 }
313 return std::nullopt;
314 };
315
316 if (auto CR = MatchTwoInstructionExactRangeCheck()) {
317 ConstantRange LRange = GetRange(X);
318 // Early exit if we know nothing about X.
319 if (LRange.isFullSet())
320 return nullptr;
321 auto ConvertCRToICmp =
322 [&](const std::optional<ConstantRange> &NewCR) -> Value * {
324 APInt RHS;
325 // Check if we can represent NewCR as an icmp predicate.
326 if (NewCR && NewCR->getEquivalentICmp(Pred, RHS)) {
327 IRBuilder<NoFolder> Builder(&Inst);
328 Value *NewICmp =
329 Builder.CreateICmp(Pred, X, ConstantInt::get(X->getType(), RHS));
330 InsertedValues.insert(NewICmp);
331 return NewICmp;
332 }
333 return nullptr;
334 };
335 // We are allowed to refine the comparison to either true or false for out
336 // of range inputs.
337 // Here we refine the comparison to false, and check if we can narrow the
338 // range check to a simpler test.
339 if (auto *V = ConvertCRToICmp(CR->exactIntersectWith(LRange)))
340 return V;
341 // Here we refine the comparison to true, i.e. we relax the range check.
342 if (auto *V = ConvertCRToICmp(CR->exactUnionWith(LRange.inverse())))
343 return V;
344 }
345 }
346
347 return nullptr;
348}
349
351 SmallPtrSetImpl<Value *> &InsertedValues,
352 Statistic &InstRemovedStat,
353 Statistic &InstReplacedStat) {
354 bool MadeChanges = false;
355 for (Instruction &Inst : make_early_inc_range(BB)) {
356 if (Inst.getType()->isVoidTy())
357 continue;
358 if (tryToReplaceWithConstant(&Inst)) {
360 Inst.eraseFromParent();
361
362 MadeChanges = true;
363 ++InstRemovedStat;
364 } else if (replaceSignedInst(*this, InsertedValues, Inst)) {
365 MadeChanges = true;
366 ++InstReplacedStat;
367 } else if (refineInstruction(*this, InsertedValues, Inst)) {
368 MadeChanges = true;
369 } else if (auto *V = simplifyInstruction(*this, InsertedValues, Inst)) {
370 Inst.replaceAllUsesWith(V);
371 Inst.eraseFromParent();
372 ++InstRemovedStat;
373 MadeChanges = true;
374 }
375 }
376 return MadeChanges;
377}
378
380 BasicBlock *&NewUnreachableBB) const {
381 SmallPtrSet<BasicBlock *, 8> FeasibleSuccessors;
382 bool HasNonFeasibleEdges = false;
383 for (BasicBlock *Succ : successors(BB)) {
384 if (isEdgeFeasible(BB, Succ))
385 FeasibleSuccessors.insert(Succ);
386 else
387 HasNonFeasibleEdges = true;
388 }
389
390 // All edges feasible, nothing to do.
391 if (!HasNonFeasibleEdges)
392 return false;
393
394 // SCCP can only determine non-feasible edges for br, switch and indirectbr.
395 Instruction *TI = BB->getTerminator();
397 "Terminator must be a br, switch or indirectbr");
398
399 if (FeasibleSuccessors.size() == 0) {
400 // Branch on undef/poison, replace with unreachable.
403 for (BasicBlock *Succ : successors(BB)) {
404 Succ->removePredecessor(BB);
405 if (SeenSuccs.insert(Succ).second)
406 Updates.push_back({DominatorTree::Delete, BB, Succ});
407 }
408 TI->eraseFromParent();
409 new UnreachableInst(BB->getContext(), BB);
410 DTU.applyUpdatesPermissive(Updates);
411 } else if (FeasibleSuccessors.size() == 1) {
412 // Replace with an unconditional branch to the only feasible successor.
413 BasicBlock *OnlyFeasibleSuccessor = *FeasibleSuccessors.begin();
415 bool HaveSeenOnlyFeasibleSuccessor = false;
416 for (BasicBlock *Succ : successors(BB)) {
417 if (Succ == OnlyFeasibleSuccessor && !HaveSeenOnlyFeasibleSuccessor) {
418 // Don't remove the edge to the only feasible successor the first time
419 // we see it. We still do need to remove any multi-edges to it though.
420 HaveSeenOnlyFeasibleSuccessor = true;
421 continue;
422 }
423
424 Succ->removePredecessor(BB);
425 Updates.push_back({DominatorTree::Delete, BB, Succ});
426 }
427
428 Instruction *BI = UncondBrInst::Create(OnlyFeasibleSuccessor, BB);
429 BI->setDebugLoc(TI->getDebugLoc());
430 TI->eraseFromParent();
431 DTU.applyUpdatesPermissive(Updates);
432 } else if (FeasibleSuccessors.size() > 1) {
435
436 // If the default destination is unfeasible it will never be taken. Replace
437 // it with a new block with a single Unreachable instruction.
438 BasicBlock *DefaultDest = SI->getDefaultDest();
439 if (!FeasibleSuccessors.contains(DefaultDest)) {
440 if (!NewUnreachableBB) {
441 NewUnreachableBB =
442 BasicBlock::Create(DefaultDest->getContext(), "default.unreachable",
443 DefaultDest->getParent(), DefaultDest);
444 auto *UI =
445 new UnreachableInst(DefaultDest->getContext(), NewUnreachableBB);
446 UI->setDebugLoc(DebugLoc::getTemporary());
447 }
448
449 DefaultDest->removePredecessor(BB);
450 SI->setDefaultDest(NewUnreachableBB);
451 Updates.push_back({DominatorTree::Delete, BB, DefaultDest});
452 Updates.push_back({DominatorTree::Insert, BB, NewUnreachableBB});
453 }
454
455 for (auto CI = SI->case_begin(); CI != SI->case_end();) {
456 if (FeasibleSuccessors.contains(CI->getCaseSuccessor())) {
457 ++CI;
458 continue;
459 }
460
461 BasicBlock *Succ = CI->getCaseSuccessor();
462 Succ->removePredecessor(BB);
463 Updates.push_back({DominatorTree::Delete, BB, Succ});
464 SI.removeCase(CI);
465 // Don't increment CI, as we removed a case.
466 }
467
468 DTU.applyUpdatesPermissive(Updates);
469 } else {
470 llvm_unreachable("Must have at least one feasible successor");
471 }
472 return true;
473}
474
475static void inferAttribute(Function *F, unsigned AttrIndex,
476 const ValueLatticeElement &Val) {
477 // If there is a known constant range for the value, add range attribute.
478 if (Val.isConstantRange() && !Val.getConstantRange().isSingleElement()) {
479 // Do not add range attribute if the value may include undef.
481 return;
482
483 // Take the intersection of the existing attribute and the inferred range.
484 Attribute OldAttr = F->getAttributeAtIndex(AttrIndex, Attribute::Range);
486 if (OldAttr.isValid())
487 CR = CR.intersectWith(OldAttr.getRange());
488 F->addAttributeAtIndex(
489 AttrIndex, Attribute::get(F->getContext(), Attribute::Range, CR));
490 return;
491 }
492 // Infer nonnull attribute.
493 if (Val.isNotConstant() && Val.getNotConstant()->getType()->isPointerTy() &&
494 Val.getNotConstant()->isNullValue() &&
495 !F->hasAttributeAtIndex(AttrIndex, Attribute::NonNull)) {
496 F->addAttributeAtIndex(AttrIndex,
497 Attribute::get(F->getContext(), Attribute::NonNull));
498 }
499}
500
502 for (const auto &[F, ReturnValue] : getTrackedRetVals())
503 inferAttribute(F, AttributeList::ReturnIndex, ReturnValue);
504}
505
508 if (!isBlockExecutable(&F->front()))
509 continue;
510 for (Argument &A : F->args())
511 if (!A.getType()->isStructTy())
512 inferAttribute(F, AttributeList::FirstArgIndex + A.getArgNo(),
514 }
515}
516
517/// Helper class for SCCPSolver. This implements the instruction visitor and
518/// holds all the state.
519class SCCPInstVisitor : public InstVisitor<SCCPInstVisitor> {
520 const DataLayout &DL;
521 std::function<const TargetLibraryInfo &(Function &)> GetTLI;
522 /// Basic blocks that are executable (but may not have been visited yet).
523 SmallPtrSet<BasicBlock *, 8> BBExecutable;
524 /// Basic blocks that are executable and have been visited at least once.
527 ValueState; // The state each value is in.
528
529 /// StructValueState - This maintains ValueState for values that have
530 /// StructType, for example for formal arguments, calls, insertelement, etc.
532
533 /// GlobalValue - If we are tracking any values for the contents of a global
534 /// variable, we keep a mapping from the constant accessor to the element of
535 /// the global, to the currently known value. If the value becomes
536 /// overdefined, it's entry is simply removed from this map.
538
539 /// TrackedRetVals - If we are tracking arguments into and the return
540 /// value out of a function, it will have an entry in this map, indicating
541 /// what the known return value for the function is.
543
544 /// TrackedMultipleRetVals - Same as TrackedRetVals, but used for functions
545 /// that return multiple values.
547 TrackedMultipleRetVals;
548
549 /// The set of values whose lattice has been invalidated.
550 /// Populated by resetLatticeValueFor(), cleared after resolving undefs.
551 DenseSet<Value *> Invalidated;
552
553 /// MRVFunctionsTracked - Each function in TrackedMultipleRetVals is
554 /// represented here for efficient lookup.
555 SmallPtrSet<Function *, 16> MRVFunctionsTracked;
556
557 /// A list of functions whose return cannot be modified.
558 SmallPtrSet<Function *, 16> MustPreserveReturnsInFunctions;
559
560 /// TrackingIncomingArguments - This is the set of functions for whose
561 /// arguments we make optimistic assumptions about and try to prove as
562 /// constants.
563 SmallPtrSet<Function *, 16> TrackingIncomingArguments;
564
565 /// Worklist of instructions to re-visit. This only includes instructions
566 /// in blocks that have already been visited at least once.
568
569 /// Current instruction while visiting a block for the first time, used to
570 /// avoid unnecessary instruction worklist insertions. Null if an instruction
571 /// is visited outside a whole-block visitation.
572 Instruction *CurI = nullptr;
573
574 // The BasicBlock work list
576
577 /// KnownFeasibleEdges - Entries in this set are edges which have already had
578 /// PHI nodes retriggered.
579 using Edge = std::pair<BasicBlock *, BasicBlock *>;
580 DenseSet<Edge> KnownFeasibleEdges;
581
583
585
586 LLVMContext &Ctx;
587
588 BumpPtrAllocator PredicateInfoAllocator;
589
590private:
591 ConstantInt *getConstantInt(const ValueLatticeElement &IV, Type *Ty) const {
593 }
594
595 /// Push instruction \p I to the worklist.
596 void pushToWorkList(Instruction *I);
597
598 /// Push users of value \p V to the worklist.
599 void pushUsersToWorkList(Value *V);
600
601 /// Like pushUsersToWorkList(), but also prints a debug message with the
602 /// updated value.
603 void pushUsersToWorkListMsg(ValueLatticeElement &IV, Value *V);
604
605 // markConstant - Make a value be marked as "constant". If the value
606 // is not already a constant, add it to the instruction work list so that
607 // the users of the instruction are updated later.
608 bool markConstant(ValueLatticeElement &IV, Value *V, Constant *C,
609 bool MayIncludeUndef = false);
610
611 bool markConstant(Value *V, Constant *C) {
612 assert(!V->getType()->isStructTy() && "structs should use mergeInValue");
613 return markConstant(ValueState[V], V, C);
614 }
615
616 bool markNotConstant(ValueLatticeElement &IV, Value *V, Constant *C);
617
618 bool markNotNull(ValueLatticeElement &IV, Value *V) {
619 return markNotConstant(IV, V, Constant::getNullValue(V->getType()));
620 }
621
622 /// markConstantRange - Mark the object as constant range with \p CR. If the
623 /// object is not a constant range with the range \p CR, add it to the
624 /// instruction work list so that the users of the instruction are updated
625 /// later.
626 bool markConstantRange(ValueLatticeElement &IV, Value *V,
627 const ConstantRange &CR);
628
629 // markOverdefined - Make a value be marked as "overdefined". If the
630 // value is not already overdefined, add it to the overdefined instruction
631 // work list so that the users of the instruction are updated later.
632 bool markOverdefined(ValueLatticeElement &IV, Value *V);
633
634 /// Merge \p MergeWithV into \p IV and push \p V to the worklist, if \p IV
635 /// changes.
636 bool mergeInValue(ValueLatticeElement &IV, Value *V,
637 const ValueLatticeElement &MergeWithV,
639 /*MayIncludeUndef=*/false, /*CheckWiden=*/false});
640
641 /// getValueState - Return the ValueLatticeElement object that corresponds to
642 /// the value. This function handles the case when the value hasn't been seen
643 /// yet by properly seeding constants etc.
644 ValueLatticeElement &getValueState(Value *V) {
645 assert(!V->getType()->isStructTy() && "Should use getStructValueState");
646
647 auto I = ValueState.try_emplace(V);
648 ValueLatticeElement &LV = I.first->second;
649
650 if (!I.second)
651 return LV; // Common case, already in the map.
652
653 if (auto *C = dyn_cast<Constant>(V))
654 LV.markConstant(C); // Constants are constant
655
656 // All others are unknown by default.
657 return LV;
658 }
659
660 /// getStructValueState - Return the ValueLatticeElement object that
661 /// corresponds to the value/field pair. This function handles the case when
662 /// the value hasn't been seen yet by properly seeding constants etc.
663 ValueLatticeElement &getStructValueState(Value *V, unsigned i) {
664 assert(V->getType()->isStructTy() && "Should use getValueState");
665 assert(i < cast<StructType>(V->getType())->getNumElements() &&
666 "Invalid element #");
667
668 auto I = StructValueState.insert(
669 std::make_pair(std::make_pair(V, i), ValueLatticeElement()));
670 ValueLatticeElement &LV = I.first->second;
671
672 if (!I.second)
673 return LV; // Common case, already in the map.
674
675 if (auto *C = dyn_cast<Constant>(V)) {
676 Constant *Elt = C->getAggregateElement(i);
677
678 if (!Elt)
679 LV.markOverdefined(); // Unknown sort of constant.
680 else
681 LV.markConstant(Elt); // Constants are constant.
682 }
683
684 // All others are underdefined by default.
685 return LV;
686 }
687
688 /// Traverse the use-def chain of \p Call, marking itself and its users as
689 /// "unknown" on the way.
690 void invalidate(CallBase *Call) {
692 ToInvalidate.push_back(Call);
693
694 while (!ToInvalidate.empty()) {
695 Instruction *Inst = ToInvalidate.pop_back_val();
696
697 if (!Invalidated.insert(Inst).second)
698 continue;
699
700 if (!BBExecutable.count(Inst->getParent()))
701 continue;
702
703 Value *V = nullptr;
704 // For return instructions we need to invalidate the tracked returns map.
705 // Anything else has its lattice in the value map.
706 if (auto *RetInst = dyn_cast<ReturnInst>(Inst)) {
707 Function *F = RetInst->getParent()->getParent();
708 if (auto It = TrackedRetVals.find(F); It != TrackedRetVals.end()) {
709 It->second = ValueLatticeElement();
710 V = F;
711 } else if (MRVFunctionsTracked.count(F)) {
712 auto *STy = cast<StructType>(F->getReturnType());
713 for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I)
714 TrackedMultipleRetVals[{F, I}] = ValueLatticeElement();
715 V = F;
716 }
717 } else if (auto *STy = dyn_cast<StructType>(Inst->getType())) {
718 for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
719 if (auto It = StructValueState.find({Inst, I});
720 It != StructValueState.end()) {
721 It->second = ValueLatticeElement();
722 V = Inst;
723 }
724 }
725 } else if (auto It = ValueState.find(Inst); It != ValueState.end()) {
726 It->second = ValueLatticeElement();
727 V = Inst;
728 }
729
730 if (V) {
731 LLVM_DEBUG(dbgs() << "Invalidated lattice for " << *V << "\n");
732
733 for (User *U : V->users())
734 if (auto *UI = dyn_cast<Instruction>(U))
735 ToInvalidate.push_back(UI);
736
737 auto It = AdditionalUsers.find(V);
738 if (It != AdditionalUsers.end())
739 for (User *U : It->second)
740 if (auto *UI = dyn_cast<Instruction>(U))
741 ToInvalidate.push_back(UI);
742 }
743 }
744 }
745
746 /// markEdgeExecutable - Mark a basic block as executable, adding it to the BB
747 /// work list if it is not already executable.
748 bool markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest);
749
750 // getFeasibleSuccessors - Return a vector of booleans to indicate which
751 // successors are reachable from a given terminator instruction.
752 void getFeasibleSuccessors(Instruction &TI, SmallVectorImpl<bool> &Succs);
753
754 // Add U as additional user of V.
755 void addAdditionalUser(Value *V, User *U) { AdditionalUsers[V].insert(U); }
756
757 void handlePredicate(Instruction *I, Value *CopyOf, const PredicateBase *PI);
758 void handleCallOverdefined(CallBase &CB);
759 void handleCallResult(CallBase &CB);
760 void handleCallArguments(CallBase &CB);
761 void handleExtractOfWithOverflow(ExtractValueInst &EVI,
762 const WithOverflowInst *WO, unsigned Idx);
763 bool isInstFullyOverDefined(Instruction &Inst);
764
765private:
766 friend class InstVisitor<SCCPInstVisitor>;
767
768 // visit implementations - Something changed in this instruction. Either an
769 // operand made a transition, or the instruction is newly executable. Change
770 // the value type of I to reflect these changes if appropriate.
771 void visitPHINode(PHINode &I);
772
773 // Terminators
774
775 void visitReturnInst(ReturnInst &I);
776 void visitTerminator(Instruction &TI);
777
778 void visitCastInst(CastInst &I);
779 void visitSelectInst(SelectInst &I);
780 void visitUnaryOperator(Instruction &I);
781 void visitFreezeInst(FreezeInst &I);
782 void visitBinaryOperator(Instruction &I);
783 void visitCmpInst(CmpInst &I);
784 void visitExtractValueInst(ExtractValueInst &EVI);
785 void visitInsertValueInst(InsertValueInst &IVI);
786
787 void visitCatchSwitchInst(CatchSwitchInst &CPI) {
788 markOverdefined(&CPI);
789 visitTerminator(CPI);
790 }
791
792 // Instructions that cannot be folded away.
793
794 void visitStoreInst(StoreInst &I);
795 void visitLoadInst(LoadInst &I);
796 void visitGetElementPtrInst(GetElementPtrInst &I);
797 void visitAllocaInst(AllocaInst &AI);
798
799 void visitInvokeInst(InvokeInst &II) {
800 visitCallBase(II);
801 visitTerminator(II);
802 }
803
804 void visitCallBrInst(CallBrInst &CBI) {
805 visitCallBase(CBI);
806 visitTerminator(CBI);
807 }
808
809 void visitCallBase(CallBase &CB);
810 void visitResumeInst(ResumeInst &I) { /*returns void*/
811 }
812 void visitUnreachableInst(UnreachableInst &I) { /*returns void*/
813 }
814 void visitFenceInst(FenceInst &I) { /*returns void*/
815 }
816
817 void visitInstruction(Instruction &I);
818
819public:
821 FnPredicateInfo.insert({&F, std::make_unique<PredicateInfo>(
822 F, DT, AC, PredicateInfoAllocator)});
823 }
824
826 auto It = FnPredicateInfo.find(&F);
827 if (It == FnPredicateInfo.end())
828 return;
829
830 for (BasicBlock &BB : F) {
831 for (Instruction &Inst : llvm::make_early_inc_range(BB)) {
832 if (auto *BC = dyn_cast<BitCastInst>(&Inst)) {
833 if (BC->getType() == BC->getOperand(0)->getType()) {
834 if (It->second->getPredicateInfoFor(&Inst)) {
835 Value *Op = BC->getOperand(0);
836 Inst.replaceAllUsesWith(Op);
837 Inst.eraseFromParent();
838 }
839 }
840 }
841 }
842 }
843 }
844
845 void visitCallInst(CallInst &I) { visitCallBase(I); }
846
848
850 auto It = FnPredicateInfo.find(I->getParent()->getParent());
851 if (It == FnPredicateInfo.end())
852 return nullptr;
853 return It->second->getPredicateInfoFor(I);
854 }
855
857 std::function<const TargetLibraryInfo &(Function &)> GetTLI,
858 LLVMContext &Ctx)
859 : DL(DL), GetTLI(GetTLI), Ctx(Ctx) {}
860
862 // We only track the contents of scalar globals.
863 if (GV->getValueType()->isSingleValueType()) {
864 ValueLatticeElement &IV = TrackedGlobals[GV];
865 IV.markConstant(GV->getInitializer());
866 }
867 }
868
870 // Add an entry, F -> undef.
871 if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
872 MRVFunctionsTracked.insert(F);
873 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
874 TrackedMultipleRetVals.try_emplace(std::make_pair(F, i));
875 } else if (!F->getReturnType()->isVoidTy())
876 TrackedRetVals.try_emplace(F);
877 }
878
880 MustPreserveReturnsInFunctions.insert(F);
881 }
882
884 return MustPreserveReturnsInFunctions.count(F);
885 }
886
888 TrackingIncomingArguments.insert(F);
889 }
890
892 return TrackingIncomingArguments.count(F);
893 }
894
896 return TrackingIncomingArguments;
897 }
898
899 void solve();
900
902
904
906 return BBExecutable.count(BB);
907 }
908
909 bool isEdgeFeasible(BasicBlock *From, BasicBlock *To) const;
910
911 std::vector<ValueLatticeElement> getStructLatticeValueFor(Value *V) const {
912 std::vector<ValueLatticeElement> StructValues;
913 auto *STy = dyn_cast<StructType>(V->getType());
914 assert(STy && "getStructLatticeValueFor() can be called only on structs");
915 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
916 auto I = StructValueState.find(std::make_pair(V, i));
917 assert(I != StructValueState.end() && "Value not in valuemap!");
918 StructValues.push_back(I->second);
919 }
920 return StructValues;
921 }
922
923 void removeLatticeValueFor(Value *V) { ValueState.erase(V); }
924
925 /// Invalidate the Lattice Value of \p Call and its users after specializing
926 /// the call. Then recompute it.
928 // Calls to void returning functions do not need invalidation.
929 Function *F = Call->getCalledFunction();
930 (void)F;
931 assert(!F->getReturnType()->isVoidTy() &&
932 (TrackedRetVals.count(F) || MRVFunctionsTracked.count(F)) &&
933 "All non void specializations should be tracked");
934 invalidate(Call);
935 handleCallResult(*Call);
936 }
937
939 assert(!V->getType()->isStructTy() &&
940 "Should use getStructLatticeValueFor");
942 ValueState.find(V);
943 assert(I != ValueState.end() &&
944 "V not found in ValueState nor Paramstate map!");
945 return I->second;
946 }
947
949 return TrackedRetVals;
950 }
951
954 return TrackedGlobals;
955 }
956
958 return MRVFunctionsTracked;
959 }
960
962 if (auto *STy = dyn_cast<StructType>(V->getType()))
963 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
964 markOverdefined(getStructValueState(V, i), V);
965 else
966 markOverdefined(ValueState[V], V);
967 }
968
970 if (A->getType()->isIntOrIntVectorTy()) {
971 if (std::optional<ConstantRange> Range = A->getRange())
973 }
974 if (A->hasNonNullAttr())
976 // Assume nothing about the incoming arguments without attributes.
978 }
979
981 if (A->getType()->isStructTy())
982 return (void)markOverdefined(A);
983 mergeInValue(ValueState[A], A, getArgAttributeVL(A));
984 }
985
987
988 Constant *getConstant(const ValueLatticeElement &LV, Type *Ty) const;
989
991
993 const SmallVectorImpl<ArgInfo> &Args);
994
996 for (auto &BB : *F)
997 BBExecutable.erase(&BB);
998 }
999
1001 bool ResolvedUndefs = true;
1002 while (ResolvedUndefs) {
1003 solve();
1004 ResolvedUndefs = false;
1005 for (Function &F : M)
1006 ResolvedUndefs |= resolvedUndefsIn(F);
1007 }
1008 }
1009
1011 bool ResolvedUndefs = true;
1012 while (ResolvedUndefs) {
1013 solve();
1014 ResolvedUndefs = false;
1015 for (Function *F : WorkList)
1016 ResolvedUndefs |= resolvedUndefsIn(*F);
1017 }
1018 }
1019
1021 bool ResolvedUndefs = true;
1022 while (ResolvedUndefs) {
1023 solve();
1024 ResolvedUndefs = false;
1025 for (Value *V : Invalidated)
1026 if (auto *I = dyn_cast<Instruction>(V))
1027 ResolvedUndefs |= resolvedUndef(*I);
1028 }
1029 Invalidated.clear();
1030 }
1031};
1032
1033} // namespace llvm
1034
1036 if (!BBExecutable.insert(BB).second)
1037 return false;
1038 LLVM_DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << '\n');
1039 BBWorkList.push_back(BB); // Add the block to the work list!
1040 return true;
1041}
1042
1043void SCCPInstVisitor::pushToWorkList(Instruction *I) {
1044 // If we're currently visiting a block, do not push any instructions in the
1045 // same blocks that are after the current one, as they will be visited
1046 // anyway. We do have to push updates to earlier instructions (e.g. phi
1047 // nodes or loads of tracked globals).
1048 if (CurI && I->getParent() == CurI->getParent() && !I->comesBefore(CurI))
1049 return;
1050 // Only push instructions in already visited blocks. Otherwise we'll handle
1051 // it when we visit the block for the first time.
1052 if (BBVisited.contains(I->getParent()))
1053 InstWorkList.insert(I);
1054}
1055
// Requeue every instruction that uses \p V — both its direct IR users and any
// registered additional users — so their lattice values get recomputed.
void SCCPInstVisitor::pushUsersToWorkList(Value *V) {
  // Direct users from the IR use-list.
  for (User *U : V->users())
    if (auto *UI = dyn_cast<Instruction>(U))
      pushToWorkList(UI);

  // Users registered via addAdditionalUser (dependencies not visible through
  // the use-list, e.g. extracts depending on intrinsic operands).
  auto Iter = AdditionalUsers.find(V);
  if (Iter != AdditionalUsers.end()) {
    // Copy additional users before notifying them of changes, because new
    // users may be added, potentially invalidating the iterator.
    for (User *U : Iter->second)
      if (auto *UI = dyn_cast<Instruction>(U))
        ToNotify.push_back(UI);
    for (Instruction *UI : ToNotify)
      pushToWorkList(UI);
  }
}
1073
1074void SCCPInstVisitor::pushUsersToWorkListMsg(ValueLatticeElement &IV,
1075 Value *V) {
1076 LLVM_DEBUG(dbgs() << "updated " << IV << ": " << *V << '\n');
1077 pushUsersToWorkList(V);
1078}
1079
1080bool SCCPInstVisitor::markConstant(ValueLatticeElement &IV, Value *V,
1081 Constant *C, bool MayIncludeUndef) {
1082 if (!IV.markConstant(C, MayIncludeUndef))
1083 return false;
1084 LLVM_DEBUG(dbgs() << "markConstant: " << *C << ": " << *V << '\n');
1085 pushUsersToWorkList(V);
1086 return true;
1087}
1088
1089bool SCCPInstVisitor::markNotConstant(ValueLatticeElement &IV, Value *V,
1090 Constant *C) {
1091 if (!IV.markNotConstant(C))
1092 return false;
1093 LLVM_DEBUG(dbgs() << "markNotConstant: " << *C << ": " << *V << '\n');
1094 pushUsersToWorkList(V);
1095 return true;
1096}
1097
1098bool SCCPInstVisitor::markConstantRange(ValueLatticeElement &IV, Value *V,
1099 const ConstantRange &CR) {
1100 if (!IV.markConstantRange(CR))
1101 return false;
1102 LLVM_DEBUG(dbgs() << "markConstantRange: " << CR << ": " << *V << '\n');
1103 pushUsersToWorkList(V);
1104 return true;
1105}
1106
1107bool SCCPInstVisitor::markOverdefined(ValueLatticeElement &IV, Value *V) {
1108 if (!IV.markOverdefined())
1109 return false;
1110
1111 LLVM_DEBUG(dbgs() << "markOverdefined: ";
1112 if (auto *F = dyn_cast<Function>(V)) dbgs()
1113 << "Function '" << F->getName() << "'\n";
1114 else dbgs() << *V << '\n');
1115 // Only instructions go on the work list
1116 pushUsersToWorkList(V);
1117 return true;
1118}
1119
  // The struct-typed tracked return of F is constant only if every element's
  // lattice value is constant; a tracked entry must exist for each element.
  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
    const auto &It = TrackedMultipleRetVals.find(std::make_pair(F, i));
    assert(It != TrackedMultipleRetVals.end());
    if (!SCCPSolver::isConstant(It->second))
      return false;
  }
  return true;
}
1129
                                       Type *Ty) const {
  // A constant lattice value is returned directly (type must already match).
  if (LV.isConstant()) {
    Constant *C = LV.getConstant();
    assert(C->getType() == Ty && "Type mismatch");
    return C;
  }

  // A constant range that has collapsed to a single element is also a known
  // constant; materialize it as a ConstantInt of the requested type.
  if (LV.isConstantRange()) {
    const auto &CR = LV.getConstantRange();
    if (CR.getSingleElement())
      return ConstantInt::get(Ty, *CR.getSingleElement());
  }
  // Otherwise no single constant is known.
  return nullptr;
}
1145
1147 Constant *Const = nullptr;
1148 if (V->getType()->isStructTy()) {
1149 std::vector<ValueLatticeElement> LVs = getStructLatticeValueFor(V);
1151 return nullptr;
1152 std::vector<Constant *> ConstVals;
1153 auto *ST = cast<StructType>(V->getType());
1154 for (unsigned I = 0, E = ST->getNumElements(); I != E; ++I) {
1155 const ValueLatticeElement &LV = LVs[I];
1156 ConstVals.push_back(SCCPSolver::isConstant(LV)
1157 ? getConstant(LV, ST->getElementType(I))
1158 : UndefValue::get(ST->getElementType(I)));
1159 }
1160 Const = ConstantStruct::get(ST, ConstVals);
1161 } else {
1164 return nullptr;
1165 Const = SCCPSolver::isConstant(LV) ? getConstant(LV, V->getType())
1166 : UndefValue::get(V->getType());
1167 }
1168 assert(Const && "Constant is nullptr here!");
1169 return Const;
1170}
1171
                                                  const SmallVectorImpl<ArgInfo> &Args) {
  assert(!Args.empty() && "Specialization without arguments");
  assert(F->arg_size() == Args[0].Formal->getParent()->arg_size() &&
         "Functions should have the same number of arguments");

  // Walk the specialized function's arguments in lockstep with the original
  // function's arguments. A single forward cursor (Iter) over Args matches
  // specialized formals — this relies on Args being ordered consistently with
  // the argument list (TODO confirm against callers).
  auto Iter = Args.begin();
  Function::arg_iterator NewArg = F->arg_begin();
  Function::arg_iterator OldArg = Args[0].Formal->getParent()->arg_begin();
  for (auto End = F->arg_end(); NewArg != End; ++NewArg, ++OldArg) {

    LLVM_DEBUG(dbgs() << "SCCP: Marking argument "
                      << NewArg->getNameOrAsOperand() << "\n");

    // Mark the argument constants in the new function
    // or copy the lattice state over from the old function.
    if (Iter != Args.end() && Iter->Formal == &*OldArg) {
      // Specialized argument: pin it to the actual constant, element-wise for
      // struct-typed arguments.
      if (auto *STy = dyn_cast<StructType>(NewArg->getType())) {
        for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
          ValueLatticeElement &NewValue = StructValueState[{&*NewArg, I}];
          NewValue.markConstant(Iter->Actual->getAggregateElement(I));
        }
      } else {
        ValueState[&*NewArg].markConstant(Iter->Actual);
      }
      ++Iter;
    } else {
      // Non-specialized argument: inherit the lattice state already computed
      // for the corresponding argument of the original function.
      if (auto *STy = dyn_cast<StructType>(NewArg->getType())) {
        for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
          ValueLatticeElement &NewValue = StructValueState[{&*NewArg, I}];
          NewValue = StructValueState[{&*OldArg, I}];
        }
      } else {
        ValueLatticeElement &NewValue = ValueState[&*NewArg];
        NewValue = ValueState[&*OldArg];
      }
    }
  }
}
1211
1212void SCCPInstVisitor::visitInstruction(Instruction &I) {
1213 // All the instructions we don't do any special handling for just
1214 // go to overdefined.
1215 LLVM_DEBUG(dbgs() << "SCCP: Don't know how to handle: " << I << '\n');
1216 markOverdefined(&I);
1217}
1218
// Merge \p MergeWithV into \p IV (the lattice state of \p V). If that lowered
// the state, requeue all of V's users and return true.
bool SCCPInstVisitor::mergeInValue(ValueLatticeElement &IV, Value *V,
                                   const ValueLatticeElement &MergeWithV,
  if (IV.mergeIn(MergeWithV, Opts)) {
    pushUsersToWorkList(V);
    LLVM_DEBUG(dbgs() << "Merged " << MergeWithV << " into " << *V << " : "
                      << IV << "\n");
    return true;
  }
  return false;
}
1230
1231bool SCCPInstVisitor::markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest) {
1232 if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second)
1233 return false; // This edge is already known to be executable!
1234
1235 if (!markBlockExecutable(Dest)) {
1236 // If the destination is already executable, we just made an *edge*
1237 // feasible that wasn't before. Revisit the PHI nodes in the block
1238 // because they have potentially new operands.
1239 LLVM_DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName()
1240 << " -> " << Dest->getName() << '\n');
1241
1242 for (PHINode &PN : Dest->phis())
1243 pushToWorkList(&PN);
1244 }
1245 return true;
1246}
1247
1248// getFeasibleSuccessors - Return a vector of booleans to indicate which
1249// successors are reachable from a given terminator instruction.
void SCCPInstVisitor::getFeasibleSuccessors(Instruction &TI,
                                            SmallVectorImpl<bool> &Succs) {
  // Succs[i] is set when successor i of TI may be taken, judged from the
  // current lattice state of TI's condition. Entries default to false.
  Succs.resize(TI.getNumSuccessors());
  // An unconditional branch always transfers control to its sole successor.
  if (isa<UncondBrInst>(TI)) {
    Succs[0] = true;
    return;
  }

  if (auto *BI = dyn_cast<CondBrInst>(&TI)) {
    const ValueLatticeElement &BCValue = getValueState(BI->getCondition());
    ConstantInt *CI = getConstantInt(BCValue, BI->getCondition()->getType());
    if (!CI) {
      // Overdefined condition variables, and branches on unfoldable constant
      // conditions, mean the branch could go either way.
      if (!BCValue.isUnknownOrUndef())
        Succs[0] = Succs[1] = true;
      return;
    }

    // Constant condition variables mean the branch can only go a single way.
    Succs[CI->isZero()] = true;
    return;
  }

  // We cannot analyze special terminators, so consider all successors
  // executable.
  if (TI.isSpecialTerminator()) {
    Succs.assign(TI.getNumSuccessors(), true);
    return;
  }

  if (auto *SI = dyn_cast<SwitchInst>(&TI)) {
    // A switch with no cases always falls through to the default successor.
    if (!SI->getNumCases()) {
      Succs[0] = true;
      return;
    }
    const ValueLatticeElement &SCValue = getValueState(SI->getCondition());
    // A known constant condition selects exactly one case (or the default,
    // which findCaseValue returns for a value matching no case).
    if (ConstantInt *CI =
            getConstantInt(SCValue, SI->getCondition()->getType())) {
      Succs[SI->findCaseValue(CI)->getSuccessorIndex()] = true;
      return;
    }

    // TODO: Switch on undef is UB. Stop passing false once the rest of LLVM
    // is ready.
    if (SCValue.isConstantRange(/*UndefAllowed=*/false)) {
      const ConstantRange &Range = SCValue.getConstantRange();
      unsigned ReachableCaseCount = 0;
      for (const auto &Case : SI->cases()) {
        const APInt &CaseValue = Case.getCaseValue()->getValue();
        if (Range.contains(CaseValue)) {
          Succs[Case.getSuccessorIndex()] = true;
          ++ReachableCaseCount;
        }
      }

      // The default is feasible only if the condition's range holds more
      // values than the number of reachable case labels.
      Succs[SI->case_default()->getSuccessorIndex()] =
          Range.isSizeLargerThan(ReachableCaseCount);
      return;
    }

    // Overdefined or unknown condition? All destinations are executable!
    if (!SCValue.isUnknownOrUndef())
      Succs.assign(TI.getNumSuccessors(), true);
    return;
  }

  // In case of indirect branch and its address is a blockaddress, we mark
  // the target as executable.
  if (auto *IBR = dyn_cast<IndirectBrInst>(&TI)) {
    // Casts are folded by visitCastInst.
    const ValueLatticeElement &IBRValue = getValueState(IBR->getAddress());
        getConstant(IBRValue, IBR->getAddress()->getType()));
    if (!Addr) { // Overdefined or unknown condition?
      // All destinations are executable!
      if (!IBRValue.isUnknownOrUndef())
        Succs.assign(TI.getNumSuccessors(), true);
      return;
    }

    BasicBlock *T = Addr->getBasicBlock();
    assert(Addr->getFunction() == T->getParent() &&
           "Block address of a different function ?");
    for (unsigned i = 0; i < IBR->getNumSuccessors(); ++i) {
      // This is the target.
      if (IBR->getDestination(i) == T) {
        Succs[i] = true;
        return;
      }
    }

    // If we didn't find our destination in the IBR successor list, then we
    // have undefined behavior. Its ok to assume no successor is executable.
    return;
  }

  LLVM_DEBUG(dbgs() << "Unknown terminator instruction: " << TI << '\n');
  llvm_unreachable("SCCP: Don't know how to handle this terminator!");
}
1350
1351// isEdgeFeasible - Return true if the control flow edge from the 'From' basic
1352// block to the 'To' basic block is currently feasible.
1354 // Check if we've called markEdgeExecutable on the edge yet. (We could
1355 // be more aggressive and try to consider edges which haven't been marked
1356 // yet, but there isn't any need.)
1357 return KnownFeasibleEdges.count(Edge(From, To));
1358}
1359
1360// visit Implementations - Something changed in this instruction, either an
1361// operand made a transition, or the instruction is newly executable. Change
1362// the value type of I to reflect these changes if appropriate. This method
1363// makes sure to do the following actions:
1364//
1365// 1. If a phi node merges two constants in, and has conflicting value coming
1366// from different branches, or if the PHI node merges in an overdefined
1367// value, then the PHI node becomes overdefined.
1368// 2. If a phi node merges only constants in, and they all agree on value, the
1369// PHI node becomes a constant value equal to that.
1370// 3. If V <- x (op) y && isConstant(x) && isConstant(y) V = Constant
1371// 4. If V <- x (op) y && (isOverdefined(x) || isOverdefined(y)) V = Overdefined
1372// 5. If V <- MEM or V <- CALL or V <- (unknown) then V = Overdefined
1373// 6. If a conditional branch has a value that is constant, make the selected
1374// destination executable
1375// 7. If a conditional branch has a value that is overdefined, make all
1376// successors executable.
void SCCPInstVisitor::visitPHINode(PHINode &PN) {
  // Super-extra-high-degree PHI nodes are unlikely to ever be marked constant,
  // and slow us down a lot. Just mark them overdefined.
  if (PN.getNumIncomingValues() > 64)
    return (void)markOverdefined(&PN);

  // Nothing can change once the phi (or all its struct elements) is already
  // overdefined.
  if (isInstFullyOverDefined(PN))
    return;
  // Only incoming edges that are currently feasible may contribute a value.
  SmallVector<unsigned> FeasibleIncomingIndices;
  for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
    if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent()))
      continue;
    FeasibleIncomingIndices.push_back(i);
  }

  // Look at all of the executable operands of the PHI node. If any of them
  // are overdefined, the PHI becomes overdefined as well. If they are all
  // constant, and they agree with each other, the PHI becomes the identical
  // constant. If they are constant and don't agree, the PHI is a constant
  // range. If there are no executable operands, the PHI remains unknown.
  if (StructType *STy = dyn_cast<StructType>(PN.getType())) {
    // Struct-typed phis are tracked element-by-element.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      ValueLatticeElement PhiState = getStructValueState(&PN, i);
      if (PhiState.isOverdefined())
        continue;
      // Join the element's state across all feasible incoming values,
      // stopping early once it hits overdefined.
      for (unsigned j : FeasibleIncomingIndices) {
        const ValueLatticeElement &IV =
            getStructValueState(PN.getIncomingValue(j), i);
        PhiState.mergeIn(IV);
        if (PhiState.isOverdefined())
          break;
      }
      ValueLatticeElement &PhiStateRef = getStructValueState(&PN, i);
      mergeInValue(PhiStateRef, &PN, PhiState,
                   ValueLatticeElement::MergeOptions().setMaxWidenSteps(
                       FeasibleIncomingIndices.size() + 1));
      PhiStateRef.setNumRangeExtensions(
          std::max((unsigned)FeasibleIncomingIndices.size(),
                   PhiStateRef.getNumRangeExtensions()));
    }
  } else {
    // Scalar phi: join the states of all feasible incoming values.
    ValueLatticeElement PhiState = getValueState(&PN);
    for (unsigned i : FeasibleIncomingIndices) {
      const ValueLatticeElement &IV = getValueState(PN.getIncomingValue(i));
      PhiState.mergeIn(IV);
      if (PhiState.isOverdefined())
        break;
    }
    // We allow up to 1 range extension per active incoming value and one
    // additional extension. Note that we manually adjust the number of range
    // extensions to match the number of active incoming values. This helps to
    // limit multiple extensions caused by the same incoming value, if other
    // incoming values are equal.
    ValueLatticeElement &PhiStateRef = ValueState[&PN];
    mergeInValue(PhiStateRef, &PN, PhiState,
                 ValueLatticeElement::MergeOptions().setMaxWidenSteps(
                     FeasibleIncomingIndices.size() + 1));
    PhiStateRef.setNumRangeExtensions(
        std::max((unsigned)FeasibleIncomingIndices.size(),
                 PhiStateRef.getNumRangeExtensions()));
  }
}
1439
1440void SCCPInstVisitor::visitReturnInst(ReturnInst &I) {
1441 if (I.getNumOperands() == 0)
1442 return; // ret void
1443
1444 Function *F = I.getParent()->getParent();
1445 Value *ResultOp = I.getOperand(0);
1446
1447 // If we are tracking the return value of this function, merge it in.
1448 if (!TrackedRetVals.empty() && !ResultOp->getType()->isStructTy()) {
1449 auto TFRVI = TrackedRetVals.find(F);
1450 if (TFRVI != TrackedRetVals.end()) {
1451 mergeInValue(TFRVI->second, F, getValueState(ResultOp));
1452 return;
1453 }
1454 }
1455
1456 // Handle functions that return multiple values.
1457 if (!TrackedMultipleRetVals.empty()) {
1458 if (auto *STy = dyn_cast<StructType>(ResultOp->getType()))
1459 if (MRVFunctionsTracked.count(F))
1460 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
1461 mergeInValue(TrackedMultipleRetVals[std::make_pair(F, i)], F,
1462 getStructValueState(ResultOp, i));
1463 }
1464}
1465
1466void SCCPInstVisitor::visitTerminator(Instruction &TI) {
1467 SmallVector<bool, 16> SuccFeasible;
1468 getFeasibleSuccessors(TI, SuccFeasible);
1469
1470 BasicBlock *BB = TI.getParent();
1471
1472 // Mark all feasible successors executable.
1473 for (unsigned i = 0, e = SuccFeasible.size(); i != e; ++i)
1474 if (SuccFeasible[i])
1475 markEdgeExecutable(BB, TI.getSuccessor(i));
1476}
1477
1478void SCCPInstVisitor::visitCastInst(CastInst &I) {
1479 // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1480 // discover a concrete value later.
1481 if (ValueState[&I].isOverdefined())
1482 return;
1483
1484 if (auto *BC = dyn_cast<BitCastInst>(&I)) {
1485 if (BC->getType() == BC->getOperand(0)->getType()) {
1486 if (const PredicateBase *PI = getPredicateInfoFor(&I)) {
1487 handlePredicate(&I, I.getOperand(0), PI);
1488 return;
1489 }
1490 }
1491 }
1492
1493 const ValueLatticeElement &OpSt = getValueState(I.getOperand(0));
1494 if (OpSt.isUnknownOrUndef())
1495 return;
1496
1497 if (Constant *OpC = getConstant(OpSt, I.getOperand(0)->getType())) {
1498 // Fold the constant as we build.
1499 if (Constant *C =
1500 ConstantFoldCastOperand(I.getOpcode(), OpC, I.getType(), DL)) {
1501 auto &LV = ValueState[&I];
1502 mergeInValue(LV, &I, ValueLatticeElement::get(C));
1503 return;
1504 }
1505 }
1506
1507 // Ignore bitcasts, as they may change the number of vector elements.
1508 if (I.getDestTy()->isIntOrIntVectorTy() &&
1509 I.getSrcTy()->isIntOrIntVectorTy() &&
1510 I.getOpcode() != Instruction::BitCast) {
1511 ConstantRange OpRange =
1512 OpSt.asConstantRange(I.getSrcTy(), /*UndefAllowed=*/false);
1513 auto &LV = getValueState(&I);
1514
1515 Type *DestTy = I.getDestTy();
1516 ConstantRange Res = ConstantRange::getEmpty(DestTy->getScalarSizeInBits());
1517 if (auto *Trunc = dyn_cast<TruncInst>(&I))
1518 Res = OpRange.truncate(DestTy->getScalarSizeInBits(),
1519 Trunc->getNoWrapKind());
1520 else
1521 Res = OpRange.castOp(I.getOpcode(), DestTy->getScalarSizeInBits());
1522 mergeInValue(LV, &I, ValueLatticeElement::getRange(Res));
1523 } else
1524 markOverdefined(&I);
1525}
1526
1527void SCCPInstVisitor::handleExtractOfWithOverflow(ExtractValueInst &EVI,
1528 const WithOverflowInst *WO,
1529 unsigned Idx) {
1530 Value *LHS = WO->getLHS(), *RHS = WO->getRHS();
1531 Type *Ty = LHS->getType();
1532
1533 addAdditionalUser(LHS, &EVI);
1534 addAdditionalUser(RHS, &EVI);
1535
1536 const ValueLatticeElement &L = getValueState(LHS);
1537 if (L.isUnknownOrUndef())
1538 return; // Wait to resolve.
1539 ConstantRange LR = L.asConstantRange(Ty, /*UndefAllowed=*/false);
1540
1541 const ValueLatticeElement &R = getValueState(RHS);
1542 if (R.isUnknownOrUndef())
1543 return; // Wait to resolve.
1544
1545 ConstantRange RR = R.asConstantRange(Ty, /*UndefAllowed=*/false);
1546 if (Idx == 0) {
1547 ConstantRange Res = LR.binaryOp(WO->getBinaryOp(), RR);
1548 mergeInValue(ValueState[&EVI], &EVI, ValueLatticeElement::getRange(Res));
1549 } else {
1550 assert(Idx == 1 && "Index can only be 0 or 1");
1551 ConstantRange NWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
1552 WO->getBinaryOp(), RR, WO->getNoWrapKind());
1553 if (NWRegion.contains(LR))
1554 return (void)markConstant(&EVI, ConstantInt::getFalse(EVI.getType()));
1555 markOverdefined(&EVI);
1556 }
1557}
1558
1559void SCCPInstVisitor::visitExtractValueInst(ExtractValueInst &EVI) {
1560 // If this returns a struct, mark all elements over defined, we don't track
1561 // structs in structs.
1562 if (EVI.getType()->isStructTy())
1563 return (void)markOverdefined(&EVI);
1564
1565 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1566 // discover a concrete value later.
1567 if (ValueState[&EVI].isOverdefined())
1568 return (void)markOverdefined(&EVI);
1569
1570 // If this is extracting from more than one level of struct, we don't know.
1571 if (EVI.getNumIndices() != 1)
1572 return (void)markOverdefined(&EVI);
1573
1574 Value *AggVal = EVI.getAggregateOperand();
1575 if (AggVal->getType()->isStructTy()) {
1576 unsigned i = *EVI.idx_begin();
1577 if (auto *WO = dyn_cast<WithOverflowInst>(AggVal))
1578 return handleExtractOfWithOverflow(EVI, WO, i);
1579 ValueLatticeElement EltVal = getStructValueState(AggVal, i);
1580 mergeInValue(ValueState[&EVI], &EVI, EltVal);
1581 } else {
1582 // Otherwise, must be extracting from an array.
1583 return (void)markOverdefined(&EVI);
1584 }
1585}
1586
1587void SCCPInstVisitor::visitInsertValueInst(InsertValueInst &IVI) {
1588 auto *STy = dyn_cast<StructType>(IVI.getType());
1589 if (!STy)
1590 return (void)markOverdefined(&IVI);
1591
1592 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1593 // discover a concrete value later.
1594 if (ValueState[&IVI].isOverdefined())
1595 return (void)markOverdefined(&IVI);
1596
1597 // If this has more than one index, we can't handle it, drive all results to
1598 // undef.
1599 if (IVI.getNumIndices() != 1)
1600 return (void)markOverdefined(&IVI);
1601
1602 Value *Aggr = IVI.getAggregateOperand();
1603 unsigned Idx = *IVI.idx_begin();
1604
1605 // Compute the result based on what we're inserting.
1606 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1607 // This passes through all values that aren't the inserted element.
1608 if (i != Idx) {
1609 ValueLatticeElement EltVal = getStructValueState(Aggr, i);
1610 mergeInValue(getStructValueState(&IVI, i), &IVI, EltVal);
1611 continue;
1612 }
1613
1614 Value *Val = IVI.getInsertedValueOperand();
1615 if (Val->getType()->isStructTy())
1616 // We don't track structs in structs.
1617 markOverdefined(getStructValueState(&IVI, i), &IVI);
1618 else {
1619 ValueLatticeElement InVal = getValueState(Val);
1620 mergeInValue(getStructValueState(&IVI, i), &IVI, InVal);
1621 }
1622 }
1623}
1624
1625void SCCPInstVisitor::visitSelectInst(SelectInst &I) {
1626 // If this select returns a struct, just mark the result overdefined.
1627 // TODO: We could do a lot better than this if code actually uses this.
1628 if (I.getType()->isStructTy())
1629 return (void)markOverdefined(&I);
1630
1631 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1632 // discover a concrete value later.
1633 if (ValueState[&I].isOverdefined())
1634 return (void)markOverdefined(&I);
1635
1636 const ValueLatticeElement &CondValue = getValueState(I.getCondition());
1637 if (CondValue.isUnknownOrUndef())
1638 return;
1639
1640 if (ConstantInt *CondCB =
1641 getConstantInt(CondValue, I.getCondition()->getType())) {
1642 Value *OpVal = CondCB->isZero() ? I.getFalseValue() : I.getTrueValue();
1643 const ValueLatticeElement &OpValState = getValueState(OpVal);
1644 // Safety: ValueState[&I] doesn't invalidate OpValState since it is already
1645 // in the map.
1646 assert(ValueState.contains(&I) && "&I is not in ValueState map.");
1647 mergeInValue(ValueState[&I], &I, OpValState);
1648 return;
1649 }
1650
1651 // Otherwise, the condition is overdefined or a constant we can't evaluate.
1652 // See if we can produce something better than overdefined based on the T/F
1653 // value.
1654 ValueLatticeElement TVal = getValueState(I.getTrueValue());
1655 ValueLatticeElement FVal = getValueState(I.getFalseValue());
1656
1657 ValueLatticeElement &State = ValueState[&I];
1658 bool Changed = State.mergeIn(TVal);
1659 Changed |= State.mergeIn(FVal);
1660 if (Changed)
1661 pushUsersToWorkListMsg(State, &I);
1662}
1663
1664// Handle Unary Operators.
1665void SCCPInstVisitor::visitUnaryOperator(Instruction &I) {
1666 ValueLatticeElement V0State = getValueState(I.getOperand(0));
1667
1668 ValueLatticeElement &IV = ValueState[&I];
1669 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1670 // discover a concrete value later.
1671 if (IV.isOverdefined())
1672 return (void)markOverdefined(&I);
1673
1674 // If something is unknown/undef, wait for it to resolve.
1675 if (V0State.isUnknownOrUndef())
1676 return;
1677
1678 if (SCCPSolver::isConstant(V0State))
1679 if (Constant *C = ConstantFoldUnaryOpOperand(
1680 I.getOpcode(), getConstant(V0State, I.getType()), DL))
1681 return (void)markConstant(IV, &I, C);
1682
1683 markOverdefined(&I);
1684}
1685
1686void SCCPInstVisitor::visitFreezeInst(FreezeInst &I) {
1687 // If this freeze returns a struct, just mark the result overdefined.
1688 // TODO: We could do a lot better than this.
1689 if (I.getType()->isStructTy())
1690 return (void)markOverdefined(&I);
1691
1692 ValueLatticeElement V0State = getValueState(I.getOperand(0));
1693 ValueLatticeElement &IV = ValueState[&I];
1694 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1695 // discover a concrete value later.
1696 if (IV.isOverdefined())
1697 return (void)markOverdefined(&I);
1698
1699 // If something is unknown/undef, wait for it to resolve.
1700 if (V0State.isUnknownOrUndef())
1701 return;
1702
1703 if (SCCPSolver::isConstant(V0State) &&
1704 isGuaranteedNotToBeUndefOrPoison(getConstant(V0State, I.getType())))
1705 return (void)markConstant(IV, &I, getConstant(V0State, I.getType()));
1706
1707 markOverdefined(&I);
1708}
1709
1710// Handle Binary Operators.
1711void SCCPInstVisitor::visitBinaryOperator(Instruction &I) {
1712 ValueLatticeElement V1State = getValueState(I.getOperand(0));
1713 ValueLatticeElement V2State = getValueState(I.getOperand(1));
1714
1715 ValueLatticeElement &IV = ValueState[&I];
1716 if (IV.isOverdefined())
1717 return;
1718
1719 // If something is undef, wait for it to resolve.
1720 if (V1State.isUnknownOrUndef() || V2State.isUnknownOrUndef())
1721 return;
1722
1723 if (V1State.isOverdefined() && V2State.isOverdefined())
1724 return (void)markOverdefined(&I);
1725
1726 // If either of the operands is a constant, try to fold it to a constant.
1727 // TODO: Use information from notconstant better.
1728 if ((V1State.isConstant() || V2State.isConstant())) {
1729 Value *V1 = SCCPSolver::isConstant(V1State)
1730 ? getConstant(V1State, I.getOperand(0)->getType())
1731 : I.getOperand(0);
1732 Value *V2 = SCCPSolver::isConstant(V2State)
1733 ? getConstant(V2State, I.getOperand(1)->getType())
1734 : I.getOperand(1);
1735 Value *R = simplifyBinOp(I.getOpcode(), V1, V2, SimplifyQuery(DL, &I));
1736 auto *C = dyn_cast_or_null<Constant>(R);
1737 if (C) {
1738 // Conservatively assume that the result may be based on operands that may
1739 // be undef. Note that we use mergeInValue to combine the constant with
1740 // the existing lattice value for I, as different constants might be found
1741 // after one of the operands go to overdefined, e.g. due to one operand
1742 // being a special floating value.
1743 ValueLatticeElement NewV;
1744 NewV.markConstant(C, /*MayIncludeUndef=*/true);
1745 return (void)mergeInValue(ValueState[&I], &I, NewV);
1746 }
1747 }
1748
1749 // Only use ranges for binary operators on integers.
1750 if (!I.getType()->isIntOrIntVectorTy())
1751 return markOverdefined(&I);
1752
1753 // Try to simplify to a constant range.
1754 ConstantRange A =
1755 V1State.asConstantRange(I.getType(), /*UndefAllowed=*/false);
1756 ConstantRange B =
1757 V2State.asConstantRange(I.getType(), /*UndefAllowed=*/false);
1758
1759 auto *BO = cast<BinaryOperator>(&I);
1760 ConstantRange R = ConstantRange::getEmpty(I.getType()->getScalarSizeInBits());
1761 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(BO))
1762 R = A.overflowingBinaryOp(BO->getOpcode(), B, OBO->getNoWrapKind());
1763 else
1764 R = A.binaryOp(BO->getOpcode(), B);
1765 mergeInValue(ValueState[&I], &I, ValueLatticeElement::getRange(R));
1766
1767 // TODO: Currently we do not exploit special values that produce something
1768 // better than overdefined with an overdefined operand for vector or floating
1769 // point types, like and <4 x i32> overdefined, zeroinitializer.
1770}
1771
1772// Handle ICmpInst instruction.
1773void SCCPInstVisitor::visitCmpInst(CmpInst &I) {
1774 // Do not cache this lookup, getValueState calls later in the function might
1775 // invalidate the reference.
1776 if (ValueState[&I].isOverdefined())
1777 return (void)markOverdefined(&I);
1778
1779 Value *Op1 = I.getOperand(0);
1780 Value *Op2 = I.getOperand(1);
1781
1782 // For parameters, use ParamState which includes constant range info if
1783 // available.
1784 auto V1State = getValueState(Op1);
1785 auto V2State = getValueState(Op2);
1786
1787 Constant *C = V1State.getCompare(I.getPredicate(), I.getType(), V2State, DL);
1788 if (C) {
1789 ValueLatticeElement CV;
1790 CV.markConstant(C);
1791 mergeInValue(ValueState[&I], &I, CV);
1792 return;
1793 }
1794
1795 // If operands are still unknown, wait for it to resolve.
1796 if ((V1State.isUnknownOrUndef() || V2State.isUnknownOrUndef()) &&
1797 !SCCPSolver::isConstant(ValueState[&I]))
1798 return;
1799
1800 markOverdefined(&I);
1801}
1802
1803// Handle getelementptr instructions. If all operands are constants then we
1804// can turn this into a getelementptr ConstantExpr.
void SCCPInstVisitor::visitGetElementPtrInst(GetElementPtrInst &I) {
  // Already overdefined: nothing better can be derived.
  if (ValueState[&I].isOverdefined())
    return (void)markOverdefined(&I);

  const ValueLatticeElement &PtrState = getValueState(I.getPointerOperand());
  if (PtrState.isUnknownOrUndef())
    return;

  // gep inbounds/nuw of non-null is non-null.
  if (PtrState.isNotConstant() && PtrState.getNotConstant()->isNullValue()) {
    if (I.hasNoUnsignedWrap() ||
        (I.isInBounds() &&
         !NullPointerIsDefined(I.getFunction(), I.getAddressSpace())))
      return (void)markNotNull(ValueState[&I], &I);
    return (void)markOverdefined(&I);
  }

  Operands.reserve(I.getNumOperands());

  // Constant-fold only when every operand has resolved to a known constant;
  // any operand that can't be a single constant forces overdefined.
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    const ValueLatticeElement &State = getValueState(I.getOperand(i));
    if (State.isUnknownOrUndef())
      return; // Operands are not resolved yet.

    if (Constant *C = getConstant(State, I.getOperand(i)->getType())) {
      Operands.push_back(C);
      continue;
    }

    return (void)markOverdefined(&I);
  }

  if (Constant *C = ConstantFoldInstOperands(&I, Operands, DL))
    markConstant(&I, C);
  else
    markOverdefined(&I);
}
1843
1844void SCCPInstVisitor::visitAllocaInst(AllocaInst &I) {
1845 if (!NullPointerIsDefined(I.getFunction(), I.getAddressSpace()))
1846 return (void)markNotNull(ValueState[&I], &I);
1847
1848 markOverdefined(&I);
1849}
1850
1851void SCCPInstVisitor::visitStoreInst(StoreInst &SI) {
1852 // If this store is of a struct, ignore it.
1853 if (SI.getOperand(0)->getType()->isStructTy())
1854 return;
1855
1856 if (TrackedGlobals.empty() || !isa<GlobalVariable>(SI.getOperand(1)))
1857 return;
1858
1859 GlobalVariable *GV = cast<GlobalVariable>(SI.getOperand(1));
1860 auto I = TrackedGlobals.find(GV);
1861 if (I == TrackedGlobals.end())
1862 return;
1863
1864 // Get the value we are storing into the global, then merge it.
1865 mergeInValue(I->second, GV, getValueState(SI.getOperand(0)),
1866 ValueLatticeElement::MergeOptions().setCheckWiden(false));
1867 if (I->second.isOverdefined())
1868 TrackedGlobals.erase(I); // No need to keep tracking this!
1869}
1870
1872 if (const auto *CB = dyn_cast<CallBase>(I)) {
1873 if (CB->getType()->isIntOrIntVectorTy())
1874 if (std::optional<ConstantRange> Range = CB->getRange())
1876 if (CB->getType()->isPointerTy() && CB->isReturnNonNull())
1879 }
1880
1881 if (I->getType()->isIntOrIntVectorTy())
1882 if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range))
1885 if (I->hasMetadata(LLVMContext::MD_nonnull))
1888
1890}
1891
1892// Handle load instructions. If the operand is a constant pointer to a constant
1893// global, we can replace the load with the loaded constant value!
1894void SCCPInstVisitor::visitLoadInst(LoadInst &I) {
1895 // If this load is of a struct or the load is volatile, just mark the result
1896 // as overdefined.
1897 if (I.getType()->isStructTy() || I.isVolatile())
1898 return (void)markOverdefined(&I);
1899
1900 // resolvedUndefsIn might mark I as overdefined. Bail out, even if we would
1901 // discover a concrete value later.
1902 if (ValueState[&I].isOverdefined())
1903 return (void)markOverdefined(&I);
1904
1905 const ValueLatticeElement &PtrVal = getValueState(I.getOperand(0));
1906 if (PtrVal.isUnknownOrUndef())
1907 return; // The pointer is not resolved yet!
1908
1909 if (SCCPSolver::isConstant(PtrVal)) {
1910 Constant *Ptr = getConstant(PtrVal, I.getOperand(0)->getType());
1911 ValueLatticeElement &IV = ValueState[&I];
1912
1913 // load null is undefined.
1914 if (isa<ConstantPointerNull>(Ptr)) {
1915 if (NullPointerIsDefined(I.getFunction(), I.getPointerAddressSpace()))
1916 return (void)markOverdefined(IV, &I);
1917 else
1918 return;
1919 }
1920
1921 // Transform load (constant global) into the value loaded.
1922 if (auto *GV = dyn_cast<GlobalVariable>(Ptr)) {
1923 if (!TrackedGlobals.empty()) {
1924 // If we are tracking this global, merge in the known value for it.
1925 auto It = TrackedGlobals.find(GV);
1926 if (It != TrackedGlobals.end()) {
1927 mergeInValue(IV, &I, It->second, getMaxWidenStepsOpts());
1928 return;
1929 }
1930 }
1931 }
1932
1933 // Transform load from a constant into a constant if possible.
1934 if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, I.getType(), DL))
1935 return (void)markConstant(IV, &I, C);
1936 }
1937
1938 // Fall back to metadata.
1939 mergeInValue(ValueState[&I], &I, getValueFromMetadata(&I));
1940}
1941
// Dispatch for all call-like instructions: model the value flowing out of the
// call, then propagate the actual arguments into any tracked callee's formals.
1942 void SCCPInstVisitor::visitCallBase(CallBase &CB) {
1943 handleCallResult(CB);
1944 handleCallArguments(CB);
1945 }
1946
// Fallback handling for calls whose callee is untracked: try to constant-fold
// declarations with fully-constant operands; otherwise derive what we can
// from return attributes/metadata.
// NOTE(review): the Doxygen extraction dropped orig. line 1948
// (presumably `Function *F = CB.getCalledFunction();`) and orig. line 1961
// (presumably the declaration of `Operands`). Verify against upstream
// llvm/lib/Transforms/Utils/SCCPSolver.cpp before editing.
1947 void SCCPInstVisitor::handleCallOverdefined(CallBase &CB) {
1949
1950 // Void return and not tracking callee, just bail.
1951 if (CB.getType()->isVoidTy())
1952 return;
1953
1954 // Always mark struct return as overdefined.
1955 if (CB.getType()->isStructTy())
1956 return (void)markOverdefined(&CB);
1957
1958 // Otherwise, if we have a single return value case, and if the function is
1959 // a declaration, maybe we can constant fold it.
1960 if (F && F->isDeclaration() && canConstantFoldCallTo(&CB, F)) {
1962 for (const Use &A : CB.args()) {
1963 if (A.get()->getType()->isStructTy())
1964 return markOverdefined(&CB); // Can't handle struct args.
1965 if (A.get()->getType()->isMetadataTy())
1966 continue; // Carried in CB, not allowed in Operands.
1967 const ValueLatticeElement &State = getValueState(A);
1968
1969 if (State.isUnknownOrUndef())
1970 return; // Operands are not resolved yet.
1971 if (SCCPSolver::isOverdefined(State))
1972 return (void)markOverdefined(&CB);
1973 assert(SCCPSolver::isConstant(State) && "Unknown state!");
1974 Operands.push_back(getConstant(State, A->getType()));
1975 }
1976
1977 if (SCCPSolver::isOverdefined(getValueState(&CB)))
1978 return (void)markOverdefined(&CB);
1979
1980 // If we can constant fold this, mark the result of the call as a
1981 // constant.
1982 if (Constant *C = ConstantFoldCall(&CB, F, Operands, &GetTLI(*F)))
1983 return (void)markConstant(&CB, C);
1984 }
1985
1986 // Fall back to metadata.
1987 mergeInValue(ValueState[&CB], &CB, getValueFromMetadata(&CB));
1988 }
1989
// Propagates actual call-site arguments into the formal parameters of a
// callee whose incoming arguments are being tracked.
// NOTE(review): the Doxygen extraction dropped orig. line 1991 (presumably
// `Function *F = CB.getCalledFunction();`) and orig. line 2013 (the trailing
// merge-options argument of the struct-element mergeInValue call). Verify
// against upstream llvm/lib/Transforms/Utils/SCCPSolver.cpp before editing.
1990 void SCCPInstVisitor::handleCallArguments(CallBase &CB) {
1992 // If this is a local function that doesn't have its address taken, mark its
1993 // entry block executable and merge in the actual arguments to the call into
1994 // the formal arguments of the function.
1995 if (TrackingIncomingArguments.count(F)) {
1996 markBlockExecutable(&F->front());
1997
1998 // Propagate information from this call site into the callee.
1999 auto CAI = CB.arg_begin();
2000 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
2001 ++AI, ++CAI) {
2002 // If this argument is byval, and if the function is not readonly, there
2003 // will be an implicit copy formed of the input aggregate.
2004 if (AI->hasByValAttr() && !F->onlyReadsMemory()) {
2005 markOverdefined(&*AI);
2006 continue;
2007 }
2008
2009 if (auto *STy = dyn_cast<StructType>(AI->getType())) {
2010 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2011 ValueLatticeElement CallArg = getStructValueState(*CAI, i);
2012 mergeInValue(getStructValueState(&*AI, i), &*AI, CallArg,
2014 }
2015 } else {
2016 ValueLatticeElement CallArg =
2017 getValueState(*CAI).intersect(getArgAttributeVL(&*AI));
2018 mergeInValue(ValueState[&*AI], &*AI, CallArg, getMaxWidenStepsOpts());
2019 }
2020 }
2021 }
2022 }
2023
// Refines the lattice value of an ssa.copy (\p I, copying \p CopyOf) using
// the branch/assume constraint recorded in PredicateInfo \p PI: intersects
// range information, or propagates (not-)constant facts for eq/ne predicates.
// NOTE(review): the Doxygen extraction dropped orig. line 2050 (presumably
// `ImposedCR = ConstantRange::makeAllowedICmpRegion(`). Verify against
// upstream llvm/lib/Transforms/Utils/SCCPSolver.cpp before editing.
2024 void SCCPInstVisitor::handlePredicate(Instruction *I, Value *CopyOf,
2025 const PredicateBase *PI) {
2026 ValueLatticeElement CopyOfVal = getValueState(CopyOf);
2027 const std::optional<PredicateConstraint> &Constraint = PI->getConstraint();
2028 if (!Constraint) {
2029 mergeInValue(ValueState[I], I, CopyOfVal);
2030 return;
2031 }
2032
2033 CmpInst::Predicate Pred = Constraint->Predicate;
2034 Value *OtherOp = Constraint->OtherOp;
2035
2036 // Wait until OtherOp is resolved.
2037 if (getValueState(OtherOp).isUnknown()) {
2038 addAdditionalUser(OtherOp, I);
2039 return;
2040 }
2041
2042 ValueLatticeElement CondVal = getValueState(OtherOp);
2043 ValueLatticeElement &IV = ValueState[I];
2044 if (CondVal.isConstantRange() || CopyOfVal.isConstantRange()) {
2045 auto ImposedCR =
2046 ConstantRange::getFull(DL.getTypeSizeInBits(CopyOf->getType()));
2047
2048 // Get the range imposed by the condition.
2049 if (CondVal.isConstantRange())
2051 Pred, CondVal.getConstantRange());
2052
2053 // Combine range info for the original value with the new range from the
2054 // condition.
2055 auto CopyOfCR = CopyOfVal.asConstantRange(CopyOf->getType(),
2056 /*UndefAllowed=*/true);
2057 // Treat an unresolved input like a full range.
2058 if (CopyOfCR.isEmptySet())
2059 CopyOfCR = ConstantRange::getFull(CopyOfCR.getBitWidth());
2060 auto NewCR = ImposedCR.intersectWith(CopyOfCR);
2061 // If the existing information is != x, do not use the information from
2062 // a chained predicate, as the != x information is more likely to be
2063 // helpful in practice.
2064 if (!CopyOfCR.contains(NewCR) && CopyOfCR.getSingleMissingElement())
2065 NewCR = std::move(CopyOfCR);
2066
2067 // The new range is based on a branch condition. That guarantees that
2068 // neither of the compare operands can be undef in the branch targets,
2069 // unless we have conditions that are always true/false (e.g. icmp ule
2070 // i32, %a, i32_max). For the latter overdefined/empty range will be
2071 // inferred, but the branch will get folded accordingly anyways.
2072 addAdditionalUser(OtherOp, I);
2073 mergeInValue(
2074 IV, I, ValueLatticeElement::getRange(NewCR, /*MayIncludeUndef*/ false));
2075 return;
2076 } else if (Pred == CmpInst::ICMP_EQ &&
2077 (CondVal.isConstant() || CondVal.isNotConstant())) {
2078 // For non-integer values or integer constant expressions, only
2079 // propagate equal constants or not-constants.
2080 addAdditionalUser(OtherOp, I);
2081 mergeInValue(IV, I, CondVal);
2082 return;
2083 } else if (Pred == CmpInst::ICMP_NE && CondVal.isConstant()) {
2084 // Propagate inequalities.
2085 addAdditionalUser(OtherOp, I);
2086 mergeInValue(IV, I, ValueLatticeElement::getNot(CondVal.getConstant()));
2087 return;
2088 }
2089
2090 return (void)mergeInValue(IV, I, CopyOfVal);
2091 }
2092
// Computes the lattice value produced by a call: special-cases several
// intrinsics via ConstantRange, then falls back to tracked-return-value
// propagation or handleCallOverdefined for untracked callees.
// NOTE(review): the Doxygen extraction dropped several lines — orig. 2094
// (presumably `Function *F = CB.getCalledFunction();`), the hyperlinked
// ValueLatticeElement::getRange(...) continuation lines (orig. 2101, 2134,
// 2153), orig. 2124 (the lower bound of getNonEmpty), orig. 2141 (the
// declaration of `OpRanges`), and orig. 2173 (the merge-options argument).
// Verify against upstream llvm/lib/Transforms/Utils/SCCPSolver.cpp.
2093 void SCCPInstVisitor::handleCallResult(CallBase &CB) {
2095
2096 if (auto *II = dyn_cast<IntrinsicInst>(&CB)) {
2097 if (II->getIntrinsicID() == Intrinsic::vscale) {
2098 unsigned BitWidth = CB.getType()->getScalarSizeInBits();
2099 const ConstantRange Result = getVScaleRange(II->getFunction(), BitWidth);
2100 return (void)mergeInValue(ValueState[II], II,
2102 }
2103 if (II->getIntrinsicID() == Intrinsic::experimental_get_vector_length) {
2104 Value *CountArg = II->getArgOperand(0);
2105 Value *VF = II->getArgOperand(1);
2106 bool Scalable = cast<ConstantInt>(II->getArgOperand(2))->isOne();
2107
2108 // Computation happens in the larger type.
2109 unsigned BitWidth = std::max(CountArg->getType()->getScalarSizeInBits(),
2110 VF->getType()->getScalarSizeInBits());
2111
2112 ConstantRange Count = getValueState(CountArg)
2113 .asConstantRange(CountArg->getType(), false)
2114 .zeroExtend(BitWidth)
2115 ConstantRange MaxLanes = getValueState(VF)
2116 .asConstantRange(VF->getType(), false)
2117 .zeroExtend(BitWidth);
2118 if (Scalable)
2119 MaxLanes =
2120 MaxLanes.multiply(getVScaleRange(II->getFunction(), BitWidth));
2121
2122 // The result is always less than both Count and MaxLanes.
2123 ConstantRange Result = ConstantRange::getNonEmpty(
2125 APIntOps::umin(Count.getUnsignedMax(), MaxLanes.getUnsignedMax()) +
2126 1);
2127
2128 // If Count <= MaxLanes, getvectorlength(Count, MaxLanes) = Count
2129 if (Count.icmp(CmpInst::ICMP_ULE, MaxLanes))
2130 Result = std::move(Count);
2131
2132 Result = Result.truncate(II->getType()->getScalarSizeInBits());
2133 return (void)mergeInValue(ValueState[II], II,
2135 }
2136
2137 if (ConstantRange::isIntrinsicSupported(II->getIntrinsicID())) {
2138 // Compute result range for intrinsics supported by ConstantRange.
2139 // Do this even if we don't know a range for all operands, as we may
2140 // still know something about the result range, e.g. of abs(x).
2142 for (Value *Op : II->args()) {
2143 const ValueLatticeElement &State = getValueState(Op);
2144 if (State.isUnknownOrUndef())
2145 return;
2146 OpRanges.push_back(
2147 State.asConstantRange(Op->getType(), /*UndefAllowed=*/false));
2148 }
2149
2150 ConstantRange Result =
2151 ConstantRange::intrinsic(II->getIntrinsicID(), OpRanges);
2152 return (void)mergeInValue(ValueState[II], II,
2154 }
2155 }
2156
2157 // The common case is that we aren't tracking the callee, either because we
2158 // are not doing interprocedural analysis or the callee is indirect, or is
2159 // external. Handle these cases first.
2160 if (!F || F->isDeclaration())
2161 return handleCallOverdefined(CB);
2162
2163 // If this is a single/zero retval case, see if we're tracking the function.
2164 if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
2165 if (!MRVFunctionsTracked.count(F))
2166 return handleCallOverdefined(CB); // Not tracking this callee.
2167
2168 // If we are tracking this callee, propagate the result of the function
2169 // into this call site.
2170 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
2171 mergeInValue(getStructValueState(&CB, i), &CB,
2172 TrackedMultipleRetVals[std::make_pair(F, i)],
2174 } else {
2175 auto TFRVI = TrackedRetVals.find(F);
2176 if (TFRVI == TrackedRetVals.end())
2177 return handleCallOverdefined(CB); // Not tracking this callee.
2178
2179 // If so, propagate the return value of the callee into this call result.
2180 mergeInValue(ValueState[&CB], &CB, TFRVI->second, getMaxWidenStepsOpts());
2181 }
2182 }
2183
2184bool SCCPInstVisitor::isInstFullyOverDefined(Instruction &Inst) {
2185 // For structure Type, we handle each member separately.
2186 // A structure object won't be considered as overdefined when
2187 // there is at least one member that is not overdefined.
2188 if (StructType *STy = dyn_cast<StructType>(Inst.getType())) {
2189 for (unsigned i = 0, e = STy->getNumElements(); i < e; ++i) {
2190 if (!getStructValueState(&Inst, i).isOverdefined())
2191 return false;
2192 }
2193 return true;
2194 }
2195
2196 return getValueState(&Inst).isOverdefined();
2197}
2198
// Main fixed-point driver: drains the instruction and basic-block work lists
// until both are empty, revisiting instructions whose inputs changed.
// NOTE(review): the Doxygen extraction dropped orig. line 2199 — the
// enclosing signature (presumably `void SCCPInstVisitor::solve() {`). Verify
// against upstream llvm/lib/Transforms/Utils/SCCPSolver.cpp before editing.
2200 // Process the work lists until they are empty!
2201 while (!BBWorkList.empty() || !InstWorkList.empty()) {
2202 // Process the instruction work list.
2203 while (!InstWorkList.empty()) {
2204 Instruction *I = InstWorkList.pop_back_val();
2205 Invalidated.erase(I);
2206
2207 LLVM_DEBUG(dbgs() << "\nPopped off I-WL: " << *I << '\n');
2208
2209 visit(I);
2210 }
2211
2212 // Process the basic block work list.
2213 while (!BBWorkList.empty()) {
2214 BasicBlock *BB = BBWorkList.pop_back_val();
2215 BBVisited.insert(BB);
2216
2217 LLVM_DEBUG(dbgs() << "\nPopped off BBWL: " << *BB << '\n');
2218 for (Instruction &I : *BB) {
2219 CurI = &I;
2220 visit(I);
2221 }
2222 CurI = nullptr;
2223 }
2224 }
2225}
2226
// Forces a still-unknown instruction to overdefined after the solver has
// converged, so leftover undef inputs cannot be treated as unreachable.
// Returns true if a lattice value was changed.
// NOTE(review): the Doxygen extraction dropped orig. line 2227 (the
// signature, presumably `bool SCCPInstVisitor::resolvedUndef(Instruction &I)
// {`) and orig. line 2243 (presumably the isa<ExtractValueInst> /
// isa<InsertValueInst> condition guarding the `return false` below). Verify
// against upstream llvm/lib/Transforms/Utils/SCCPSolver.cpp before editing.
2228 // Look for instructions which produce undef values.
2229 if (I.getType()->isVoidTy())
2230 return false;
2231
2232 if (auto *STy = dyn_cast<StructType>(I.getType())) {
2233 // Only a few things that can be structs matter for undef.
2234
2235 // Tracked calls must never be marked overdefined in resolvedUndefsIn.
2236 if (auto *CB = dyn_cast<CallBase>(&I))
2237 if (Function *F = CB->getCalledFunction())
2238 if (MRVFunctionsTracked.count(F))
2239 return false;
2240
2241 // extractvalue and insertvalue don't need to be marked; they are
2242 // tracked as precisely as their operands.
2244 return false;
2245 // Send the results of everything else to overdefined. We could be
2246 // more precise than this but it isn't worth bothering.
2247 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2248 ValueLatticeElement &LV = getStructValueState(&I, i);
2249 if (LV.isUnknown()) {
2250 markOverdefined(LV, &I);
2251 return true;
2252 }
2253 }
2254 return false;
2255 }
2256
2257 ValueLatticeElement &LV = getValueState(&I);
2258 if (!LV.isUnknown())
2259 return false;
2260
2261 // There are two reasons a call can have an undef result
2262 // 1. It could be tracked.
2263 // 2. It could be constant-foldable.
2264 // Because of the way we solve return values, tracked calls must
2265 // never be marked overdefined in resolvedUndefsIn.
2266 if (auto *CB = dyn_cast<CallBase>(&I))
2267 if (Function *F = CB->getCalledFunction())
2268 if (TrackedRetVals.count(F))
2269 return false;
2270
2271 if (isa<LoadInst>(I)) {
2272 // A load here means one of two things: a load of undef from a global,
2273 // a load from an unknown pointer. Either way, having it return undef
2274 // is okay.
2275 return false;
2276 }
2277
2278 markOverdefined(&I);
2279 return true;
2280}
2281
2282 /// While solving the dataflow for a function, we don't compute a result for
2283 /// operations with an undef operand, to allow undef to be lowered to a
2284 /// constant later. For example, constant folding of "zext i8 undef to i16"
2285 /// would result in "i16 0", and if undef is later lowered to "i8 1", then the
2286 /// zext result would become "i16 1" and would result into an overdefined
2287 /// lattice value once merged with the previous result. Not computing the
2288 /// result of the zext (treating undef the same as unknown) allows us to handle
2289 /// a later undef->constant lowering more optimally.
2290 ///
2291 /// However, if the operand remains undef when the solver returns, we do need
2292 /// to assign some result to the instruction (otherwise we would treat it as
2293 /// unreachable). For simplicity, we mark any instructions that are still
2294 /// unknown as overdefined.
// NOTE(review): the Doxygen extraction dropped orig. line 2295 — the
// signature (presumably `bool SCCPInstVisitor::resolvedUndefsIn(Function &F)
// {`). Walks every executable block and applies resolvedUndef to each
// instruction; returns true if any lattice value changed.
2296 bool MadeChange = false;
2297 for (BasicBlock &BB : F) {
2298 if (!BBExecutable.count(&BB))
2299 continue;
2300
2301 for (Instruction &I : BB)
2302 MadeChange |= resolvedUndef(I);
2303 }
2304
2305 LLVM_DEBUG(if (MadeChange) dbgs()
2306 << "\nResolved undefs in " << F.getName() << '\n');
2307
2308 return MadeChange;
2309}
2310
2311//===----------------------------------------------------------------------===//
2312//
2313// SCCPSolver implementations
2314//
2316 const DataLayout &DL,
2317 std::function<const TargetLibraryInfo &(Function &)> GetTLI,
2318 LLVMContext &Ctx)
2319 : Visitor(new SCCPInstVisitor(DL, std::move(GetTLI), Ctx)) {}
2320
2321SCCPSolver::~SCCPSolver() = default;
2322
2324 AssumptionCache &AC) {
2325 Visitor->addPredicateInfo(F, DT, AC);
2326}
2327
2329 Visitor->removeSSACopies(F);
2330}
2331
2333 return Visitor->markBlockExecutable(BB);
2334}
2335
2337 return Visitor->getPredicateInfoFor(I);
2338}
2339
2341 Visitor->trackValueOfGlobalVariable(GV);
2342}
2343
2345 Visitor->addTrackedFunction(F);
2346}
2347
2349 Visitor->addToMustPreserveReturnsInFunctions(F);
2350}
2351
2353 return Visitor->mustPreserveReturn(F);
2354}
2355
2357 Visitor->addArgumentTrackedFunction(F);
2358}
2359
2361 return Visitor->isArgumentTrackedFunction(F);
2362}
2363
2366 return Visitor->getArgumentTrackedFunctions();
2367}
2368
2369void SCCPSolver::solve() { Visitor->solve(); }
2370
2372 return Visitor->resolvedUndefsIn(F);
2373}
2374
2376 Visitor->solveWhileResolvedUndefsIn(M);
2377}
2378
2379void
2381 Visitor->solveWhileResolvedUndefsIn(WorkList);
2382}
2383
2385 Visitor->solveWhileResolvedUndefs();
2386}
2387
2389 return Visitor->isBlockExecutable(BB);
2390}
2391
2393 return Visitor->isEdgeFeasible(From, To);
2394}
2395
2396std::vector<ValueLatticeElement>
2398 return Visitor->getStructLatticeValueFor(V);
2399}
2400
2402 return Visitor->removeLatticeValueFor(V);
2403}
2404
2406 Visitor->resetLatticeValueFor(Call);
2407}
2408
2410 return Visitor->getLatticeValueFor(V);
2411}
2412
2415 return Visitor->getTrackedRetVals();
2416}
2417
2420 return Visitor->getTrackedGlobals();
2421}
2422
2424 return Visitor->getMRVFunctionsTracked();
2425}
2426
2427void SCCPSolver::markOverdefined(Value *V) { Visitor->markOverdefined(V); }
2428
2430 Visitor->trackValueOfArgument(V);
2431}
2432
2434 return Visitor->isStructLatticeConstant(F, STy);
2435}
2436
2438 Type *Ty) const {
2439 return Visitor->getConstant(LV, Ty);
2440}
2441
2443 return Visitor->getConstantOrNull(V);
2444}
2445
2447 const SmallVectorImpl<ArgInfo> &Args) {
2448 Visitor->setLatticeValueForSpecializationArguments(F, Args);
2449}
2450
2452 Visitor->markFunctionUnreachable(F);
2453}
2454
2455void SCCPSolver::visit(Instruction *I) { Visitor->visit(I); }
2456
2457void SCCPSolver::visitCall(CallInst &I) { Visitor->visitCall(I); }
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
#define X(NUM, ENUM, NAME)
Definition ELF.h:849
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Hexagon Common GEP
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define T
ConstantRange Range(APInt(BitWidth, Low), APInt(BitWidth, High))
uint64_t IntrinsicInst * II
static ValueLatticeElement::MergeOptions getMaxWidenStepsOpts()
Returns MergeOptions with MaxWidenSteps set to MaxNumRangeExtensions.
static const unsigned MaxNumRangeExtensions
static ValueLatticeElement getValueFromMetadata(const Instruction *I)
std::pair< BasicBlock *, BasicBlock * > Edge
This file implements a set that has insertion order iteration characteristics.
static ConstantInt * getConstantInt(Value *V, const DataLayout &DL)
Extract ConstantInt from value, looking through IntToPtr and PointerNullValue.
#define LLVM_DEBUG(...)
Definition Debug.h:114
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition blake3_impl.h:83
Class for arbitrary precision integers.
Definition APInt.h:78
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1654
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1157
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:201
an instruction to allocate memory on the stack
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
A cache of @llvm.assume calls within a function.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:105
LLVM_ABI const ConstantRange & getRange() const
Returns the value of the range attribute.
static LLVM_ABI Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:261
LLVM Basic Block Representation.
Definition BasicBlock.h:62
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition BasicBlock.h:518
const Function * getParent() const
Return the enclosing method, or null if none.
Definition BasicBlock.h:213
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
LLVM_ABI void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
LLVM_ABI unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
LLVM_ABI Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
static LLVM_ABI BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
Function * getFunction() const
Definition Constants.h:1101
BasicBlock * getBasicBlock() const
Definition Constants.h:1100
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
LLVM_ABI bool isMustTailCall() const
Tests if this call site must be tail call optimized.
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
This class represents a function call, abstracting a target machine's calling convention.
This is the base class for all instructions that perform data casts.
Definition InstrTypes.h:448
static LLVM_ABI CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", InsertPosition InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
This is the shared class of boolean and integer constants.
Definition Constants.h:87
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
Definition Constants.h:219
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
This class represents a range of values.
LLVM_ABI ConstantRange multiply(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a multiplication of a value in thi...
LLVM_ABI ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
LLVM_ABI ConstantRange castOp(Instruction::CastOps CastOp, uint32_t BitWidth) const
Return a new range representing the possible values resulting from an application of the specified ca...
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other?
static LLVM_ABI ConstantRange intrinsic(Intrinsic::ID IntrinsicID, ArrayRef< ConstantRange > Ops)
Compute range of intrinsic result for the given operand ranges.
LLVM_ABI bool isSizeLargerThan(uint64_t MaxSize) const
Compare set size of this range with Value.
static LLVM_ABI bool isIntrinsicSupported(Intrinsic::ID IntrinsicID)
Returns true if ConstantRange calculations are supported for intrinsic with IntrinsicID.
bool isSingleElement() const
Return true if this set contains exactly one member.
LLVM_ABI ConstantRange truncate(uint32_t BitWidth, unsigned NoWrapKind=0) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
LLVM_ABI bool isAllNonNegative() const
Return true if all values in this range are non-negative.
static LLVM_ABI ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other)
Produce the smallest range such that all values that may satisfy the given predicate with any value c...
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
LLVM_ABI ConstantRange inverse() const
Return a new range that is the logical not of the current set.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
LLVM_ABI APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
LLVM_ABI ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
static LLVM_ABI ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp, const ConstantRange &Other, unsigned NoWrapKind)
Produce the largest range containing all X such that "X BinOp Y" is guaranteed not to wrap (overflow)...
LLVM_ABI ConstantRange binaryOp(Instruction::BinaryOps BinOp, const ConstantRange &Other) const
Return a new range representing the possible values resulting from an application of the specified bi...
LLVM_ABI ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:74
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
static DebugLoc getTemporary()
Definition DebugLoc.h:160
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
Definition DenseMap.h:75
Implements a dense probed hash-table based set.
Definition DenseSet.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:159
This instruction extracts a struct member or array element value from an aggregate value.
unsigned getNumIndices() const
idx_iterator idx_begin() const
This class represents a freeze function that returns random concrete value if an operand is either a ...
Argument * arg_iterator
Definition Function.h:73
static GEPNoWrapFlags noUnsignedWrap()
void applyUpdatesPermissive(ArrayRef< UpdateT > Updates)
Submit updates to all available trees.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Module * getParent()
Get the module that this global value is contained inside of...
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2811
This instruction inserts a struct field of array element value into an aggregate value.
Value * getInsertedValueOperand()
unsigned getNumIndices() const
idx_iterator idx_begin() const
Base class for instruction visitors.
Definition InstVisitor.h:78
void visit(Iterator Start, Iterator End)
Definition InstVisitor.h:87
LLVM_ABI void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
LLVM_ABI bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
LLVM_ABI unsigned getNumSuccessors() const LLVM_READONLY
Return the number of successors that this instruction has.
LLVM_ABI bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
LLVM_ABI void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
LLVM_ABI BasicBlock * getSuccessor(unsigned Idx) const LLVM_READONLY
Return the specified successor. This instruction must be a terminator.
LLVM_ABI void setNonNeg(bool b=true)
Set or clear the nneg flag on this instruction, which must be a zext instruction.
LLVM_ABI bool hasNonNeg() const LLVM_READONLY
Determine whether the nneg flag is set.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
LLVM_ABI void setIsExact(bool b=true)
Set or clear the exact flag on this instruction, which must be an operator which supports this flag.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
bool isSpecialTerminator() const
Invoke instruction.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
An instruction for reading from memory.
Metadata node.
Definition Metadata.h:1080
This class implements a map that also provides access to all stored values in a deterministic order.
Definition MapVector.h:36
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
LLVM_ABI std::optional< PredicateConstraint > getConstraint() const
Fetch condition in the form of PredicateConstraint, if possible.
Return a value (possibly void), from a function.
Helper class for SCCPSolver.
const MapVector< Function *, ValueLatticeElement > & getTrackedRetVals() const
const PredicateBase * getPredicateInfoFor(Instruction *I)
std::vector< ValueLatticeElement > getStructLatticeValueFor(Value *V) const
bool resolvedUndef(Instruction &I)
void markFunctionUnreachable(Function *F)
bool markBlockExecutable(BasicBlock *BB)
bool resolvedUndefsIn(Function &F)
While solving the dataflow for a function, we don't compute a result for operations with an undef ope...
Constant * getConstant(const ValueLatticeElement &LV, Type *Ty) const
SCCPInstVisitor(const DataLayout &DL, std::function< const TargetLibraryInfo &(Function &)> GetTLI, LLVMContext &Ctx)
const DenseMap< GlobalVariable *, ValueLatticeElement > & getTrackedGlobals() const
const ValueLatticeElement & getLatticeValueFor(Value *V) const
void removeLatticeValueFor(Value *V)
void trackValueOfArgument(Argument *A)
void visitCallInst(CallInst &I)
void markOverdefined(Value *V)
bool isArgumentTrackedFunction(Function *F)
void addTrackedFunction(Function *F)
void solveWhileResolvedUndefsIn(Module &M)
void trackValueOfGlobalVariable(GlobalVariable *GV)
Constant * getConstantOrNull(Value *V) const
void removeSSACopies(Function &F)
const SmallPtrSet< Function *, 16 > & getMRVFunctionsTracked() const
const SmallPtrSetImpl< Function * > & getArgumentTrackedFunctions() const
void resetLatticeValueFor(CallBase *Call)
Invalidate the Lattice Value of Call and its users after specializing the call.
ValueLatticeElement getArgAttributeVL(Argument *A)
void addPredicateInfo(Function &F, DominatorTree &DT, AssumptionCache &AC)
void addToMustPreserveReturnsInFunctions(Function *F)
void addArgumentTrackedFunction(Function *F)
bool isStructLatticeConstant(Function *F, StructType *STy)
void solveWhileResolvedUndefsIn(SmallVectorImpl< Function * > &WorkList)
bool isBlockExecutable(BasicBlock *BB) const
bool mustPreserveReturn(Function *F)
void setLatticeValueForSpecializationArguments(Function *F, const SmallVectorImpl< ArgInfo > &Args)
bool isEdgeFeasible(BasicBlock *From, BasicBlock *To) const
SCCPSolver - This interface class is a general purpose solver for Sparse Conditional Constant Propaga...
Definition SCCPSolver.h:66
LLVM_ABI void visitCall(CallInst &I)
LLVM_ABI ~SCCPSolver()
LLVM_ABI void resetLatticeValueFor(CallBase *Call)
Invalidate the Lattice Value of Call and its users after specializing the call.
LLVM_ABI void trackValueOfGlobalVariable(GlobalVariable *GV)
trackValueOfGlobalVariable - Clients can use this method to inform the SCCPSolver that it should trac...
LLVM_ABI bool tryToReplaceWithConstant(Value *V)
LLVM_ABI void inferArgAttributes() const
LLVM_ABI bool isStructLatticeConstant(Function *F, StructType *STy)
LLVM_ABI void addPredicateInfo(Function &F, DominatorTree &DT, AssumptionCache &AC)
LLVM_ABI void solve()
Solve - Solve for constants and executable blocks.
LLVM_ABI void visit(Instruction *I)
LLVM_ABI void trackValueOfArgument(Argument *V)
trackValueOfArgument - Mark the specified argument overdefined unless it has a range attribute.
LLVM_ABI const DenseMap< GlobalVariable *, ValueLatticeElement > & getTrackedGlobals() const
getTrackedGlobals - Get and return the set of inferred initializers for global variables.
LLVM_ABI void addTrackedFunction(Function *F)
addTrackedFunction - If the SCCP solver is supposed to track calls into and out of the specified func...
LLVM_ABI void solveWhileResolvedUndefsIn(Module &M)
LLVM_ABI const PredicateBase * getPredicateInfoFor(Instruction *I)
LLVM_ABI const SmallPtrSetImpl< Function * > & getArgumentTrackedFunctions() const
LLVM_ABI const SmallPtrSet< Function *, 16 > & getMRVFunctionsTracked() const
getMRVFunctionsTracked - Get the set of functions which return multiple values tracked by the pass.
LLVM_ABI bool resolvedUndefsIn(Function &F)
resolvedUndefsIn - While solving the dataflow for a function, we assume that branches on undef values...
LLVM_ABI void addArgumentTrackedFunction(Function *F)
LLVM_ABI void solveWhileResolvedUndefs()
LLVM_ABI void removeLatticeValueFor(Value *V)
LLVM_ABI std::vector< ValueLatticeElement > getStructLatticeValueFor(Value *V) const
LLVM_ABI Constant * getConstantOrNull(Value *V) const
Return either a Constant or nullptr for a given Value.
LLVM_ABI bool simplifyInstsInBlock(BasicBlock &BB, SmallPtrSetImpl< Value * > &InsertedValues, Statistic &InstRemovedStat, Statistic &InstReplacedStat)
LLVM_ABI Constant * getConstant(const ValueLatticeElement &LV, Type *Ty) const
Helper to return a Constant if LV is either a constant or a constant range with a single element.
LLVM_ABI const ValueLatticeElement & getLatticeValueFor(Value *V) const
LLVM_ABI void addToMustPreserveReturnsInFunctions(Function *F)
Add function to the list of functions whose return cannot be modified.
LLVM_ABI bool removeNonFeasibleEdges(BasicBlock *BB, DomTreeUpdater &DTU, BasicBlock *&NewUnreachableBB) const
LLVM_ABI bool isBlockExecutable(BasicBlock *BB) const
LLVM_ABI void inferReturnAttributes() const
LLVM_ABI bool markBlockExecutable(BasicBlock *BB)
markBlockExecutable - This method can be used by clients to mark all of the blocks that are known to ...
LLVM_ABI void setLatticeValueForSpecializationArguments(Function *F, const SmallVectorImpl< ArgInfo > &Args)
Set the Lattice Value for the arguments of a specialization F.
static LLVM_ABI bool isConstant(const ValueLatticeElement &LV)
LLVM_ABI const MapVector< Function *, ValueLatticeElement > & getTrackedRetVals() const
getTrackedRetVals - Get the inferred return value map.
LLVM_ABI bool isEdgeFeasible(BasicBlock *From, BasicBlock *To) const
LLVM_ABI bool mustPreserveReturn(Function *F)
Returns true if the return of the given function cannot be modified.
static LLVM_ABI bool isOverdefined(const ValueLatticeElement &LV)
LLVM_ABI void markFunctionUnreachable(Function *F)
Mark all of the blocks in function F non-executable.
LLVM_ABI bool isArgumentTrackedFunction(Function *F)
Returns true if the given function is in the solver's set of argument-tracked functions.
LLVM_ABI SCCPSolver(const DataLayout &DL, std::function< const TargetLibraryInfo &(Function &)> GetTLI, LLVMContext &Ctx)
LLVM_ABI void markOverdefined(Value *V)
markOverdefined - Mark the specified value overdefined.
LLVM_ABI void removeSSACopies(Function &F)
This class represents the LLVM 'select' instruction.
size_type size() const
Definition SmallPtrSet.h:99
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
iterator begin() const
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:339
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
void reserve(size_type N)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Class to represent struct types.
unsigned getNumElements() const
Random access to the elements.
A wrapper class to simplify modification of SwitchInst cases along with their prof branch_weights met...
Provides information about what library functions are available for the current target.
This class represents a truncation of integer types.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:46
bool isPointerTy() const
True if this is an instance of PointerType.
Definition Type.h:284
bool isSingleValueType() const
Return true if the type is a valid type for a register in codegen.
Definition Type.h:313
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:278
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:236
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:141
static UncondBrInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
This function has undefined behavior.
Value * getOperand(unsigned i) const
Definition User.h:207
This class represents lattice values for constants.
static ValueLatticeElement getRange(ConstantRange CR, bool MayIncludeUndef=false)
LLVM_ABI Constant * getCompare(CmpInst::Predicate Pred, Type *Ty, const ValueLatticeElement &Other, const DataLayout &DL) const
Returns true, false or undef constants, or nullptr if the comparison cannot be evaluated.
bool isConstantRangeIncludingUndef() const
static ValueLatticeElement getNot(Constant *C)
ConstantRange asConstantRange(unsigned BW, bool UndefAllowed=false) const
void setNumRangeExtensions(unsigned N)
const ConstantRange & getConstantRange(bool UndefAllowed=true) const
Returns the constant range for this value.
bool isConstantRange(bool UndefAllowed=true) const
Returns true if this value is a constant range.
static ValueLatticeElement get(Constant *C)
unsigned getNumRangeExtensions() const
Constant * getNotConstant() const
LLVM_ABI ValueLatticeElement intersect(const ValueLatticeElement &Other) const
Combine two sets of facts about the same value into a single set of facts.
Constant * getConstant() const
bool mergeIn(const ValueLatticeElement &RHS, MergeOptions Opts=MergeOptions())
Updates this object to approximate both this object and RHS.
bool markConstant(Constant *V, bool MayIncludeUndef=false)
static ValueLatticeElement getOverdefined()
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI std::string getNameOrAsOperand() const
Definition Value.cpp:464
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
iterator_range< user_iterator > users()
Definition Value.h:427
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
Represents an op.with.overflow intrinsic.
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
CallInst * Call
Changed
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
Definition APInt.h:2273
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
OneUse_match< SubPat > m_OneUse(const SubPat &SP)
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
bool match(Val *V, const Pattern &P)
cst_pred_ty< is_negated_power2 > m_NegatedPower2()
Match an integer or vector negated power-of-2.
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1739
static bool replaceSignedInst(SCCPSolver &Solver, SmallPtrSetImpl< Value * > &InsertedValues, Instruction &Inst)
Try to replace signed instructions with their unsigned equivalent.
LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
auto successors(const MachineBasicBlock *BB)
static ConstantRange getRange(Value *Op, SCCPSolver &Solver, const SmallPtrSetImpl< Value * > &InsertedValues)
Helper for getting ranges from Solver.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
Definition STLExtras.h:634
LLVM_ABI Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified argum...
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:753
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1746
NoopStatistic Statistic
Definition Statistic.h:162
LLVM_ABI Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI bool wouldInstructionBeTriviallyDead(const Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction would have no side effects if it was not used.
Definition Local.cpp:422
FunctionAddr VTableAddr Count
Definition InstrProf.h:139
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Sub
Subtraction of integers.
DWARFExpression::Operation Op
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1917
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinab...
BumpPtrAllocatorImpl<> BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
Definition Allocator.h:383
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
static bool refineInstruction(SCCPSolver &Solver, const SmallPtrSetImpl< Value * > &InsertedValues, Instruction &Inst)
Try to use Inst's value range from Solver to infer the NUW flag.
static void inferAttribute(Function *F, unsigned AttrIndex, const ValueLatticeElement &Val)
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:870
Struct to control some aspects related to merging constant ranges.
MergeOptions & setMaxWidenSteps(unsigned Steps=1)