LLVM 19.0.0git
SimplifyIndVar.cpp
Go to the documentation of this file.
//===-- SimplifyIndVar.cpp - Induction variable simplification ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements induction variable simplification. It does
// not define any actual pass or policy, but provides a single function to
// simplify a loop's induction variables based on ScalarEvolution.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/SimplifyIndVar.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
30
31using namespace llvm;
32using namespace llvm::PatternMatch;
33
34#define DEBUG_TYPE "indvars"
35
36STATISTIC(NumElimIdentity, "Number of IV identities eliminated");
37STATISTIC(NumElimOperand, "Number of IV operands folded into a use");
38STATISTIC(NumFoldedUser, "Number of IV users folded into a constant");
39STATISTIC(NumElimRem , "Number of IV remainder operations eliminated");
41 NumSimplifiedSDiv,
42 "Number of IV signed division operations converted to unsigned division");
44 NumSimplifiedSRem,
45 "Number of IV signed remainder operations converted to unsigned remainder");
46STATISTIC(NumElimCmp , "Number of IV comparisons eliminated");
47
48namespace {
49 /// This is a utility for simplifying induction variables
50 /// based on ScalarEvolution. It is the primary instrument of the
51 /// IndvarSimplify pass, but it may also be directly invoked to cleanup after
52 /// other loop passes that preserve SCEV.
53 class SimplifyIndvar {
54 Loop *L;
55 LoopInfo *LI;
57 DominatorTree *DT;
61
62 bool Changed = false;
63 bool RunUnswitching = false;
64
65 public:
66 SimplifyIndvar(Loop *Loop, ScalarEvolution *SE, DominatorTree *DT,
68 SCEVExpander &Rewriter,
70 : L(Loop), LI(LI), SE(SE), DT(DT), TTI(TTI), Rewriter(Rewriter),
71 DeadInsts(Dead) {
72 assert(LI && "IV simplification requires LoopInfo");
73 }
74
75 bool hasChanged() const { return Changed; }
76 bool runUnswitching() const { return RunUnswitching; }
77
78 /// Iteratively perform simplification on a worklist of users of the
79 /// specified induction variable. This is the top-level driver that applies
80 /// all simplifications to users of an IV.
81 void simplifyUsers(PHINode *CurrIV, IVVisitor *V = nullptr);
82
83 void pushIVUsers(Instruction *Def,
85 SmallVectorImpl<std::pair<Instruction *, Instruction *>>
86 &SimpleIVUsers);
87
88 Value *foldIVUser(Instruction *UseInst, Instruction *IVOperand);
89
90 bool eliminateIdentitySCEV(Instruction *UseInst, Instruction *IVOperand);
91 bool replaceIVUserWithLoopInvariant(Instruction *UseInst);
92 bool replaceFloatIVWithIntegerIV(Instruction *UseInst);
93
94 bool eliminateOverflowIntrinsic(WithOverflowInst *WO);
95 bool eliminateSaturatingIntrinsic(SaturatingInst *SI);
96 bool eliminateTrunc(TruncInst *TI);
97 bool eliminateIVUser(Instruction *UseInst, Instruction *IVOperand);
98 bool makeIVComparisonInvariant(ICmpInst *ICmp, Instruction *IVOperand);
99 void eliminateIVComparison(ICmpInst *ICmp, Instruction *IVOperand);
100 void simplifyIVRemainder(BinaryOperator *Rem, Instruction *IVOperand,
101 bool IsSigned);
102 void replaceRemWithNumerator(BinaryOperator *Rem);
103 void replaceRemWithNumeratorOrZero(BinaryOperator *Rem);
104 void replaceSRemWithURem(BinaryOperator *Rem);
105 bool eliminateSDiv(BinaryOperator *SDiv);
106 bool strengthenBinaryOp(BinaryOperator *BO, Instruction *IVOperand);
107 bool strengthenOverflowingOperation(BinaryOperator *OBO,
108 Instruction *IVOperand);
109 bool strengthenRightShift(BinaryOperator *BO, Instruction *IVOperand);
110 };
111}
112
113/// Find a point in code which dominates all given instructions. We can safely
114/// assume that, whatever fact we can prove at the found point, this fact is
115/// also true for each of the given instructions.
117 DominatorTree &DT) {
118 Instruction *CommonDom = nullptr;
119 for (auto *Insn : Instructions)
120 CommonDom =
121 CommonDom ? DT.findNearestCommonDominator(CommonDom, Insn) : Insn;
122 assert(CommonDom && "Common dominator not found?");
123 return CommonDom;
124}
125
126/// Fold an IV operand into its use. This removes increments of an
127/// aligned IV when used by a instruction that ignores the low bits.
128///
129/// IVOperand is guaranteed SCEVable, but UseInst may not be.
130///
131/// Return the operand of IVOperand for this induction variable if IVOperand can
132/// be folded (in case more folding opportunities have been exposed).
133/// Otherwise return null.
134Value *SimplifyIndvar::foldIVUser(Instruction *UseInst, Instruction *IVOperand) {
135 Value *IVSrc = nullptr;
136 const unsigned OperIdx = 0;
137 const SCEV *FoldedExpr = nullptr;
138 bool MustDropExactFlag = false;
139 switch (UseInst->getOpcode()) {
140 default:
141 return nullptr;
142 case Instruction::UDiv:
143 case Instruction::LShr:
144 // We're only interested in the case where we know something about
145 // the numerator and have a constant denominator.
146 if (IVOperand != UseInst->getOperand(OperIdx) ||
147 !isa<ConstantInt>(UseInst->getOperand(1)))
148 return nullptr;
149
150 // Attempt to fold a binary operator with constant operand.
151 // e.g. ((I + 1) >> 2) => I >> 2
152 if (!isa<BinaryOperator>(IVOperand)
153 || !isa<ConstantInt>(IVOperand->getOperand(1)))
154 return nullptr;
155
156 IVSrc = IVOperand->getOperand(0);
157 // IVSrc must be the (SCEVable) IV, since the other operand is const.
158 assert(SE->isSCEVable(IVSrc->getType()) && "Expect SCEVable IV operand");
159
160 ConstantInt *D = cast<ConstantInt>(UseInst->getOperand(1));
161 if (UseInst->getOpcode() == Instruction::LShr) {
162 // Get a constant for the divisor. See createSCEV.
163 uint32_t BitWidth = cast<IntegerType>(UseInst->getType())->getBitWidth();
164 if (D->getValue().uge(BitWidth))
165 return nullptr;
166
167 D = ConstantInt::get(UseInst->getContext(),
168 APInt::getOneBitSet(BitWidth, D->getZExtValue()));
169 }
170 const auto *LHS = SE->getSCEV(IVSrc);
171 const auto *RHS = SE->getSCEV(D);
172 FoldedExpr = SE->getUDivExpr(LHS, RHS);
173 // We might have 'exact' flag set at this point which will no longer be
174 // correct after we make the replacement.
175 if (UseInst->isExact() && LHS != SE->getMulExpr(FoldedExpr, RHS))
176 MustDropExactFlag = true;
177 }
178 // We have something that might fold it's operand. Compare SCEVs.
179 if (!SE->isSCEVable(UseInst->getType()))
180 return nullptr;
181
182 // Bypass the operand if SCEV can prove it has no effect.
183 if (SE->getSCEV(UseInst) != FoldedExpr)
184 return nullptr;
185
186 LLVM_DEBUG(dbgs() << "INDVARS: Eliminated IV operand: " << *IVOperand
187 << " -> " << *UseInst << '\n');
188
189 UseInst->setOperand(OperIdx, IVSrc);
190 assert(SE->getSCEV(UseInst) == FoldedExpr && "bad SCEV with folded oper");
191
192 if (MustDropExactFlag)
193 UseInst->dropPoisonGeneratingFlags();
194
195 ++NumElimOperand;
196 Changed = true;
197 if (IVOperand->use_empty())
198 DeadInsts.emplace_back(IVOperand);
199 return IVSrc;
200}
201
202bool SimplifyIndvar::makeIVComparisonInvariant(ICmpInst *ICmp,
203 Instruction *IVOperand) {
204 auto *Preheader = L->getLoopPreheader();
205 if (!Preheader)
206 return false;
207 unsigned IVOperIdx = 0;
208 ICmpInst::Predicate Pred = ICmp->getPredicate();
209 if (IVOperand != ICmp->getOperand(0)) {
210 // Swapped
211 assert(IVOperand == ICmp->getOperand(1) && "Can't find IVOperand");
212 IVOperIdx = 1;
213 Pred = ICmpInst::getSwappedPredicate(Pred);
214 }
215
216 // Get the SCEVs for the ICmp operands (in the specific context of the
217 // current loop)
218 const Loop *ICmpLoop = LI->getLoopFor(ICmp->getParent());
219 const SCEV *S = SE->getSCEVAtScope(ICmp->getOperand(IVOperIdx), ICmpLoop);
220 const SCEV *X = SE->getSCEVAtScope(ICmp->getOperand(1 - IVOperIdx), ICmpLoop);
221 auto LIP = SE->getLoopInvariantPredicate(Pred, S, X, L, ICmp);
222 if (!LIP)
223 return false;
224 ICmpInst::Predicate InvariantPredicate = LIP->Pred;
225 const SCEV *InvariantLHS = LIP->LHS;
226 const SCEV *InvariantRHS = LIP->RHS;
227
228 // Do not generate something ridiculous.
229 auto *PHTerm = Preheader->getTerminator();
230 if (Rewriter.isHighCostExpansion({InvariantLHS, InvariantRHS}, L,
231 2 * SCEVCheapExpansionBudget, TTI, PHTerm) ||
232 !Rewriter.isSafeToExpandAt(InvariantLHS, PHTerm) ||
233 !Rewriter.isSafeToExpandAt(InvariantRHS, PHTerm))
234 return false;
235 auto *NewLHS =
236 Rewriter.expandCodeFor(InvariantLHS, IVOperand->getType(), PHTerm);
237 auto *NewRHS =
238 Rewriter.expandCodeFor(InvariantRHS, IVOperand->getType(), PHTerm);
239 LLVM_DEBUG(dbgs() << "INDVARS: Simplified comparison: " << *ICmp << '\n');
240 ICmp->setPredicate(InvariantPredicate);
241 ICmp->setOperand(0, NewLHS);
242 ICmp->setOperand(1, NewRHS);
243 RunUnswitching = true;
244 return true;
245}
246
247/// SimplifyIVUsers helper for eliminating useless
248/// comparisons against an induction variable.
249void SimplifyIndvar::eliminateIVComparison(ICmpInst *ICmp,
250 Instruction *IVOperand) {
251 unsigned IVOperIdx = 0;
252 ICmpInst::Predicate Pred = ICmp->getPredicate();
253 ICmpInst::Predicate OriginalPred = Pred;
254 if (IVOperand != ICmp->getOperand(0)) {
255 // Swapped
256 assert(IVOperand == ICmp->getOperand(1) && "Can't find IVOperand");
257 IVOperIdx = 1;
258 Pred = ICmpInst::getSwappedPredicate(Pred);
259 }
260
261 // Get the SCEVs for the ICmp operands (in the specific context of the
262 // current loop)
263 const Loop *ICmpLoop = LI->getLoopFor(ICmp->getParent());
264 const SCEV *S = SE->getSCEVAtScope(ICmp->getOperand(IVOperIdx), ICmpLoop);
265 const SCEV *X = SE->getSCEVAtScope(ICmp->getOperand(1 - IVOperIdx), ICmpLoop);
266
267 // If the condition is always true or always false in the given context,
268 // replace it with a constant value.
270 for (auto *U : ICmp->users())
271 Users.push_back(cast<Instruction>(U));
272 const Instruction *CtxI = findCommonDominator(Users, *DT);
273 if (auto Ev = SE->evaluatePredicateAt(Pred, S, X, CtxI)) {
274 SE->forgetValue(ICmp);
276 DeadInsts.emplace_back(ICmp);
277 LLVM_DEBUG(dbgs() << "INDVARS: Eliminated comparison: " << *ICmp << '\n');
278 } else if (makeIVComparisonInvariant(ICmp, IVOperand)) {
279 // fallthrough to end of function
280 } else if (ICmpInst::isSigned(OriginalPred) &&
281 SE->isKnownNonNegative(S) && SE->isKnownNonNegative(X)) {
282 // If we were unable to make anything above, all we can is to canonicalize
283 // the comparison hoping that it will open the doors for other
284 // optimizations. If we find out that we compare two non-negative values,
285 // we turn the instruction's predicate to its unsigned version. Note that
286 // we cannot rely on Pred here unless we check if we have swapped it.
287 assert(ICmp->getPredicate() == OriginalPred && "Predicate changed?");
288 LLVM_DEBUG(dbgs() << "INDVARS: Turn to unsigned comparison: " << *ICmp
289 << '\n');
290 ICmp->setPredicate(ICmpInst::getUnsignedPredicate(OriginalPred));
291 } else
292 return;
293
294 ++NumElimCmp;
295 Changed = true;
296}
297
298bool SimplifyIndvar::eliminateSDiv(BinaryOperator *SDiv) {
299 // Get the SCEVs for the ICmp operands.
300 auto *N = SE->getSCEV(SDiv->getOperand(0));
301 auto *D = SE->getSCEV(SDiv->getOperand(1));
302
303 // Simplify unnecessary loops away.
304 const Loop *L = LI->getLoopFor(SDiv->getParent());
305 N = SE->getSCEVAtScope(N, L);
306 D = SE->getSCEVAtScope(D, L);
307
308 // Replace sdiv by udiv if both of the operands are non-negative
309 if (SE->isKnownNonNegative(N) && SE->isKnownNonNegative(D)) {
310 auto *UDiv = BinaryOperator::Create(
311 BinaryOperator::UDiv, SDiv->getOperand(0), SDiv->getOperand(1),
312 SDiv->getName() + ".udiv", SDiv->getIterator());
313 UDiv->setIsExact(SDiv->isExact());
314 SDiv->replaceAllUsesWith(UDiv);
315 UDiv->setDebugLoc(SDiv->getDebugLoc());
316 LLVM_DEBUG(dbgs() << "INDVARS: Simplified sdiv: " << *SDiv << '\n');
317 ++NumSimplifiedSDiv;
318 Changed = true;
319 DeadInsts.push_back(SDiv);
320 return true;
321 }
322
323 return false;
324}
325
326// i %s n -> i %u n if i >= 0 and n >= 0
327void SimplifyIndvar::replaceSRemWithURem(BinaryOperator *Rem) {
328 auto *N = Rem->getOperand(0), *D = Rem->getOperand(1);
329 auto *URem = BinaryOperator::Create(BinaryOperator::URem, N, D,
330 Rem->getName() + ".urem", Rem->getIterator());
331 Rem->replaceAllUsesWith(URem);
332 URem->setDebugLoc(Rem->getDebugLoc());
333 LLVM_DEBUG(dbgs() << "INDVARS: Simplified srem: " << *Rem << '\n');
334 ++NumSimplifiedSRem;
335 Changed = true;
336 DeadInsts.emplace_back(Rem);
337}
338
339// i % n --> i if i is in [0,n).
340void SimplifyIndvar::replaceRemWithNumerator(BinaryOperator *Rem) {
341 Rem->replaceAllUsesWith(Rem->getOperand(0));
342 LLVM_DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
343 ++NumElimRem;
344 Changed = true;
345 DeadInsts.emplace_back(Rem);
346}
347
348// (i+1) % n --> (i+1)==n?0:(i+1) if i is in [0,n).
349void SimplifyIndvar::replaceRemWithNumeratorOrZero(BinaryOperator *Rem) {
350 auto *T = Rem->getType();
351 auto *N = Rem->getOperand(0), *D = Rem->getOperand(1);
352 ICmpInst *ICmp = new ICmpInst(Rem->getIterator(), ICmpInst::ICMP_EQ, N, D);
353 SelectInst *Sel =
354 SelectInst::Create(ICmp, ConstantInt::get(T, 0), N, "iv.rem", Rem->getIterator());
355 Rem->replaceAllUsesWith(Sel);
356 Sel->setDebugLoc(Rem->getDebugLoc());
357 LLVM_DEBUG(dbgs() << "INDVARS: Simplified rem: " << *Rem << '\n');
358 ++NumElimRem;
359 Changed = true;
360 DeadInsts.emplace_back(Rem);
361}
362
363/// SimplifyIVUsers helper for eliminating useless remainder operations
364/// operating on an induction variable or replacing srem by urem.
365void SimplifyIndvar::simplifyIVRemainder(BinaryOperator *Rem,
366 Instruction *IVOperand,
367 bool IsSigned) {
368 auto *NValue = Rem->getOperand(0);
369 auto *DValue = Rem->getOperand(1);
370 // We're only interested in the case where we know something about
371 // the numerator, unless it is a srem, because we want to replace srem by urem
372 // in general.
373 bool UsedAsNumerator = IVOperand == NValue;
374 if (!UsedAsNumerator && !IsSigned)
375 return;
376
377 const SCEV *N = SE->getSCEV(NValue);
378
379 // Simplify unnecessary loops away.
380 const Loop *ICmpLoop = LI->getLoopFor(Rem->getParent());
381 N = SE->getSCEVAtScope(N, ICmpLoop);
382
383 bool IsNumeratorNonNegative = !IsSigned || SE->isKnownNonNegative(N);
384
385 // Do not proceed if the Numerator may be negative
386 if (!IsNumeratorNonNegative)
387 return;
388
389 const SCEV *D = SE->getSCEV(DValue);
390 D = SE->getSCEVAtScope(D, ICmpLoop);
391
392 if (UsedAsNumerator) {
393 auto LT = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
394 if (SE->isKnownPredicate(LT, N, D)) {
395 replaceRemWithNumerator(Rem);
396 return;
397 }
398
399 auto *T = Rem->getType();
400 const auto *NLessOne = SE->getMinusSCEV(N, SE->getOne(T));
401 if (SE->isKnownPredicate(LT, NLessOne, D)) {
402 replaceRemWithNumeratorOrZero(Rem);
403 return;
404 }
405 }
406
407 // Try to replace SRem with URem, if both N and D are known non-negative.
408 // Since we had already check N, we only need to check D now
409 if (!IsSigned || !SE->isKnownNonNegative(D))
410 return;
411
412 replaceSRemWithURem(Rem);
413}
414
415bool SimplifyIndvar::eliminateOverflowIntrinsic(WithOverflowInst *WO) {
416 const SCEV *LHS = SE->getSCEV(WO->getLHS());
417 const SCEV *RHS = SE->getSCEV(WO->getRHS());
418 if (!SE->willNotOverflow(WO->getBinaryOp(), WO->isSigned(), LHS, RHS))
419 return false;
420
421 // Proved no overflow, nuke the overflow check and, if possible, the overflow
422 // intrinsic as well.
423
425 WO->getBinaryOp(), WO->getLHS(), WO->getRHS(), "", WO->getIterator());
426
427 if (WO->isSigned())
428 NewResult->setHasNoSignedWrap(true);
429 else
430 NewResult->setHasNoUnsignedWrap(true);
431
433
434 for (auto *U : WO->users()) {
435 if (auto *EVI = dyn_cast<ExtractValueInst>(U)) {
436 if (EVI->getIndices()[0] == 1)
437 EVI->replaceAllUsesWith(ConstantInt::getFalse(WO->getContext()));
438 else {
439 assert(EVI->getIndices()[0] == 0 && "Only two possibilities!");
440 EVI->replaceAllUsesWith(NewResult);
441 NewResult->setDebugLoc(EVI->getDebugLoc());
442 }
443 ToDelete.push_back(EVI);
444 }
445 }
446
447 for (auto *EVI : ToDelete)
448 EVI->eraseFromParent();
449
450 if (WO->use_empty())
451 WO->eraseFromParent();
452
453 Changed = true;
454 return true;
455}
456
457bool SimplifyIndvar::eliminateSaturatingIntrinsic(SaturatingInst *SI) {
458 const SCEV *LHS = SE->getSCEV(SI->getLHS());
459 const SCEV *RHS = SE->getSCEV(SI->getRHS());
460 if (!SE->willNotOverflow(SI->getBinaryOp(), SI->isSigned(), LHS, RHS))
461 return false;
462
464 SI->getBinaryOp(), SI->getLHS(), SI->getRHS(), SI->getName(), SI->getIterator());
465 if (SI->isSigned())
466 BO->setHasNoSignedWrap();
467 else
469
470 SI->replaceAllUsesWith(BO);
471 BO->setDebugLoc(SI->getDebugLoc());
472 DeadInsts.emplace_back(SI);
473 Changed = true;
474 return true;
475}
476
477bool SimplifyIndvar::eliminateTrunc(TruncInst *TI) {
478 // It is always legal to replace
479 // icmp <pred> i32 trunc(iv), n
480 // with
481 // icmp <pred> i64 sext(trunc(iv)), sext(n), if pred is signed predicate.
482 // Or with
483 // icmp <pred> i64 zext(trunc(iv)), zext(n), if pred is unsigned predicate.
484 // Or with either of these if pred is an equality predicate.
485 //
486 // If we can prove that iv == sext(trunc(iv)) or iv == zext(trunc(iv)) for
487 // every comparison which uses trunc, it means that we can replace each of
488 // them with comparison of iv against sext/zext(n). We no longer need trunc
489 // after that.
490 //
491 // TODO: Should we do this if we can widen *some* comparisons, but not all
492 // of them? Sometimes it is enough to enable other optimizations, but the
493 // trunc instruction will stay in the loop.
494 Value *IV = TI->getOperand(0);
495 Type *IVTy = IV->getType();
496 const SCEV *IVSCEV = SE->getSCEV(IV);
497 const SCEV *TISCEV = SE->getSCEV(TI);
498
499 // Check if iv == zext(trunc(iv)) and if iv == sext(trunc(iv)). If so, we can
500 // get rid of trunc
501 bool DoesSExtCollapse = false;
502 bool DoesZExtCollapse = false;
503 if (IVSCEV == SE->getSignExtendExpr(TISCEV, IVTy))
504 DoesSExtCollapse = true;
505 if (IVSCEV == SE->getZeroExtendExpr(TISCEV, IVTy))
506 DoesZExtCollapse = true;
507
508 // If neither sext nor zext does collapse, it is not profitable to do any
509 // transform. Bail.
510 if (!DoesSExtCollapse && !DoesZExtCollapse)
511 return false;
512
513 // Collect users of the trunc that look like comparisons against invariants.
514 // Bail if we find something different.
516 for (auto *U : TI->users()) {
517 // We don't care about users in unreachable blocks.
518 if (isa<Instruction>(U) &&
519 !DT->isReachableFromEntry(cast<Instruction>(U)->getParent()))
520 continue;
521 ICmpInst *ICI = dyn_cast<ICmpInst>(U);
522 if (!ICI) return false;
523 assert(L->contains(ICI->getParent()) && "LCSSA form broken?");
524 if (!(ICI->getOperand(0) == TI && L->isLoopInvariant(ICI->getOperand(1))) &&
525 !(ICI->getOperand(1) == TI && L->isLoopInvariant(ICI->getOperand(0))))
526 return false;
527 // If we cannot get rid of trunc, bail.
528 if (ICI->isSigned() && !DoesSExtCollapse)
529 return false;
530 if (ICI->isUnsigned() && !DoesZExtCollapse)
531 return false;
532 // For equality, either signed or unsigned works.
533 ICmpUsers.push_back(ICI);
534 }
535
536 auto CanUseZExt = [&](ICmpInst *ICI) {
537 // Unsigned comparison can be widened as unsigned.
538 if (ICI->isUnsigned())
539 return true;
540 // Is it profitable to do zext?
541 if (!DoesZExtCollapse)
542 return false;
543 // For equality, we can safely zext both parts.
544 if (ICI->isEquality())
545 return true;
546 // Otherwise we can only use zext when comparing two non-negative or two
547 // negative values. But in practice, we will never pass DoesZExtCollapse
548 // check for a negative value, because zext(trunc(x)) is non-negative. So
549 // it only make sense to check for non-negativity here.
550 const SCEV *SCEVOP1 = SE->getSCEV(ICI->getOperand(0));
551 const SCEV *SCEVOP2 = SE->getSCEV(ICI->getOperand(1));
552 return SE->isKnownNonNegative(SCEVOP1) && SE->isKnownNonNegative(SCEVOP2);
553 };
554 // Replace all comparisons against trunc with comparisons against IV.
555 for (auto *ICI : ICmpUsers) {
556 bool IsSwapped = L->isLoopInvariant(ICI->getOperand(0));
557 auto *Op1 = IsSwapped ? ICI->getOperand(0) : ICI->getOperand(1);
558 IRBuilder<> Builder(ICI);
559 Value *Ext = nullptr;
560 // For signed/unsigned predicate, replace the old comparison with comparison
561 // of immediate IV against sext/zext of the invariant argument. If we can
562 // use either sext or zext (i.e. we are dealing with equality predicate),
563 // then prefer zext as a more canonical form.
564 // TODO: If we see a signed comparison which can be turned into unsigned,
565 // we can do it here for canonicalization purposes.
566 ICmpInst::Predicate Pred = ICI->getPredicate();
567 if (IsSwapped) Pred = ICmpInst::getSwappedPredicate(Pred);
568 if (CanUseZExt(ICI)) {
569 assert(DoesZExtCollapse && "Unprofitable zext?");
570 Ext = Builder.CreateZExt(Op1, IVTy, "zext");
572 } else {
573 assert(DoesSExtCollapse && "Unprofitable sext?");
574 Ext = Builder.CreateSExt(Op1, IVTy, "sext");
575 assert(Pred == ICmpInst::getSignedPredicate(Pred) && "Must be signed!");
576 }
577 bool Changed;
578 L->makeLoopInvariant(Ext, Changed);
579 (void)Changed;
580 auto *NewCmp = Builder.CreateICmp(Pred, IV, Ext);
581 ICI->replaceAllUsesWith(NewCmp);
582 DeadInsts.emplace_back(ICI);
583 }
584
585 // Trunc no longer needed.
587 DeadInsts.emplace_back(TI);
588 return true;
589}
590
591/// Eliminate an operation that consumes a simple IV and has no observable
592/// side-effect given the range of IV values. IVOperand is guaranteed SCEVable,
593/// but UseInst may not be.
594bool SimplifyIndvar::eliminateIVUser(Instruction *UseInst,
595 Instruction *IVOperand) {
596 if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
597 eliminateIVComparison(ICmp, IVOperand);
598 return true;
599 }
600 if (BinaryOperator *Bin = dyn_cast<BinaryOperator>(UseInst)) {
601 bool IsSRem = Bin->getOpcode() == Instruction::SRem;
602 if (IsSRem || Bin->getOpcode() == Instruction::URem) {
603 simplifyIVRemainder(Bin, IVOperand, IsSRem);
604 return true;
605 }
606
607 if (Bin->getOpcode() == Instruction::SDiv)
608 return eliminateSDiv(Bin);
609 }
610
611 if (auto *WO = dyn_cast<WithOverflowInst>(UseInst))
612 if (eliminateOverflowIntrinsic(WO))
613 return true;
614
615 if (auto *SI = dyn_cast<SaturatingInst>(UseInst))
616 if (eliminateSaturatingIntrinsic(SI))
617 return true;
618
619 if (auto *TI = dyn_cast<TruncInst>(UseInst))
620 if (eliminateTrunc(TI))
621 return true;
622
623 if (eliminateIdentitySCEV(UseInst, IVOperand))
624 return true;
625
626 return false;
627}
628
630 if (auto *BB = L->getLoopPreheader())
631 return BB->getTerminator();
632
633 return Hint;
634}
635
636/// Replace the UseInst with a loop invariant expression if it is safe.
637bool SimplifyIndvar::replaceIVUserWithLoopInvariant(Instruction *I) {
638 if (!SE->isSCEVable(I->getType()))
639 return false;
640
641 // Get the symbolic expression for this instruction.
642 const SCEV *S = SE->getSCEV(I);
643
644 if (!SE->isLoopInvariant(S, L))
645 return false;
646
647 // Do not generate something ridiculous even if S is loop invariant.
648 if (Rewriter.isHighCostExpansion(S, L, SCEVCheapExpansionBudget, TTI, I))
649 return false;
650
651 auto *IP = GetLoopInvariantInsertPosition(L, I);
652
653 if (!Rewriter.isSafeToExpandAt(S, IP)) {
654 LLVM_DEBUG(dbgs() << "INDVARS: Can not replace IV user: " << *I
655 << " with non-speculable loop invariant: " << *S << '\n');
656 return false;
657 }
658
659 auto *Invariant = Rewriter.expandCodeFor(S, I->getType(), IP);
660 bool NeedToEmitLCSSAPhis = false;
661 if (!LI->replacementPreservesLCSSAForm(I, Invariant))
662 NeedToEmitLCSSAPhis = true;
663
664 I->replaceAllUsesWith(Invariant);
665 LLVM_DEBUG(dbgs() << "INDVARS: Replace IV user: " << *I
666 << " with loop invariant: " << *S << '\n');
667
668 if (NeedToEmitLCSSAPhis) {
669 SmallVector<Instruction *, 1> NeedsLCSSAPhis;
670 NeedsLCSSAPhis.push_back(cast<Instruction>(Invariant));
671 formLCSSAForInstructions(NeedsLCSSAPhis, *DT, *LI, SE);
672 LLVM_DEBUG(dbgs() << " INDVARS: Replacement breaks LCSSA form"
673 << " inserting LCSSA Phis" << '\n');
674 }
675 ++NumFoldedUser;
676 Changed = true;
677 DeadInsts.emplace_back(I);
678 return true;
679}
680
681/// Eliminate redundant type cast between integer and float.
682bool SimplifyIndvar::replaceFloatIVWithIntegerIV(Instruction *UseInst) {
683 if (UseInst->getOpcode() != CastInst::SIToFP &&
684 UseInst->getOpcode() != CastInst::UIToFP)
685 return false;
686
687 Instruction *IVOperand = cast<Instruction>(UseInst->getOperand(0));
688 // Get the symbolic expression for this instruction.
689 const SCEV *IV = SE->getSCEV(IVOperand);
690 int MaskBits;
691 if (UseInst->getOpcode() == CastInst::SIToFP)
692 MaskBits = (int)SE->getSignedRange(IV).getMinSignedBits();
693 else
694 MaskBits = (int)SE->getUnsignedRange(IV).getActiveBits();
695 int DestNumSigBits = UseInst->getType()->getFPMantissaWidth();
696 if (MaskBits <= DestNumSigBits) {
697 for (User *U : UseInst->users()) {
698 // Match for fptosi/fptoui of sitofp and with same type.
699 auto *CI = dyn_cast<CastInst>(U);
700 if (!CI)
701 continue;
702
703 CastInst::CastOps Opcode = CI->getOpcode();
704 if (Opcode != CastInst::FPToSI && Opcode != CastInst::FPToUI)
705 continue;
706
707 Value *Conv = nullptr;
708 if (IVOperand->getType() != CI->getType()) {
709 IRBuilder<> Builder(CI);
710 StringRef Name = IVOperand->getName();
711 // To match InstCombine logic, we only need sext if both fptosi and
712 // sitofp are used. If one of them is unsigned, then we can use zext.
713 if (SE->getTypeSizeInBits(IVOperand->getType()) >
714 SE->getTypeSizeInBits(CI->getType())) {
715 Conv = Builder.CreateTrunc(IVOperand, CI->getType(), Name + ".trunc");
716 } else if (Opcode == CastInst::FPToUI ||
717 UseInst->getOpcode() == CastInst::UIToFP) {
718 Conv = Builder.CreateZExt(IVOperand, CI->getType(), Name + ".zext");
719 } else {
720 Conv = Builder.CreateSExt(IVOperand, CI->getType(), Name + ".sext");
721 }
722 } else
723 Conv = IVOperand;
724
725 CI->replaceAllUsesWith(Conv);
726 DeadInsts.push_back(CI);
727 LLVM_DEBUG(dbgs() << "INDVARS: Replace IV user: " << *CI
728 << " with: " << *Conv << '\n');
729
730 ++NumFoldedUser;
731 Changed = true;
732 }
733 }
734
735 return Changed;
736}
737
738/// Eliminate any operation that SCEV can prove is an identity function.
739bool SimplifyIndvar::eliminateIdentitySCEV(Instruction *UseInst,
740 Instruction *IVOperand) {
741 if (!SE->isSCEVable(UseInst->getType()) ||
742 UseInst->getType() != IVOperand->getType())
743 return false;
744
745 const SCEV *UseSCEV = SE->getSCEV(UseInst);
746 if (UseSCEV != SE->getSCEV(IVOperand))
747 return false;
748
749 // getSCEV(X) == getSCEV(Y) does not guarantee that X and Y are related in the
750 // dominator tree, even if X is an operand to Y. For instance, in
751 //
752 // %iv = phi i32 {0,+,1}
753 // br %cond, label %left, label %merge
754 //
755 // left:
756 // %X = add i32 %iv, 0
757 // br label %merge
758 //
759 // merge:
760 // %M = phi (%X, %iv)
761 //
762 // getSCEV(%M) == getSCEV(%X) == {0,+,1}, but %X does not dominate %M, and
763 // %M.replaceAllUsesWith(%X) would be incorrect.
764
765 if (isa<PHINode>(UseInst))
766 // If UseInst is not a PHI node then we know that IVOperand dominates
767 // UseInst directly from the legality of SSA.
768 if (!DT || !DT->dominates(IVOperand, UseInst))
769 return false;
770
771 if (!LI->replacementPreservesLCSSAForm(UseInst, IVOperand))
772 return false;
773
774 // Make sure the operand is not more poisonous than the instruction.
775 if (!impliesPoison(IVOperand, UseInst)) {
776 SmallVector<Instruction *> DropPoisonGeneratingInsts;
777 if (!SE->canReuseInstruction(UseSCEV, IVOperand, DropPoisonGeneratingInsts))
778 return false;
779
780 for (Instruction *I : DropPoisonGeneratingInsts)
781 I->dropPoisonGeneratingAnnotations();
782 }
783
784 LLVM_DEBUG(dbgs() << "INDVARS: Eliminated identity: " << *UseInst << '\n');
785
786 SE->forgetValue(UseInst);
787 UseInst->replaceAllUsesWith(IVOperand);
788 ++NumElimIdentity;
789 Changed = true;
790 DeadInsts.emplace_back(UseInst);
791 return true;
792}
793
794bool SimplifyIndvar::strengthenBinaryOp(BinaryOperator *BO,
795 Instruction *IVOperand) {
796 return (isa<OverflowingBinaryOperator>(BO) &&
797 strengthenOverflowingOperation(BO, IVOperand)) ||
798 (isa<ShlOperator>(BO) && strengthenRightShift(BO, IVOperand));
799}
800
801/// Annotate BO with nsw / nuw if it provably does not signed-overflow /
802/// unsigned-overflow. Returns true if anything changed, false otherwise.
803bool SimplifyIndvar::strengthenOverflowingOperation(BinaryOperator *BO,
804 Instruction *IVOperand) {
805 auto Flags = SE->getStrengthenedNoWrapFlagsFromBinOp(
806 cast<OverflowingBinaryOperator>(BO));
807
808 if (!Flags)
809 return false;
810
815
816 // The getStrengthenedNoWrapFlagsFromBinOp() check inferred additional nowrap
817 // flags on addrecs while performing zero/sign extensions. We could call
818 // forgetValue() here to make sure those flags also propagate to any other
819 // SCEV expressions based on the addrec. However, this can have pathological
820 // compile-time impact, see https://bugs.llvm.org/show_bug.cgi?id=50384.
821 return true;
822}
823
824/// Annotate the Shr in (X << IVOperand) >> C as exact using the
825/// information from the IV's range. Returns true if anything changed, false
826/// otherwise.
827bool SimplifyIndvar::strengthenRightShift(BinaryOperator *BO,
828 Instruction *IVOperand) {
829 if (BO->getOpcode() == Instruction::Shl) {
830 bool Changed = false;
831 ConstantRange IVRange = SE->getUnsignedRange(SE->getSCEV(IVOperand));
832 for (auto *U : BO->users()) {
833 const APInt *C;
834 if (match(U,
835 m_AShr(m_Shl(m_Value(), m_Specific(IVOperand)), m_APInt(C))) ||
836 match(U,
837 m_LShr(m_Shl(m_Value(), m_Specific(IVOperand)), m_APInt(C)))) {
838 BinaryOperator *Shr = cast<BinaryOperator>(U);
839 if (!Shr->isExact() && IVRange.getUnsignedMin().uge(*C)) {
840 Shr->setIsExact(true);
841 Changed = true;
842 }
843 }
844 }
845 return Changed;
846 }
847
848 return false;
849}
850
851/// Add all uses of Def to the current IV's worklist.
852void SimplifyIndvar::pushIVUsers(
854 SmallVectorImpl<std::pair<Instruction *, Instruction *>> &SimpleIVUsers) {
855 for (User *U : Def->users()) {
856 Instruction *UI = cast<Instruction>(U);
857
858 // Avoid infinite or exponential worklist processing.
859 // Also ensure unique worklist users.
860 // If Def is a LoopPhi, it may not be in the Simplified set, so check for
861 // self edges first.
862 if (UI == Def)
863 continue;
864
865 // Only change the current Loop, do not change the other parts (e.g. other
866 // Loops).
867 if (!L->contains(UI))
868 continue;
869
870 // Do not push the same instruction more than once.
871 if (!Simplified.insert(UI).second)
872 continue;
873
874 SimpleIVUsers.push_back(std::make_pair(UI, Def));
875 }
876}
877
878/// Return true if this instruction generates a simple SCEV
879/// expression in terms of that IV.
880///
881/// This is similar to IVUsers' isInteresting() but processes each instruction
882/// non-recursively when the operand is already known to be a simpleIVUser.
883///
884static bool isSimpleIVUser(Instruction *I, const Loop *L, ScalarEvolution *SE) {
885 if (!SE->isSCEVable(I->getType()))
886 return false;
887
888 // Get the symbolic expression for this instruction.
889 const SCEV *S = SE->getSCEV(I);
890
891 // Only consider affine recurrences.
892 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S);
893 if (AR && AR->getLoop() == L)
894 return true;
895
896 return false;
897}
898
/// Iteratively perform simplification on a worklist of users
/// of the specified induction variable. Each successive simplification may push
/// more users which may themselves be candidates for simplification.
///
/// This algorithm does not require IVUsers analysis. Instead, it simplifies
/// instructions in-place during analysis. Rather than rewriting induction
/// variables bottom-up from their users, it transforms a chain of IVUsers
/// top-down, updating the IR only when it encounters a clear optimization
/// opportunity.
///
/// Once DisableIVRewrite is default, LSR will be the only client of IVUsers.
///
void SimplifyIndvar::simplifyUsers(PHINode *CurrIV, IVVisitor *V) {
  if (!SE->isSCEVable(CurrIV->getType()))
    return;

  // Instructions processed by SimplifyIndvar for CurrIV.
  // NOTE(review): these two declarations were dropped by the renderer;
  // restored from upstream LLVM — confirm against the repository.
  SmallPtrSet<Instruction*,16> Simplified;

  // Use-def pairs of IV users waiting to be processed for CurrIV.
  SmallVector<std::pair<Instruction*, Instruction*>, 8> SimpleIVUsers;

  // Push users of the current LoopPhi. In rare cases, pushIVUsers may be
  // called multiple times for the same LoopPhi. This is the proper thing to
  // do for loop header phis that use each other.
  pushIVUsers(CurrIV, Simplified, SimpleIVUsers);

  while (!SimpleIVUsers.empty()) {
    std::pair<Instruction*, Instruction*> UseOper =
      SimpleIVUsers.pop_back_val();
    Instruction *UseInst = UseOper.first;

    // If a user of the IndVar is trivially dead, we prefer just to mark it dead
    // rather than try to do some complex analysis or transformation (such as
    // widening) basing on it.
    // TODO: Propagate TLI and pass it here to handle more cases.
    if (isInstructionTriviallyDead(UseInst, /* TLI */ nullptr)) {
      DeadInsts.emplace_back(UseInst);
      continue;
    }

    // Bypass back edges to avoid extra work.
    if (UseInst == CurrIV) continue;

    // Try to replace UseInst with a loop invariant before any other
    // simplifications.
    if (replaceIVUserWithLoopInvariant(UseInst))
      continue;

    // Go further for the bitcast 'ptrtoint ptr to i64' or if the cast is done
    // by truncation
    if ((isa<PtrToIntInst>(UseInst)) || (isa<TruncInst>(UseInst)))
      for (Use &U : UseInst->uses()) {
        Instruction *User = cast<Instruction>(U.getUser());
        if (replaceIVUserWithLoopInvariant(User))
          break; // done replacing
      }

    // Fold the IV operand into UseInst as far as possible; each successful
    // fold may expose another foldable operand.
    Instruction *IVOperand = UseOper.second;
    for (unsigned N = 0; IVOperand; ++N) {
      assert(N <= Simplified.size() && "runaway iteration");
      (void) N;

      Value *NewOper = foldIVUser(UseInst, IVOperand);
      if (!NewOper)
        break; // done folding
      IVOperand = dyn_cast<Instruction>(NewOper);
    }
    if (!IVOperand)
      continue;

    if (eliminateIVUser(UseInst, IVOperand)) {
      pushIVUsers(IVOperand, Simplified, SimpleIVUsers);
      continue;
    }

    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(UseInst)) {
      if (strengthenBinaryOp(BO, IVOperand)) {
        // re-queue uses of the now modified binary operator and fall
        // through to the checks that remain.
        pushIVUsers(IVOperand, Simplified, SimpleIVUsers);
      }
    }

    // Try to use integer induction for FPToSI of float induction directly.
    if (replaceFloatIVWithIntegerIV(UseInst)) {
      // Re-queue the potentially new direct uses of IVOperand.
      pushIVUsers(IVOperand, Simplified, SimpleIVUsers);
      continue;
    }

    CastInst *Cast = dyn_cast<CastInst>(UseInst);
    if (V && Cast) {
      V->visitCast(Cast);
      continue;
    }
    if (isSimpleIVUser(UseInst, L, SE)) {
      pushIVUsers(UseInst, Simplified, SimpleIVUsers);
    }
  }
}
1000
1001namespace llvm {
1002
1004
/// Simplify instructions that use this induction variable
/// by using ScalarEvolution to analyze the IV's recurrence.
/// Returns a pair where the first entry indicates that the function makes
/// changes and the second entry indicates that it introduced new opportunities
/// for loop unswitching.
// NOTE(review): the \p Dead parameter line was dropped by the renderer;
// restored from upstream LLVM — confirm against the repository. Dead
// instructions discovered during simplification are appended to Dead; the
// caller is responsible for deleting them.
std::pair<bool, bool> simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE,
                                        DominatorTree *DT, LoopInfo *LI,
                                        const TargetTransformInfo *TTI,
                                        SmallVectorImpl<WeakTrackingVH> &Dead,
                                        SCEVExpander &Rewriter, IVVisitor *V) {
  SimplifyIndvar SIV(LI->getLoopFor(CurrIV->getParent()), SE, DT, LI, TTI,
                     Rewriter, Dead);
  SIV.simplifyUsers(CurrIV, V);
  return {SIV.hasChanged(), SIV.runUnswitching()};
}
1020
/// Simplify users of induction variables within this
/// loop. This does not actually change or add IVs.
// NOTE(review): the leading signature lines were dropped by the renderer;
// restored from upstream LLVM — confirm against the repository.
bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
                     LoopInfo *LI, const TargetTransformInfo *TTI,
                     SmallVectorImpl<WeakTrackingVH> &Dead) {
  SCEVExpander Rewriter(*SE, SE->getDataLayout(), "indvars");
#ifndef NDEBUG
  Rewriter.setDebugType(DEBUG_TYPE);
#endif
  bool Changed = false;
  // Simplify each header phi (candidate IV) in turn; unswitching
  // opportunities reported by simplifyUsersOfIV are ignored at this level.
  for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
    const auto &[C, _] =
        simplifyUsersOfIV(cast<PHINode>(I), SE, DT, LI, TTI, Dead, Rewriter);
    Changed |= C;
  }
  return Changed;
}
1038
1039} // namespace llvm
1040
1041namespace {
1042//===----------------------------------------------------------------------===//
1043// Widen Induction Variables - Extend the width of an IV to cover its
1044// widest uses.
1045//===----------------------------------------------------------------------===//
1046
/// Widens a narrow induction variable (OrigPhi) to WideType, replacing narrow
/// uses with wide equivalents so the narrow IV and its extends can die.
class WidenIV {
  // Parameters
  PHINode *OrigPhi; // The narrow IV being widened.
  Type *WideType;   // The integer type to widen to.

  // Context
  LoopInfo *LI;
  Loop *L;          // Loop headed by OrigPhi's block (asserted in the ctor).
  ScalarEvolution *SE;
  DominatorTree *DT;

  // Does the module have any calls to the llvm.experimental.guard intrinsic
  // at all? If not we can avoid scanning instructions looking for guards.
  bool HasGuards;

  bool UsePostIncrementRanges;

  // Statistics
  unsigned NumElimExt = 0;  // Extends eliminated by widening.
  unsigned NumWidened = 0;  // Narrow uses replaced by wide ones.

  // Result
  PHINode *WidePhi = nullptr;
  Instruction *WideInc = nullptr;
  const SCEV *WideIncExpr = nullptr;
  // NOTE(review): the next two members were dropped by the renderer; restored
  // from upstream LLVM — confirm against the repository.
  SmallVectorImpl<WeakTrackingVH> &DeadInsts;

  SmallPtrSet<Instruction *,16> Widened;

  enum class ExtendKind { Zero, Sign, Unknown };

  // A map tracking the kind of extension used to widen each narrow IV
  // and narrow IV user.
  // Key: pointer to a narrow IV or IV user.
  // Value: the kind of extension used to widen this Instruction.
  DenseMap<AssertingVH<Instruction>, ExtendKind> ExtendKindMap;

  using DefUserPair = std::pair<AssertingVH<Value>, AssertingVH<Instruction>>;

  // A map with control-dependent ranges for post increment IV uses. The key is
  // a pair of IV def and a use of this def denoting the context. The value is
  // a ConstantRange representing possible values of the def at the given
  // context.
  DenseMap<DefUserPair, ConstantRange> PostIncRangeInfos;

  // Return the recorded range for (Def, UseI), or std::nullopt if none.
  std::optional<ConstantRange> getPostIncRangeInfo(Value *Def,
                                                   Instruction *UseI) {
    DefUserPair Key(Def, UseI);
    auto It = PostIncRangeInfos.find(Key);
    return It == PostIncRangeInfos.end()
               ? std::optional<ConstantRange>(std::nullopt)
               : std::optional<ConstantRange>(It->second);
  }

  void calculatePostIncRanges(PHINode *OrigPhi);
  void calculatePostIncRange(Instruction *NarrowDef, Instruction *NarrowUser);

  // Record range R for (Def, UseI); if an entry already exists, narrow it by
  // intersecting with R.
  void updatePostIncRangeInfo(Value *Def, Instruction *UseI, ConstantRange R) {
    DefUserPair Key(Def, UseI);
    auto It = PostIncRangeInfos.find(Key);
    if (It == PostIncRangeInfos.end())
      PostIncRangeInfos.insert({Key, R});
    else
      It->second = R.intersectWith(It->second);
  }

public:
  /// Record a link in the Narrow IV def-use chain along with the WideIV that
  /// computes the same value as the Narrow IV def. This avoids caching Use*
  /// pointers.
  struct NarrowIVDefUse {
    Instruction *NarrowDef = nullptr;
    Instruction *NarrowUse = nullptr;
    Instruction *WideDef = nullptr;

    // True if the narrow def is never negative. Tracking this information lets
    // us use a sign extension instead of a zero extension or vice versa, when
    // profitable and legal.
    bool NeverNegative = false;

    NarrowIVDefUse(Instruction *ND, Instruction *NU, Instruction *WD,
                   bool NeverNegative)
        : NarrowDef(ND), NarrowUse(NU), WideDef(WD),
          NeverNegative(NeverNegative) {}
  };

  // NOTE(review): the second parameter line was dropped by the renderer;
  // restored from upstream LLVM — confirm against the repository.
  WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv,
          DominatorTree *DTree, SmallVectorImpl<WeakTrackingVH> &DI,
          bool HasGuards, bool UsePostIncrementRanges = true);

  /// Build the wide IV and rewrite narrow uses; returns the wide phi (see the
  /// out-of-line definition for details).
  PHINode *createWideIV(SCEVExpander &Rewriter);

  unsigned getNumElimExt() { return NumElimExt; };
  unsigned getNumWidened() { return NumWidened; };

protected:
  Value *createExtendInst(Value *NarrowOper, Type *WideType, bool IsSigned,
                          Instruction *Use);

  Instruction *cloneIVUser(NarrowIVDefUse DU, const SCEVAddRecExpr *WideAR);
  Instruction *cloneArithmeticIVUser(NarrowIVDefUse DU,
                                     const SCEVAddRecExpr *WideAR);
  Instruction *cloneBitwiseIVUser(NarrowIVDefUse DU);

  ExtendKind getExtendKind(Instruction *I);

  using WidenedRecTy = std::pair<const SCEVAddRecExpr *, ExtendKind>;

  WidenedRecTy getWideRecurrence(NarrowIVDefUse DU);

  WidenedRecTy getExtendedOperandRecurrence(NarrowIVDefUse DU);

  const SCEV *getSCEVByOpCode(const SCEV *LHS, const SCEV *RHS,
                              unsigned OpCode) const;

  Instruction *widenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter,
                          PHINode *OrigPhi, PHINode *WidePhi);
  void truncateIVUse(NarrowIVDefUse DU);

  bool widenLoopCompare(NarrowIVDefUse DU);
  bool widenWithVariantUse(NarrowIVDefUse DU);

  void pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef);

private:
  SmallVector<NarrowIVDefUse, 8> NarrowIVUsers;
};
1174} // namespace
1175
/// Determine the insertion point for this user. By default, insert immediately
/// before the user. SCEVExpander or LICM will hoist loop invariants out of the
/// loop. For PHI nodes, there may be multiple uses, so compute the nearest
/// common dominator for the incoming blocks. A nullptr can be returned if no
/// viable location is found: it may happen if User is a PHI and Def only comes
/// to this PHI from unreachable blocks.
// NOTE(review): the signature's first line was dropped by the renderer;
// restored from upstream LLVM — confirm against the repository.
static Instruction *getInsertPointForUses(Instruction *User, Value *Def,
                                          DominatorTree *DT, LoopInfo *LI) {
  PHINode *PHI = dyn_cast<PHINode>(User);
  if (!PHI)
    return User;

  Instruction *InsertPt = nullptr;
  for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i) {
    if (PHI->getIncomingValue(i) != Def)
      continue;

    BasicBlock *InsertBB = PHI->getIncomingBlock(i);

    // Incoming edges from unreachable blocks impose no constraint.
    if (!DT->isReachableFromEntry(InsertBB))
      continue;

    if (!InsertPt) {
      InsertPt = InsertBB->getTerminator();
      continue;
    }
    InsertBB = DT->findNearestCommonDominator(InsertPt->getParent(), InsertBB);
    InsertPt = InsertBB->getTerminator();
  }

  // If we have skipped all inputs, it means that Def only comes to Phi from
  // unreachable blocks.
  if (!InsertPt)
    return nullptr;

  auto *DefI = dyn_cast<Instruction>(Def);
  if (!DefI)
    return InsertPt;

  assert(DT->dominates(DefI, InsertPt) && "def does not dominate all uses");

  auto *L = LI->getLoopFor(DefI->getParent());
  assert(!L || L->contains(LI->getLoopFor(InsertPt->getParent())));

  // Walk up the dominator tree from the chosen block until we re-enter Def's
  // loop; insert at that block's terminator so the point stays in Def's loop.
  for (auto *DTN = (*DT)[InsertPt->getParent()]; DTN; DTN = DTN->getIDom())
    if (LI->getLoopFor(DTN->getBlock()) == L)
      return DTN->getBlock()->getTerminator();

  llvm_unreachable("DefI dominates InsertPt!");
}
1226
/// Construct a widener for the narrow IV described by \p WI.
// NOTE(review): the second parameter line was dropped by the renderer;
// restored from upstream LLVM — confirm against the repository.
WidenIV::WidenIV(const WideIVInfo &WI, LoopInfo *LInfo, ScalarEvolution *SEv,
                 DominatorTree *DTree, SmallVectorImpl<WeakTrackingVH> &DI,
                 bool HasGuards, bool UsePostIncrementRanges)
    : OrigPhi(WI.NarrowIV), WideType(WI.WidestNativeType), LI(LInfo),
      L(LI->getLoopFor(OrigPhi->getParent())), SE(SEv), DT(DTree),
      HasGuards(HasGuards), UsePostIncrementRanges(UsePostIncrementRanges),
      DeadInsts(DI) {
  assert(L->getHeader() == OrigPhi->getParent() && "Phi must be an IV");
  // Seed the extension-kind map with the IV itself.
  ExtendKindMap[OrigPhi] = WI.IsSigned ? ExtendKind::Sign : ExtendKind::Zero;
}
1237
1238Value *WidenIV::createExtendInst(Value *NarrowOper, Type *WideType,
1239 bool IsSigned, Instruction *Use) {
1240 // Set the debug location and conservative insertion point.
1241 IRBuilder<> Builder(Use);
1242 // Hoist the insertion point into loop preheaders as far as possible.
1243 for (const Loop *L = LI->getLoopFor(Use->getParent());
1244 L && L->getLoopPreheader() && L->isLoopInvariant(NarrowOper);
1245 L = L->getParentLoop())
1246 Builder.SetInsertPoint(L->getLoopPreheader()->getTerminator());
1247
1248 return IsSigned ? Builder.CreateSExt(NarrowOper, WideType) :
1249 Builder.CreateZExt(NarrowOper, WideType);
1250}
1251
1252/// Instantiate a wide operation to replace a narrow operation. This only needs
1253/// to handle operations that can evaluation to SCEVAddRec. It can safely return
1254/// 0 for any operation we decide not to clone.
1255Instruction *WidenIV::cloneIVUser(WidenIV::NarrowIVDefUse DU,
1256 const SCEVAddRecExpr *WideAR) {
1257 unsigned Opcode = DU.NarrowUse->getOpcode();
1258 switch (Opcode) {
1259 default:
1260 return nullptr;
1261 case Instruction::Add:
1262 case Instruction::Mul:
1263 case Instruction::UDiv:
1264 case Instruction::Sub:
1265 return cloneArithmeticIVUser(DU, WideAR);
1266
1267 case Instruction::And:
1268 case Instruction::Or:
1269 case Instruction::Xor:
1270 case Instruction::Shl:
1271 case Instruction::LShr:
1272 case Instruction::AShr:
1273 return cloneBitwiseIVUser(DU);
1274 }
1275}
1276
1277Instruction *WidenIV::cloneBitwiseIVUser(WidenIV::NarrowIVDefUse DU) {
1278 Instruction *NarrowUse = DU.NarrowUse;
1279 Instruction *NarrowDef = DU.NarrowDef;
1280 Instruction *WideDef = DU.WideDef;
1281
1282 LLVM_DEBUG(dbgs() << "Cloning bitwise IVUser: " << *NarrowUse << "\n");
1283
1284 // Replace NarrowDef operands with WideDef. Otherwise, we don't know anything
1285 // about the narrow operand yet so must insert a [sz]ext. It is probably loop
1286 // invariant and will be folded or hoisted. If it actually comes from a
1287 // widened IV, it should be removed during a future call to widenIVUse.
1288 bool IsSigned = getExtendKind(NarrowDef) == ExtendKind::Sign;
1289 Value *LHS = (NarrowUse->getOperand(0) == NarrowDef)
1290 ? WideDef
1291 : createExtendInst(NarrowUse->getOperand(0), WideType,
1292 IsSigned, NarrowUse);
1293 Value *RHS = (NarrowUse->getOperand(1) == NarrowDef)
1294 ? WideDef
1295 : createExtendInst(NarrowUse->getOperand(1), WideType,
1296 IsSigned, NarrowUse);
1297
1298 auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
1299 auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
1300 NarrowBO->getName());
1301 IRBuilder<> Builder(NarrowUse);
1302 Builder.Insert(WideBO);
1303 WideBO->copyIRFlags(NarrowBO);
1304 return WideBO;
1305}
1306
/// Clone an arithmetic narrow use (add/sub/mul/udiv) with wide operands,
/// using SCEV to decide whether the non-IV operand should be sign- or
/// zero-extended so the wide result still equals \p WideAR.
Instruction *WidenIV::cloneArithmeticIVUser(WidenIV::NarrowIVDefUse DU,
                                            const SCEVAddRecExpr *WideAR) {
  Instruction *NarrowUse = DU.NarrowUse;
  Instruction *NarrowDef = DU.NarrowDef;
  Instruction *WideDef = DU.WideDef;

  LLVM_DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n");

  // Which operand of the use is the narrow IV def; the other is "X" below.
  unsigned IVOpIdx = (NarrowUse->getOperand(0) == NarrowDef) ? 0 : 1;

  // We're trying to find X such that
  //
  //  Widen(NarrowDef `op` NonIVNarrowDef) == WideAR == WideDef `op.wide` X
  //
  // We guess two solutions to X, sext(NonIVNarrowDef) and zext(NonIVNarrowDef),
  // and check using SCEV if any of them are correct.

  // Returns true if extending NonIVNarrowDef according to `SignExt` is a
  // correct solution to X.
  auto GuessNonIVOperand = [&](bool SignExt) {
    const SCEV *WideLHS;
    const SCEV *WideRHS;

    auto GetExtend = [this, SignExt](const SCEV *S, Type *Ty) {
      if (SignExt)
        return SE->getSignExtendExpr(S, Ty);
      return SE->getZeroExtendExpr(S, Ty);
    };

    if (IVOpIdx == 0) {
      WideLHS = SE->getSCEV(WideDef);
      const SCEV *NarrowRHS = SE->getSCEV(NarrowUse->getOperand(1));
      WideRHS = GetExtend(NarrowRHS, WideType);
    } else {
      const SCEV *NarrowLHS = SE->getSCEV(NarrowUse->getOperand(0));
      WideLHS = GetExtend(NarrowLHS, WideType);
      WideRHS = SE->getSCEV(WideDef);
    }

    // WideUse is "WideDef `op.wide` X" as described in the comment.
    const SCEV *WideUse =
        getSCEVByOpCode(WideLHS, WideRHS, NarrowUse->getOpcode());

    return WideUse == WideAR;
  };

  // Try the extension kind recorded for the narrow def first, then the
  // opposite one; bail out if neither reproduces WideAR.
  bool SignExtend = getExtendKind(NarrowDef) == ExtendKind::Sign;
  if (!GuessNonIVOperand(SignExtend)) {
    SignExtend = !SignExtend;
    if (!GuessNonIVOperand(SignExtend))
      return nullptr;
  }

  Value *LHS = (NarrowUse->getOperand(0) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(0), WideType,
                                      SignExtend, NarrowUse);
  Value *RHS = (NarrowUse->getOperand(1) == NarrowDef)
                   ? WideDef
                   : createExtendInst(NarrowUse->getOperand(1), WideType,
                                      SignExtend, NarrowUse);

  auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
  auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
                                        NarrowBO->getName());

  IRBuilder<> Builder(NarrowUse);
  Builder.Insert(WideBO);
  WideBO->copyIRFlags(NarrowBO);
  return WideBO;
}
1378
1379WidenIV::ExtendKind WidenIV::getExtendKind(Instruction *I) {
1380 auto It = ExtendKindMap.find(I);
1381 assert(It != ExtendKindMap.end() && "Instruction not yet extended!");
1382 return It->second;
1383}
1384
1385const SCEV *WidenIV::getSCEVByOpCode(const SCEV *LHS, const SCEV *RHS,
1386 unsigned OpCode) const {
1387 switch (OpCode) {
1388 case Instruction::Add:
1389 return SE->getAddExpr(LHS, RHS);
1390 case Instruction::Sub:
1391 return SE->getMinusSCEV(LHS, RHS);
1392 case Instruction::Mul:
1393 return SE->getMulExpr(LHS, RHS);
1394 case Instruction::UDiv:
1395 return SE->getUDivExpr(LHS, RHS);
1396 default:
1397 llvm_unreachable("Unsupported opcode.");
1398 };
1399}
1400
1401namespace {
1402
// Represents an interesting integer binary operation for
// getExtendedOperandRecurrence. This may be a shl that is being treated as a
// multiply or a 'or disjoint' that is being treated as 'add nsw nuw'.
struct BinaryOp {
  unsigned Opcode;                 // Opcode to treat the operation as.
  std::array<Value *, 2> Operands; // LHS/RHS of the (possibly rewritten) op.
  bool IsNSW = false;              // No-signed-wrap holds for this view.
  bool IsNUW = false;              // No-unsigned-wrap holds for this view.

  // Capture an existing instruction verbatim, inheriting its wrap flags when
  // it is an overflowing binary operator.
  explicit BinaryOp(Instruction *Op)
      : Opcode(Op->getOpcode()),
        Operands({Op->getOperand(0), Op->getOperand(1)}) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  // Synthesize an operation that does not textually exist in the IR (e.g. a
  // shl viewed as a multiply).
  explicit BinaryOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS,
                    bool IsNSW = false, bool IsNUW = false)
      : Opcode(Opcode), Operands({LHS, RHS}), IsNSW(IsNSW), IsNUW(IsNUW) {}
};
1425
1426} // end anonymous namespace
1427
/// View \p Op as a BinaryOp that getExtendedOperandRecurrence understands:
/// add/sub/mul are taken as-is, 'or disjoint' is rewritten as 'add nuw nsw',
/// and a shl by an in-range constant is rewritten as a multiply by a power of
/// two. Returns std::nullopt for everything else.
static std::optional<BinaryOp> matchBinaryOp(Instruction *Op) {
  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    return BinaryOp(Op);
  case Instruction::Or: {
    // Convert or disjoint into add nuw nsw.
    if (cast<PossiblyDisjointInst>(Op)->isDisjoint())
      return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1),
                      /*IsNSW=*/true, /*IsNUW=*/true);
    break;
  }
  case Instruction::Shl: {
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      unsigned BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        // We can safely preserve the nuw flag in all cases. It's also safe to
        // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation
        // requires special handling. It can be preserved as long as we're not
        // left shifting by bitwidth - 1.
        bool IsNUW = Op->hasNoUnsignedWrap();
        bool IsNSW = Op->hasNoSignedWrap() &&
                     (IsNUW || SA->getValue().ult(BitWidth - 1));

        ConstantInt *X =
            ConstantInt::get(Op->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::Mul, Op->getOperand(0), X, IsNSW, IsNUW);
      }
    }

    break;
  }
  }

  return std::nullopt;
}
1471
/// No-wrap operations can transfer sign extension of their result to their
/// operands. Generate the SCEV value for the widened operation without
/// actually modifying the IR yet. If the expression after extending the
/// operands is an AddRec for this loop, return the AddRec and the kind of
/// extension used.
WidenIV::WidenedRecTy
WidenIV::getExtendedOperandRecurrence(WidenIV::NarrowIVDefUse DU) {
  // Only the add/sub/mul shapes produced by matchBinaryOp are handled.
  auto Op = matchBinaryOp(DU.NarrowUse);
  if (!Op)
    return {nullptr, ExtendKind::Unknown};

  assert((Op->Opcode == Instruction::Add || Op->Opcode == Instruction::Sub ||
          Op->Opcode == Instruction::Mul) &&
         "Unexpected opcode");

  // One operand (NarrowDef) has already been extended to WideDef. Now determine
  // if extending the other will lead to a recurrence.
  const unsigned ExtendOperIdx = Op->Operands[0] == DU.NarrowDef ? 1 : 0;
  assert(Op->Operands[1 - ExtendOperIdx] == DU.NarrowDef && "bad DU");

  // The extension kind must be justified by the matching no-wrap flag,
  // otherwise it stays Unknown and we give up below.
  ExtendKind ExtKind = getExtendKind(DU.NarrowDef);
  if (!(ExtKind == ExtendKind::Sign && Op->IsNSW) &&
      !(ExtKind == ExtendKind::Zero && Op->IsNUW)) {
    ExtKind = ExtendKind::Unknown;

    // For a non-negative NarrowDef, we can choose either type of
    // extension. We want to use the current extend kind if legal
    // (see above), and we only hit this code if we need to check
    // the opposite case.
    if (DU.NeverNegative) {
      if (Op->IsNSW) {
        ExtKind = ExtendKind::Sign;
      } else if (Op->IsNUW) {
        ExtKind = ExtendKind::Zero;
      }
    }
  }

  const SCEV *ExtendOperExpr = SE->getSCEV(Op->Operands[ExtendOperIdx]);
  if (ExtKind == ExtendKind::Sign)
    ExtendOperExpr = SE->getSignExtendExpr(ExtendOperExpr, WideType);
  else if (ExtKind == ExtendKind::Zero)
    ExtendOperExpr = SE->getZeroExtendExpr(ExtendOperExpr, WideType);
  else
    return {nullptr, ExtendKind::Unknown};

  // When creating this SCEV expr, don't apply the current operations NSW or NUW
  // flags. This instruction may be guarded by control flow that the no-wrap
  // behavior depends on. Non-control-equivalent instructions can be mapped to
  // the same SCEV expression, and it would be incorrect to transfer NSW/NUW
  // semantics to those operations.
  const SCEV *lhs = SE->getSCEV(DU.WideDef);
  const SCEV *rhs = ExtendOperExpr;

  // Let's swap operands to the initial order for the case of non-commutative
  // operations, like SUB. See PR21014.
  if (ExtendOperIdx == 0)
    std::swap(lhs, rhs);
  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(getSCEVByOpCode(lhs, rhs, Op->Opcode));

  if (!AddRec || AddRec->getLoop() != L)
    return {nullptr, ExtendKind::Unknown};

  return {AddRec, ExtKind};
}
1538
/// Is this instruction potentially interesting for further simplification after
/// widening it's type? In other words, can the extend be safely hoisted out of
/// the loop with SCEV reducing the value to a recurrence on the same loop. If
/// so, return the extended recurrence and the kind of extension used. Otherwise
/// return {nullptr, ExtendKind::Unknown}.
WidenIV::WidenedRecTy WidenIV::getWideRecurrence(WidenIV::NarrowIVDefUse DU) {
  if (!DU.NarrowUse->getType()->isIntegerTy())
    return {nullptr, ExtendKind::Unknown};

  const SCEV *NarrowExpr = SE->getSCEV(DU.NarrowUse);
  if (SE->getTypeSizeInBits(NarrowExpr->getType()) >=
      SE->getTypeSizeInBits(WideType)) {
    // NarrowUse implicitly widens its operand. e.g. a gep with a narrow
    // index. So don't follow this use.
    return {nullptr, ExtendKind::Unknown};
  }

  const SCEV *WideExpr;
  ExtendKind ExtKind;
  if (DU.NeverNegative) {
    // A non-negative value extends identically under sext and zext, so either
    // kind is legal: prefer sext if it folds to an AddRec, else try zext.
    WideExpr = SE->getSignExtendExpr(NarrowExpr, WideType);
    if (isa<SCEVAddRecExpr>(WideExpr))
      ExtKind = ExtendKind::Sign;
    else {
      WideExpr = SE->getZeroExtendExpr(NarrowExpr, WideType);
      ExtKind = ExtendKind::Zero;
    }
  } else if (getExtendKind(DU.NarrowDef) == ExtendKind::Sign) {
    WideExpr = SE->getSignExtendExpr(NarrowExpr, WideType);
    ExtKind = ExtendKind::Sign;
  } else {
    WideExpr = SE->getZeroExtendExpr(NarrowExpr, WideType);
    ExtKind = ExtendKind::Zero;
  }
  // Only an add-recurrence on this loop is interesting.
  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(WideExpr);
  if (!AddRec || AddRec->getLoop() != L)
    return {nullptr, ExtendKind::Unknown};
  return {AddRec, ExtKind};
}
1578
1579/// This IV user cannot be widened. Replace this use of the original narrow IV
1580/// with a truncation of the new wide IV to isolate and eliminate the narrow IV.
1581void WidenIV::truncateIVUse(NarrowIVDefUse DU) {
1582 auto *InsertPt = getInsertPointForUses(DU.NarrowUse, DU.NarrowDef, DT, LI);
1583 if (!InsertPt)
1584 return;
1585 LLVM_DEBUG(dbgs() << "INDVARS: Truncate IV " << *DU.WideDef << " for user "
1586 << *DU.NarrowUse << "\n");
1587 ExtendKind ExtKind = getExtendKind(DU.NarrowDef);
1588 IRBuilder<> Builder(InsertPt);
1589 Value *Trunc =
1590 Builder.CreateTrunc(DU.WideDef, DU.NarrowDef->getType(), "",
1591 DU.NeverNegative || ExtKind == ExtendKind::Zero,
1592 DU.NeverNegative || ExtKind == ExtendKind::Sign);
1593 DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, Trunc);
1594}
1595
1596/// If the narrow use is a compare instruction, then widen the compare
1597// (and possibly the other operand). The extend operation is hoisted into the
1598// loop preheader as far as possible.
1599bool WidenIV::widenLoopCompare(WidenIV::NarrowIVDefUse DU) {
1600 ICmpInst *Cmp = dyn_cast<ICmpInst>(DU.NarrowUse);
1601 if (!Cmp)
1602 return false;
1603
1604 // We can legally widen the comparison in the following two cases:
1605 //
1606 // - The signedness of the IV extension and comparison match
1607 //
1608 // - The narrow IV is always positive (and thus its sign extension is equal
1609 // to its zero extension). For instance, let's say we're zero extending
1610 // %narrow for the following use
1611 //
1612 // icmp slt i32 %narrow, %val ... (A)
1613 //
1614 // and %narrow is always positive. Then
1615 //
1616 // (A) == icmp slt i32 sext(%narrow), sext(%val)
1617 // == icmp slt i32 zext(%narrow), sext(%val)
1618 bool IsSigned = getExtendKind(DU.NarrowDef) == ExtendKind::Sign;
1619 if (!(DU.NeverNegative || IsSigned == Cmp->isSigned()))
1620 return false;
1621
1622 Value *Op = Cmp->getOperand(Cmp->getOperand(0) == DU.NarrowDef ? 1 : 0);
1623 unsigned CastWidth = SE->getTypeSizeInBits(Op->getType());
1624 unsigned IVWidth = SE->getTypeSizeInBits(WideType);
1625 assert(CastWidth <= IVWidth && "Unexpected width while widening compare.");
1626
1627 // Widen the compare instruction.
1628 DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);
1629
1630 // Widen the other operand of the compare, if necessary.
1631 if (CastWidth < IVWidth) {
1632 Value *ExtOp = createExtendInst(Op, WideType, Cmp->isSigned(), Cmp);
1633 DU.NarrowUse->replaceUsesOfWith(Op, ExtOp);
1634 }
1635 return true;
1636}
1637
1638// The widenIVUse avoids generating trunc by evaluating the use as AddRec, this
1639// will not work when:
1640// 1) SCEV traces back to an instruction inside the loop that SCEV can not
1641// expand, eg. add %indvar, (load %addr)
1642// 2) SCEV finds a loop variant, eg. add %indvar, %loopvariant
1643// While SCEV fails to avoid trunc, we can still try to use instruction
1644// combining approach to prove trunc is not required. This can be further
1645// extended with other instruction combining checks, but for now we handle the
// following case (sub can be "add" and "mul", "nsw + sext" can be "nuw + zext")
1647//
1648// Src:
1649// %c = sub nsw %b, %indvar
1650// %d = sext %c to i64
1651// Dst:
1652// %indvar.ext1 = sext %indvar to i64
1653// %m = sext %b to i64
1654// %d = sub nsw i64 %m, %indvar.ext1
1655// Therefore, as long as the result of add/sub/mul is extended to wide type, no
1656// trunc is required regardless of how %b is generated. This pattern is common
1657// when calculating address in 64 bit architecture
1658bool WidenIV::widenWithVariantUse(WidenIV::NarrowIVDefUse DU) {
1659 Instruction *NarrowUse = DU.NarrowUse;
1660 Instruction *NarrowDef = DU.NarrowDef;
1661 Instruction *WideDef = DU.WideDef;
1662
1663 // Handle the common case of add<nsw/nuw>
1664 const unsigned OpCode = NarrowUse->getOpcode();
1665 // Only Add/Sub/Mul instructions are supported.
1666 if (OpCode != Instruction::Add && OpCode != Instruction::Sub &&
1667 OpCode != Instruction::Mul)
1668 return false;
1669
1670 // The operand that is not defined by NarrowDef of DU. Let's call it the
1671 // other operand.
1672 assert((NarrowUse->getOperand(0) == NarrowDef ||
1673 NarrowUse->getOperand(1) == NarrowDef) &&
1674 "bad DU");
1675
1676 const OverflowingBinaryOperator *OBO =
1677 cast<OverflowingBinaryOperator>(NarrowUse);
1678 ExtendKind ExtKind = getExtendKind(NarrowDef);
1679 bool CanSignExtend = ExtKind == ExtendKind::Sign && OBO->hasNoSignedWrap();
1680 bool CanZeroExtend = ExtKind == ExtendKind::Zero && OBO->hasNoUnsignedWrap();
1681 auto AnotherOpExtKind = ExtKind;
1682
1683 // Check that all uses are either:
1684 // - narrow def (in case of we are widening the IV increment);
1685 // - single-input LCSSA Phis;
1686 // - comparison of the chosen type;
1687 // - extend of the chosen type (raison d'etre).
1689 SmallVector<PHINode *, 4> LCSSAPhiUsers;
1691 for (Use &U : NarrowUse->uses()) {
1692 Instruction *User = cast<Instruction>(U.getUser());
1693 if (User == NarrowDef)
1694 continue;
1695 if (!L->contains(User)) {
1696 auto *LCSSAPhi = cast<PHINode>(User);
1697 // Make sure there is only 1 input, so that we don't have to split
1698 // critical edges.
1699 if (LCSSAPhi->getNumOperands() != 1)
1700 return false;
1701 LCSSAPhiUsers.push_back(LCSSAPhi);
1702 continue;
1703 }
1704 if (auto *ICmp = dyn_cast<ICmpInst>(User)) {
1705 auto Pred = ICmp->getPredicate();
1706 // We have 3 types of predicates: signed, unsigned and equality
1707 // predicates. For equality, it's legal to widen icmp for either sign and
1708 // zero extend. For sign extend, we can also do so for signed predicates,
      // likewise for zero extend we can widen icmp for unsigned predicates.
1710 if (ExtKind == ExtendKind::Zero && ICmpInst::isSigned(Pred))
1711 return false;
1712 if (ExtKind == ExtendKind::Sign && ICmpInst::isUnsigned(Pred))
1713 return false;
1714 ICmpUsers.push_back(ICmp);
1715 continue;
1716 }
1717 if (ExtKind == ExtendKind::Sign)
1718 User = dyn_cast<SExtInst>(User);
1719 else
1720 User = dyn_cast<ZExtInst>(User);
1721 if (!User || User->getType() != WideType)
1722 return false;
1723 ExtUsers.push_back(User);
1724 }
1725 if (ExtUsers.empty()) {
1726 DeadInsts.emplace_back(NarrowUse);
1727 return true;
1728 }
1729
1730 // We'll prove some facts that should be true in the context of ext users. If
1731 // there is no users, we are done now. If there are some, pick their common
1732 // dominator as context.
1733 const Instruction *CtxI = findCommonDominator(ExtUsers, *DT);
1734
1735 if (!CanSignExtend && !CanZeroExtend) {
1736 // Because InstCombine turns 'sub nuw' to 'add' losing the no-wrap flag, we
1737 // will most likely not see it. Let's try to prove it.
1738 if (OpCode != Instruction::Add)
1739 return false;
1740 if (ExtKind != ExtendKind::Zero)
1741 return false;
1742 const SCEV *LHS = SE->getSCEV(OBO->getOperand(0));
1743 const SCEV *RHS = SE->getSCEV(OBO->getOperand(1));
1744 // TODO: Support case for NarrowDef = NarrowUse->getOperand(1).
1745 if (NarrowUse->getOperand(0) != NarrowDef)
1746 return false;
1747 if (!SE->isKnownNegative(RHS))
1748 return false;
1749 bool ProvedSubNUW = SE->isKnownPredicateAt(ICmpInst::ICMP_UGE, LHS,
1750 SE->getNegativeSCEV(RHS), CtxI);
1751 if (!ProvedSubNUW)
1752 return false;
1753 // In fact, our 'add' is 'sub nuw'. We will need to widen the 2nd operand as
1754 // neg(zext(neg(op))), which is basically sext(op).
1755 AnotherOpExtKind = ExtendKind::Sign;
1756 }
1757
1758 // Verifying that Defining operand is an AddRec
1759 const SCEV *Op1 = SE->getSCEV(WideDef);
1760 const SCEVAddRecExpr *AddRecOp1 = dyn_cast<SCEVAddRecExpr>(Op1);
1761 if (!AddRecOp1 || AddRecOp1->getLoop() != L)
1762 return false;
1763
1764 LLVM_DEBUG(dbgs() << "Cloning arithmetic IVUser: " << *NarrowUse << "\n");
1765
1766 // Generating a widening use instruction.
1767 Value *LHS =
1768 (NarrowUse->getOperand(0) == NarrowDef)
1769 ? WideDef
1770 : createExtendInst(NarrowUse->getOperand(0), WideType,
1771 AnotherOpExtKind == ExtendKind::Sign, NarrowUse);
1772 Value *RHS =
1773 (NarrowUse->getOperand(1) == NarrowDef)
1774 ? WideDef
1775 : createExtendInst(NarrowUse->getOperand(1), WideType,
1776 AnotherOpExtKind == ExtendKind::Sign, NarrowUse);
1777
1778 auto *NarrowBO = cast<BinaryOperator>(NarrowUse);
1779 auto *WideBO = BinaryOperator::Create(NarrowBO->getOpcode(), LHS, RHS,
1780 NarrowBO->getName());
1781 IRBuilder<> Builder(NarrowUse);
1782 Builder.Insert(WideBO);
1783 WideBO->copyIRFlags(NarrowBO);
1784 ExtendKindMap[NarrowUse] = ExtKind;
1785
1786 for (Instruction *User : ExtUsers) {
1787 assert(User->getType() == WideType && "Checked before!");
1788 LLVM_DEBUG(dbgs() << "INDVARS: eliminating " << *User << " replaced by "
1789 << *WideBO << "\n");
1790 ++NumElimExt;
1791 User->replaceAllUsesWith(WideBO);
1792 DeadInsts.emplace_back(User);
1793 }
1794
1795 for (PHINode *User : LCSSAPhiUsers) {
1796 assert(User->getNumOperands() == 1 && "Checked before!");
1797 Builder.SetInsertPoint(User);
1798 auto *WidePN =
1799 Builder.CreatePHI(WideBO->getType(), 1, User->getName() + ".wide");
1800 BasicBlock *LoopExitingBlock = User->getParent()->getSinglePredecessor();
1801 assert(LoopExitingBlock && L->contains(LoopExitingBlock) &&
1802 "Not a LCSSA Phi?");
1803 WidePN->addIncoming(WideBO, LoopExitingBlock);
1804 Builder.SetInsertPoint(User->getParent(),
1805 User->getParent()->getFirstInsertionPt());
1806 auto *TruncPN = Builder.CreateTrunc(WidePN, User->getType());
1807 User->replaceAllUsesWith(TruncPN);
1808 DeadInsts.emplace_back(User);
1809 }
1810
1811 for (ICmpInst *User : ICmpUsers) {
1812 Builder.SetInsertPoint(User);
1813 auto ExtendedOp = [&](Value * V)->Value * {
1814 if (V == NarrowUse)
1815 return WideBO;
1816 if (ExtKind == ExtendKind::Zero)
1817 return Builder.CreateZExt(V, WideBO->getType());
1818 else
1819 return Builder.CreateSExt(V, WideBO->getType());
1820 };
1821 auto Pred = User->getPredicate();
1822 auto *LHS = ExtendedOp(User->getOperand(0));
1823 auto *RHS = ExtendedOp(User->getOperand(1));
1824 auto *WideCmp =
1825 Builder.CreateICmp(Pred, LHS, RHS, User->getName() + ".wide");
1826 User->replaceAllUsesWith(WideCmp);
1827 DeadInsts.emplace_back(User);
1828 }
1829
1830 return true;
1831}
1832
1833/// Determine whether an individual user of the narrow IV can be widened. If so,
1834/// return the wide clone of the user.
1835Instruction *WidenIV::widenIVUse(WidenIV::NarrowIVDefUse DU,
1836 SCEVExpander &Rewriter, PHINode *OrigPhi,
1837 PHINode *WidePhi) {
1838 assert(ExtendKindMap.count(DU.NarrowDef) &&
1839 "Should already know the kind of extension used to widen NarrowDef");
1840
1841 // This narrow use can be widened by a sext if it's non-negative or its narrow
1842 // def was widened by a sext. Same for zext.
1843 bool CanWidenBySExt =
1844 DU.NeverNegative || getExtendKind(DU.NarrowDef) == ExtendKind::Sign;
1845 bool CanWidenByZExt =
1846 DU.NeverNegative || getExtendKind(DU.NarrowDef) == ExtendKind::Zero;
1847
1848 // Stop traversing the def-use chain at inner-loop phis or post-loop phis.
1849 if (PHINode *UsePhi = dyn_cast<PHINode>(DU.NarrowUse)) {
1850 if (LI->getLoopFor(UsePhi->getParent()) != L) {
1851 // For LCSSA phis, sink the truncate outside the loop.
1852 // After SimplifyCFG most loop exit targets have a single predecessor.
1853 // Otherwise fall back to a truncate within the loop.
1854 if (UsePhi->getNumOperands() != 1)
1855 truncateIVUse(DU);
1856 else {
1857 // Widening the PHI requires us to insert a trunc. The logical place
1858 // for this trunc is in the same BB as the PHI. This is not possible if
1859 // the BB is terminated by a catchswitch.
1860 if (isa<CatchSwitchInst>(UsePhi->getParent()->getTerminator()))
1861 return nullptr;
1862
1863 PHINode *WidePhi =
1864 PHINode::Create(DU.WideDef->getType(), 1, UsePhi->getName() + ".wide",
1865 UsePhi->getIterator());
1866 WidePhi->addIncoming(DU.WideDef, UsePhi->getIncomingBlock(0));
1867 BasicBlock *WidePhiBB = WidePhi->getParent();
1868 IRBuilder<> Builder(WidePhiBB, WidePhiBB->getFirstInsertionPt());
1869 Value *Trunc = Builder.CreateTrunc(WidePhi, DU.NarrowDef->getType(), "",
1870 CanWidenByZExt, CanWidenBySExt);
1871 UsePhi->replaceAllUsesWith(Trunc);
1872 DeadInsts.emplace_back(UsePhi);
1873 LLVM_DEBUG(dbgs() << "INDVARS: Widen lcssa phi " << *UsePhi << " to "
1874 << *WidePhi << "\n");
1875 }
1876 return nullptr;
1877 }
1878 }
1879
1880 // Our raison d'etre! Eliminate sign and zero extension.
1881 if ((match(DU.NarrowUse, m_SExtLike(m_Value())) && CanWidenBySExt) ||
1882 (isa<ZExtInst>(DU.NarrowUse) && CanWidenByZExt)) {
1883 Value *NewDef = DU.WideDef;
1884 if (DU.NarrowUse->getType() != WideType) {
1885 unsigned CastWidth = SE->getTypeSizeInBits(DU.NarrowUse->getType());
1886 unsigned IVWidth = SE->getTypeSizeInBits(WideType);
1887 if (CastWidth < IVWidth) {
1888 // The cast isn't as wide as the IV, so insert a Trunc.
1889 IRBuilder<> Builder(DU.NarrowUse);
1890 NewDef = Builder.CreateTrunc(DU.WideDef, DU.NarrowUse->getType(), "",
1891 CanWidenByZExt, CanWidenBySExt);
1892 }
1893 else {
1894 // A wider extend was hidden behind a narrower one. This may induce
1895 // another round of IV widening in which the intermediate IV becomes
1896 // dead. It should be very rare.
1897 LLVM_DEBUG(dbgs() << "INDVARS: New IV " << *WidePhi
1898 << " not wide enough to subsume " << *DU.NarrowUse
1899 << "\n");
1900 DU.NarrowUse->replaceUsesOfWith(DU.NarrowDef, DU.WideDef);
1901 NewDef = DU.NarrowUse;
1902 }
1903 }
1904 if (NewDef != DU.NarrowUse) {
1905 LLVM_DEBUG(dbgs() << "INDVARS: eliminating " << *DU.NarrowUse
1906 << " replaced by " << *DU.WideDef << "\n");
1907 ++NumElimExt;
1908 DU.NarrowUse->replaceAllUsesWith(NewDef);
1909 DeadInsts.emplace_back(DU.NarrowUse);
1910 }
1911 // Now that the extend is gone, we want to expose it's uses for potential
1912 // further simplification. We don't need to directly inform SimplifyIVUsers
1913 // of the new users, because their parent IV will be processed later as a
1914 // new loop phi. If we preserved IVUsers analysis, we would also want to
1915 // push the uses of WideDef here.
1916
1917 // No further widening is needed. The deceased [sz]ext had done it for us.
1918 return nullptr;
1919 }
1920
1921 auto tryAddRecExpansion = [&]() -> Instruction* {
1922 // Does this user itself evaluate to a recurrence after widening?
1923 WidenedRecTy WideAddRec = getExtendedOperandRecurrence(DU);
1924 if (!WideAddRec.first)
1925 WideAddRec = getWideRecurrence(DU);
1926 assert((WideAddRec.first == nullptr) ==
1927 (WideAddRec.second == ExtendKind::Unknown));
1928 if (!WideAddRec.first)
1929 return nullptr;
1930
1931 // Reuse the IV increment that SCEVExpander created. Recompute flags, unless
1932 // the flags for both increments agree and it is safe to use the ones from
1933 // the original inc. In that case, the new use of the wide increment won't
1934 // be more poisonous.
1935 bool NeedToRecomputeFlags =
1937 DU.NarrowUse, WideInc) ||
1938 DU.NarrowUse->hasNoUnsignedWrap() != WideInc->hasNoUnsignedWrap() ||
1939 DU.NarrowUse->hasNoSignedWrap() != WideInc->hasNoSignedWrap();
1940 Instruction *WideUse = nullptr;
1941 if (WideAddRec.first == WideIncExpr &&
1942 Rewriter.hoistIVInc(WideInc, DU.NarrowUse, NeedToRecomputeFlags))
1943 WideUse = WideInc;
1944 else {
1945 WideUse = cloneIVUser(DU, WideAddRec.first);
1946 if (!WideUse)
1947 return nullptr;
1948 }
1949 // Evaluation of WideAddRec ensured that the narrow expression could be
1950 // extended outside the loop without overflow. This suggests that the wide use
1951 // evaluates to the same expression as the extended narrow use, but doesn't
1952 // absolutely guarantee it. Hence the following failsafe check. In rare cases
1953 // where it fails, we simply throw away the newly created wide use.
1954 if (WideAddRec.first != SE->getSCEV(WideUse)) {
1955 LLVM_DEBUG(dbgs() << "Wide use expression mismatch: " << *WideUse << ": "
1956 << *SE->getSCEV(WideUse) << " != " << *WideAddRec.first
1957 << "\n");
1958 DeadInsts.emplace_back(WideUse);
1959 return nullptr;
1960 };
1961
1962 // if we reached this point then we are going to replace
1963 // DU.NarrowUse with WideUse. Reattach DbgValue then.
1964 replaceAllDbgUsesWith(*DU.NarrowUse, *WideUse, *WideUse, *DT);
1965
1966 ExtendKindMap[DU.NarrowUse] = WideAddRec.second;
1967 // Returning WideUse pushes it on the worklist.
1968 return WideUse;
1969 };
1970
1971 if (auto *I = tryAddRecExpansion())
1972 return I;
1973
1974 // If use is a loop condition, try to promote the condition instead of
1975 // truncating the IV first.
1976 if (widenLoopCompare(DU))
1977 return nullptr;
1978
1979 // We are here about to generate a truncate instruction that may hurt
1980 // performance because the scalar evolution expression computed earlier
1981 // in WideAddRec.first does not indicate a polynomial induction expression.
1982 // In that case, look at the operands of the use instruction to determine
1983 // if we can still widen the use instead of truncating its operand.
1984 if (widenWithVariantUse(DU))
1985 return nullptr;
1986
1987 // This user does not evaluate to a recurrence after widening, so don't
1988 // follow it. Instead insert a Trunc to kill off the original use,
1989 // eventually isolating the original narrow IV so it can be removed.
1990 truncateIVUse(DU);
1991 return nullptr;
1992}
1993
1994/// Add eligible users of NarrowDef to NarrowIVUsers.
1995void WidenIV::pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef) {
1996 const SCEV *NarrowSCEV = SE->getSCEV(NarrowDef);
1997 bool NonNegativeDef =
1998 SE->isKnownPredicate(ICmpInst::ICMP_SGE, NarrowSCEV,
1999 SE->getZero(NarrowSCEV->getType()));
2000 for (User *U : NarrowDef->users()) {
2001 Instruction *NarrowUser = cast<Instruction>(U);
2002
2003 // Handle data flow merges and bizarre phi cycles.
2004 if (!Widened.insert(NarrowUser).second)
2005 continue;
2006
2007 bool NonNegativeUse = false;
2008 if (!NonNegativeDef) {
2009 // We might have a control-dependent range information for this context.
2010 if (auto RangeInfo = getPostIncRangeInfo(NarrowDef, NarrowUser))
2011 NonNegativeUse = RangeInfo->getSignedMin().isNonNegative();
2012 }
2013
2014 NarrowIVUsers.emplace_back(NarrowDef, NarrowUser, WideDef,
2015 NonNegativeDef || NonNegativeUse);
2016 }
2017}
2018
2019/// Process a single induction variable. First use the SCEVExpander to create a
2020/// wide induction variable that evaluates to the same recurrence as the
2021/// original narrow IV. Then use a worklist to forward traverse the narrow IV's
2022/// def-use chain. After widenIVUse has processed all interesting IV users, the
2023/// narrow IV will be isolated for removal by DeleteDeadPHIs.
2024///
2025/// It would be simpler to delete uses as they are processed, but we must avoid
2026/// invalidating SCEV expressions.
2027PHINode *WidenIV::createWideIV(SCEVExpander &Rewriter) {
2028 // Is this phi an induction variable?
2029 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(OrigPhi));
2030 if (!AddRec)
2031 return nullptr;
2032
2033 // Widen the induction variable expression.
2034 const SCEV *WideIVExpr = getExtendKind(OrigPhi) == ExtendKind::Sign
2035 ? SE->getSignExtendExpr(AddRec, WideType)
2036 : SE->getZeroExtendExpr(AddRec, WideType);
2037
2038 assert(SE->getEffectiveSCEVType(WideIVExpr->getType()) == WideType &&
2039 "Expect the new IV expression to preserve its type");
2040
2041 // Can the IV be extended outside the loop without overflow?
2042 AddRec = dyn_cast<SCEVAddRecExpr>(WideIVExpr);
2043 if (!AddRec || AddRec->getLoop() != L)
2044 return nullptr;
2045
2046 // An AddRec must have loop-invariant operands. Since this AddRec is
2047 // materialized by a loop header phi, the expression cannot have any post-loop
2048 // operands, so they must dominate the loop header.
2049 assert(
2050 SE->properlyDominates(AddRec->getStart(), L->getHeader()) &&
2051 SE->properlyDominates(AddRec->getStepRecurrence(*SE), L->getHeader()) &&
2052 "Loop header phi recurrence inputs do not dominate the loop");
2053
2054 // Iterate over IV uses (including transitive ones) looking for IV increments
2055 // of the form 'add nsw %iv, <const>'. For each increment and each use of
2056 // the increment calculate control-dependent range information basing on
2057 // dominating conditions inside of the loop (e.g. a range check inside of the
2058 // loop). Calculated ranges are stored in PostIncRangeInfos map.
2059 //
2060 // Control-dependent range information is later used to prove that a narrow
2061 // definition is not negative (see pushNarrowIVUsers). It's difficult to do
2062 // this on demand because when pushNarrowIVUsers needs this information some
2063 // of the dominating conditions might be already widened.
2065 calculatePostIncRanges(OrigPhi);
2066
2067 // The rewriter provides a value for the desired IV expression. This may
2068 // either find an existing phi or materialize a new one. Either way, we
2069 // expect a well-formed cyclic phi-with-increments. i.e. any operand not part
2070 // of the phi-SCC dominates the loop entry.
2071 Instruction *InsertPt = &*L->getHeader()->getFirstInsertionPt();
2072 Value *ExpandInst = Rewriter.expandCodeFor(AddRec, WideType, InsertPt);
2073 // If the wide phi is not a phi node, for example a cast node, like bitcast,
2074 // inttoptr, ptrtoint, just skip for now.
2075 if (!(WidePhi = dyn_cast<PHINode>(ExpandInst))) {
2076 // if the cast node is an inserted instruction without any user, we should
2077 // remove it to make sure the pass don't touch the function as we can not
2078 // wide the phi.
2079 if (ExpandInst->hasNUses(0) &&
2080 Rewriter.isInsertedInstruction(cast<Instruction>(ExpandInst)))
2081 DeadInsts.emplace_back(ExpandInst);
2082 return nullptr;
2083 }
2084
2085 // Remembering the WideIV increment generated by SCEVExpander allows
2086 // widenIVUse to reuse it when widening the narrow IV's increment. We don't
2087 // employ a general reuse mechanism because the call above is the only call to
2088 // SCEVExpander. Henceforth, we produce 1-to-1 narrow to wide uses.
2089 if (BasicBlock *LatchBlock = L->getLoopLatch()) {
2090 WideInc =
2091 dyn_cast<Instruction>(WidePhi->getIncomingValueForBlock(LatchBlock));
2092 if (WideInc) {
2093 WideIncExpr = SE->getSCEV(WideInc);
2094 // Propagate the debug location associated with the original loop
2095 // increment to the new (widened) increment.
2096 auto *OrigInc =
2097 cast<Instruction>(OrigPhi->getIncomingValueForBlock(LatchBlock));
2098
2099 WideInc->setDebugLoc(OrigInc->getDebugLoc());
2100 // We are replacing a narrow IV increment with a wider IV increment. If
2101 // the original (narrow) increment did not wrap, the wider increment one
2102 // should not wrap either. Set the flags to be the union of both wide
2103 // increment and original increment; this ensures we preserve flags SCEV
2104 // could infer for the wider increment. Limit this only to cases where
2105 // both increments directly increment the corresponding PHI nodes and have
2106 // the same opcode. It is not safe to re-use the flags from the original
2107 // increment, if it is more complex and SCEV expansion may have yielded a
2108 // more simplified wider increment.
2110 OrigInc, WideInc) &&
2111 isa<OverflowingBinaryOperator>(OrigInc) &&
2112 isa<OverflowingBinaryOperator>(WideInc)) {
2113 WideInc->setHasNoUnsignedWrap(WideInc->hasNoUnsignedWrap() ||
2114 OrigInc->hasNoUnsignedWrap());
2115 WideInc->setHasNoSignedWrap(WideInc->hasNoSignedWrap() ||
2116 OrigInc->hasNoSignedWrap());
2117 }
2118 }
2119 }
2120
2121 LLVM_DEBUG(dbgs() << "Wide IV: " << *WidePhi << "\n");
2122 ++NumWidened;
2123
2124 // Traverse the def-use chain using a worklist starting at the original IV.
2125 assert(Widened.empty() && NarrowIVUsers.empty() && "expect initial state" );
2126
2127 Widened.insert(OrigPhi);
2128 pushNarrowIVUsers(OrigPhi, WidePhi);
2129
2130 while (!NarrowIVUsers.empty()) {
2131 WidenIV::NarrowIVDefUse DU = NarrowIVUsers.pop_back_val();
2132
2133 // Process a def-use edge. This may replace the use, so don't hold a
2134 // use_iterator across it.
2135 Instruction *WideUse = widenIVUse(DU, Rewriter, OrigPhi, WidePhi);
2136
2137 // Follow all def-use edges from the previous narrow use.
2138 if (WideUse)
2139 pushNarrowIVUsers(DU.NarrowUse, WideUse);
2140
2141 // widenIVUse may have removed the def-use edge.
2142 if (DU.NarrowDef->use_empty())
2143 DeadInsts.emplace_back(DU.NarrowDef);
2144 }
2145
2146 // Attach any debug information to the new PHI.
2147 replaceAllDbgUsesWith(*OrigPhi, *WidePhi, *WidePhi, *DT);
2148
2149 return WidePhi;
2150}
2151
2152/// Calculates control-dependent range for the given def at the given context
2153/// by looking at dominating conditions inside of the loop
2154void WidenIV::calculatePostIncRange(Instruction *NarrowDef,
2155 Instruction *NarrowUser) {
2156 Value *NarrowDefLHS;
2157 const APInt *NarrowDefRHS;
2158 if (!match(NarrowDef, m_NSWAdd(m_Value(NarrowDefLHS),
2159 m_APInt(NarrowDefRHS))) ||
2160 !NarrowDefRHS->isNonNegative())
2161 return;
2162
2163 auto UpdateRangeFromCondition = [&] (Value *Condition,
2164 bool TrueDest) {
2165 CmpInst::Predicate Pred;
2166 Value *CmpRHS;
2167 if (!match(Condition, m_ICmp(Pred, m_Specific(NarrowDefLHS),
2168 m_Value(CmpRHS))))
2169 return;
2170
2172 TrueDest ? Pred : CmpInst::getInversePredicate(Pred);
2173
2174 auto CmpRHSRange = SE->getSignedRange(SE->getSCEV(CmpRHS));
2175 auto CmpConstrainedLHSRange =
2177 auto NarrowDefRange = CmpConstrainedLHSRange.addWithNoWrap(
2179
2180 updatePostIncRangeInfo(NarrowDef, NarrowUser, NarrowDefRange);
2181 };
2182
2183 auto UpdateRangeFromGuards = [&](Instruction *Ctx) {
2184 if (!HasGuards)
2185 return;
2186
2187 for (Instruction &I : make_range(Ctx->getIterator().getReverse(),
2188 Ctx->getParent()->rend())) {
2189 Value *C = nullptr;
2190 if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(C))))
2191 UpdateRangeFromCondition(C, /*TrueDest=*/true);
2192 }
2193 };
2194
2195 UpdateRangeFromGuards(NarrowUser);
2196
2197 BasicBlock *NarrowUserBB = NarrowUser->getParent();
2198 // If NarrowUserBB is statically unreachable asking dominator queries may
2199 // yield surprising results. (e.g. the block may not have a dom tree node)
2200 if (!DT->isReachableFromEntry(NarrowUserBB))
2201 return;
2202
2203 for (auto *DTB = (*DT)[NarrowUserBB]->getIDom();
2204 L->contains(DTB->getBlock());
2205 DTB = DTB->getIDom()) {
2206 auto *BB = DTB->getBlock();
2207 auto *TI = BB->getTerminator();
2208 UpdateRangeFromGuards(TI);
2209
2210 auto *BI = dyn_cast<BranchInst>(TI);
2211 if (!BI || !BI->isConditional())
2212 continue;
2213
2214 auto *TrueSuccessor = BI->getSuccessor(0);
2215 auto *FalseSuccessor = BI->getSuccessor(1);
2216
2217 auto DominatesNarrowUser = [this, NarrowUser] (BasicBlockEdge BBE) {
2218 return BBE.isSingleEdge() &&
2219 DT->dominates(BBE, NarrowUser->getParent());
2220 };
2221
2222 if (DominatesNarrowUser(BasicBlockEdge(BB, TrueSuccessor)))
2223 UpdateRangeFromCondition(BI->getCondition(), /*TrueDest=*/true);
2224
2225 if (DominatesNarrowUser(BasicBlockEdge(BB, FalseSuccessor)))
2226 UpdateRangeFromCondition(BI->getCondition(), /*TrueDest=*/false);
2227 }
2228}
2229
2230/// Calculates PostIncRangeInfos map for the given IV
2231void WidenIV::calculatePostIncRanges(PHINode *OrigPhi) {
2234 Worklist.push_back(OrigPhi);
2235 Visited.insert(OrigPhi);
2236
2237 while (!Worklist.empty()) {
2238 Instruction *NarrowDef = Worklist.pop_back_val();
2239
2240 for (Use &U : NarrowDef->uses()) {
2241 auto *NarrowUser = cast<Instruction>(U.getUser());
2242
2243 // Don't go looking outside the current loop.
2244 auto *NarrowUserLoop = (*LI)[NarrowUser->getParent()];
2245 if (!NarrowUserLoop || !L->contains(NarrowUserLoop))
2246 continue;
2247
2248 if (!Visited.insert(NarrowUser).second)
2249 continue;
2250
2251 Worklist.push_back(NarrowUser);
2252
2253 calculatePostIncRange(NarrowDef, NarrowUser);
2254 }
2255 }
2256}
2257
2259 LoopInfo *LI, ScalarEvolution *SE, SCEVExpander &Rewriter,
2261 unsigned &NumElimExt, unsigned &NumWidened,
2262 bool HasGuards, bool UsePostIncrementRanges) {
2263 WidenIV Widener(WI, LI, SE, DT, DeadInsts, HasGuards, UsePostIncrementRanges);
2264 PHINode *WidePHI = Widener.createWideIV(Rewriter);
2265 NumElimExt = Widener.getNumElimExt();
2266 NumWidened = Widener.getNumWidened();
2267 return WidePHI;
2268}
SmallVector< AArch64_IMM::ImmInsnModel, 4 > Insn
Rewrite undef for PHI
static const Function * getParent(const Value *V)
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define LLVM_DEBUG(X)
Definition: Debug.h:101
std::string Name
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
#define DEBUG_TYPE
#define _
iv Induction Variable Users
Definition: IVUsers.cpp:48
static cl::opt< bool > UsePostIncrementRanges("indvars-post-increment-ranges", cl::Hidden, cl::desc("Use post increment control-dependent ranges in IndVarSimplify"), cl::init(true))
static cl::opt< bool > WidenIV("loop-flatten-widen-iv", cl::Hidden, cl::init(true), cl::desc("Widen the loop induction variables, if possible, so " "overflow checks won't reject flattening"))
#define I(x, y, z)
Definition: MD5.cpp:58
mir Rename Register Operands
#define P(N)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static Instruction * GetLoopInvariantInsertPosition(Loop *L, Instruction *Hint)
static bool isSimpleIVUser(Instruction *I, const Loop *L, ScalarEvolution *SE)
Return true if this instruction generates a simple SCEV expression in terms of that IV.
static Instruction * findCommonDominator(ArrayRef< Instruction * > Instructions, DominatorTree &DT)
Find a point in code which dominates all given instructions.
static Instruction * getInsertPointForUses(Instruction *User, Value *Def, DominatorTree *DT, LoopInfo *LI)
Determine the insertion point for this user.
static std::optional< BinaryOp > matchBinaryOp(Instruction *Op)
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition: Statistic.h:167
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:191
Virtual Register Rewriter
Definition: VirtRegMap.cpp:237
Value * RHS
Value * LHS
static const uint32_t IV[8]
Definition: blake3_impl.h:78
Class for arbitrary precision integers.
Definition: APInt.h:78
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition: APInt.h:314
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition: APInt.h:219
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition: APInt.h:1201
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
Value handle that asserts if the Value is deleted.
Definition: ValueHandle.h:264
LLVM Basic Block Representation.
Definition: BasicBlock.h:61
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:414
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:167
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.h:229
Value * getRHS() const
bool isSigned() const
Whether the intrinsic is signed or unsigned.
Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
Value * getLHS() const
BinaryOps getOpcode() const
Definition: InstrTypes.h:442
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), InsertPosition InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:530
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition: InstrTypes.h:850
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:757
bool isSigned() const
Definition: InstrTypes.h:1007
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:871
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:847
bool isUnsigned() const
Definition: InstrTypes.h:1013
This is the shared class of boolean and integer constants.
Definition: Constants.h:81
static ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:857
static ConstantInt * getBool(LLVMContext &Context, bool V)
Definition: Constants.cpp:864
This class represents a range of values.
Definition: ConstantRange.h:47
APInt getUnsignedMin() const
Return the smallest unsigned value contained in the ConstantRange.
static ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred, const ConstantRange &Other)
Produce the smallest range such that all values that may satisfy the given predicate with any value c...
This class represents an Operation in the Expression.
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
Definition: DenseMap.h:151
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
Definition: Dominators.cpp:321
Instruction * findNearestCommonDominator(Instruction *I1, Instruction *I2) const
Find the nearest instruction I that dominates both I1 and I2, in the sense that a result produced bef...
Definition: Dominators.cpp:344
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
This instruction compares its operands according to the predicate given to the constructor.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:2671
Interface for visiting interesting IV users that are recognized but not simplified by this utility.
virtual void anchor()
void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag.
bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:466
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Definition: Instruction.cpp:92
bool isExact() const LLVM_READONLY
Determine whether the exact flag is set.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:274
void setIsExact(bool b=true)
Set or clear the exact flag on this instruction, which must be an operator which supports this flag.
void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:463
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:44
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
Definition: Operator.h:77
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
Definition: Operator.h:110
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
Definition: Operator.h:104
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
Value * getIncomingValueForBlock(const BasicBlock *BB) const
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1852
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
This class uses information about analyze scalars to rewrite expressions in canonical form.
static bool canReuseFlagsFromOriginalIVInc(PHINode *OrigPhi, PHINode *WidePhi, Instruction *OrigInc, Instruction *WideInc)
Return true if both increments directly increment the corresponding IV PHI nodes and have the same op...
This class represents an analyzed expression in the program.
Type * getType() const
Return the LLVM type of this SCEV expression.
Represents a saturating add/sub intrinsic.
The main scalar evolution driver.
const DataLayout & getDataLayout() const
Return the DataLayout associated with the module this SCEV instance is operating on.
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
bool isKnownNegative(const SCEV *S)
Test if the given expression is known to be negative.
const SCEV * getZero(Type *Ty)
Return a SCEV for the constant 0 of a specific type.
uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
ConstantRange getSignedRange(const SCEV *S)
Determine the signed range for a particular SCEV.
bool isKnownPredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
const SCEV * getUDivExpr(const SCEV *LHS, const SCEV *RHS)
Get a canonical unsigned division expression, or something simpler if possible.
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, int Mask)
Convenient NoWrapFlags manipulation that hides enum casts and is visible in the ScalarEvolution name ...
bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV properly dominate the specified basic block.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, Instruction *MDFrom=nullptr)
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:344
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:479
bool empty() const
Definition: SmallVector.h:94
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
This class represents a truncation of integer types.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
int getFPMantissaWidth() const
Return the width of the mantissa of this type.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
void setOperand(unsigned i, Value *Val)
Definition: User.h:174
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
iterator_range< user_iterator > users()
Definition: Value.h:421
bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
Definition: Value.cpp:149
bool use_empty() const
Definition: Value.h:344
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1075
iterator_range< use_iterator > uses()
Definition: Value.h:376
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
Represents an op.with.overflow intrinsic.
const ParentTy * getParent() const
Definition: ilist_node.h:32
self_iterator getIterator()
Definition: ilist_node.h:132
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Key
PAL metadata keys.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
Definition: PatternMatch.h:875
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
Definition: PatternMatch.h:299
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
NodeAddr< DefNode * > Def
Definition: RDFGraph.h:384
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
PHINode * createWideIV(const WideIVInfo &WI, LoopInfo *LI, ScalarEvolution *SE, SCEVExpander &Rewriter, DominatorTree *DT, SmallVectorImpl< WeakTrackingVH > &DeadInsts, unsigned &NumElimExt, unsigned &NumWidened, bool HasGuards, bool UsePostIncrementRanges)
Widen Induction Variables - Extend the width of an IV to cover its widest uses.
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
Definition: Local.cpp:400
bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
cl::opt< unsigned > SCEVCheapExpansionBudget
bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT, LoopInfo *LI, const TargetTransformInfo *TTI, SmallVectorImpl< WeakTrackingVH > &Dead)
SimplifyLoopIVs - Simplify users of induction variables within this loop.
bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
Definition: Local.cpp:2715
std::pair< bool, bool > simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT, LoopInfo *LI, const TargetTransformInfo *TTI, SmallVectorImpl< WeakTrackingVH > &Dead, SCEVExpander &Rewriter, IVVisitor *V=nullptr)
simplifyUsersOfIV - Simplify instructions that use this induction variable by using ScalarEvolution t...
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
bool formLCSSAForInstructions(SmallVectorImpl< Instruction * > &Worklist, const DominatorTree &DT, const LoopInfo &LI, ScalarEvolution *SE, SmallVectorImpl< PHINode * > *PHIsToRemove=nullptr, SmallVectorImpl< PHINode * > *InsertedPHIs=nullptr)
Ensures LCSSA form for every instruction from the Worklist in the scope of innermost containing loop.
Definition: LCSSA.cpp:77
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
Collect information about induction variables that are used by sign/zero extend operations.