1//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the implementation of the scalar evolution expander,
10// which is used to generate the code corresponding to a given scalar evolution
11// expression.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
16#include "llvm/ADT/STLExtras.h"
17#include "llvm/ADT/ScopeExit.h"
23#include "llvm/IR/DataLayout.h"
24#include "llvm/IR/Dominators.h"
31
32#if LLVM_ENABLE_ABI_BREAKING_CHECKS
33#define SCEV_DEBUG_WITH_TYPE(TYPE, X) DEBUG_WITH_TYPE(TYPE, X)
34#else
35#define SCEV_DEBUG_WITH_TYPE(TYPE, X)
36#endif
37
38using namespace llvm;
39
41 "scev-cheap-expansion-budget", cl::Hidden, cl::init(4),
42 cl::desc("When performing SCEV expansion only if it is cheap to do, this "
43 "controls the budget that is considered cheap (default = 4)"));
44
45using namespace PatternMatch;
46using namespace SCEVPatternMatch;
47
48PoisonFlags::PoisonFlags(const Instruction *I) {
49 NUW = false;
50 NSW = false;
51 Exact = false;
52 Disjoint = false;
53 NNeg = false;
54 SameSign = false;
55 GEPNW = GEPNoWrapFlags::none();
56 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(I)) {
57 NUW = OBO->hasNoUnsignedWrap();
58 NSW = OBO->hasNoSignedWrap();
59 }
60 if (auto *PEO = dyn_cast<PossiblyExactOperator>(I))
61 Exact = PEO->isExact();
62 if (auto *PDI = dyn_cast<PossiblyDisjointInst>(I))
63 Disjoint = PDI->isDisjoint();
64 if (auto *PNI = dyn_cast<PossiblyNonNegInst>(I))
65 NNeg = PNI->hasNonNeg();
66 if (auto *TI = dyn_cast<TruncInst>(I)) {
67 NUW = TI->hasNoUnsignedWrap();
68 NSW = TI->hasNoSignedWrap();
69 }
70 if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
71 GEPNW = GEP->getNoWrapFlags();
72 if (auto *ICmp = dyn_cast<ICmpInst>(I))
73 SameSign = ICmp->hasSameSign();
74}
75
76void PoisonFlags::apply(Instruction *I) {
77 if (isa<OverflowingBinaryOperator>(I)) {
78 I->setHasNoUnsignedWrap(NUW);
79 I->setHasNoSignedWrap(NSW);
80 }
81 if (isa<PossiblyExactOperator>(I))
82 I->setIsExact(Exact);
83 if (auto *PDI = dyn_cast<PossiblyDisjointInst>(I))
84 PDI->setIsDisjoint(Disjoint);
85 if (auto *PNI = dyn_cast<PossiblyNonNegInst>(I))
86 PNI->setNonNeg(NNeg);
87 if (isa<TruncInst>(I)) {
88 I->setHasNoUnsignedWrap(NUW);
89 I->setHasNoSignedWrap(NSW);
90 }
91 if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
92 GEP->setNoWrapFlags(GEPNW);
93 if (auto *ICmp = dyn_cast<ICmpInst>(I))
94 ICmp->setSameSign(SameSign);
95}
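// Taken together, the constructor above snapshots an instruction's
// poison-generating flags (nuw/nsw, exact, disjoint, nneg, samesign and GEP
// no-wrap flags), and apply() re-installs them, so the expander can strip
// flags while rewriting and later restore only what still holds.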
96
97/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
98/// reusing an existing cast if a suitable one (= dominating IP) exists, or
99/// creating a new one.
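///
/// For instance, if a ptrtoint of the same value V to Ty already exists at or
/// before IP (and properly dominates the builder's insertion point), that
/// cast is returned instead of emitting a duplicate.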
100Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
101 Instruction::CastOps Op,
102 BasicBlock::iterator IP) {
103 // This function must be called with the builder having a valid insertion
104 // point. It doesn't need to be the actual IP where the uses of the returned
105 // cast will be added, but it must dominate such IP.
106 // We use this precondition to produce a cast that will dominate all its
107 // uses. In particular, this is crucial for the case where the builder's
108 // insertion point *is* the point where we were asked to put the cast.
109 // Since we don't know the builder's insertion point is actually
110 // where the uses will be added (only that it dominates it), we are
111 // not allowed to move it.
112 BasicBlock::iterator BIP = Builder.GetInsertPoint();
113
114 Value *Ret = nullptr;
115
116 if (!isa<Constant>(V)) {
117 // Check to see if there is already a cast!
118 for (User *U : V->users()) {
119 if (U->getType() != Ty)
120 continue;
121 CastInst *CI = dyn_cast<CastInst>(U);
122 if (!CI || CI->getOpcode() != Op)
123 continue;
124
125 // Found a suitable cast that is at IP or comes before IP. Use it. Note
126 // that the cast must also properly dominate the Builder's insertion
127 // point.
128 if (IP->getParent() == CI->getParent() && &*BIP != CI &&
129 (&*IP == CI || CI->comesBefore(&*IP))) {
130 Ret = CI;
131 break;
132 }
133 }
134 }
135
136 // Create a new cast.
137 if (!Ret) {
138 SCEVInsertPointGuard Guard(Builder, this);
139 Builder.SetInsertPoint(&*IP);
140 Ret = Builder.CreateCast(Op, V, Ty, V->getName());
141 }
142
143 // We assert at the end of the function since IP might point to an
144 // instruction with different dominance properties than a cast
145 // (an invoke for example) and not dominate BIP (but the cast does).
146 assert(!isa<Instruction>(Ret) ||
147 SE.DT.dominates(cast<Instruction>(Ret), &*BIP));
148
149 return Ret;
150}
151
152BasicBlock::iterator
153SCEVExpander::findInsertPointAfter(Instruction *I,
154 Instruction *MustDominate) const {
155 BasicBlock::iterator IP = ++I->getIterator();
156 if (auto *II = dyn_cast<InvokeInst>(I))
157 IP = II->getNormalDest()->begin();
158
159 while (isa<PHINode>(IP))
160 ++IP;
161
162 if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
163 ++IP;
164 } else if (isa<CatchSwitchInst>(IP)) {
165 IP = MustDominate->getParent()->getFirstInsertionPt();
166 } else {
167 assert(!IP->isEHPad() && "unexpected eh pad!");
168 }
169
170 // Adjust insert point to be after instructions inserted by the expander, so
171 // we can re-use already inserted instructions. Avoid skipping past the
172 // original \p MustDominate, in case it is an inserted instruction.
173 while (isInsertedInstruction(&*IP) && &*IP != MustDominate)
174 ++IP;
175
176 return IP;
177}
178
180 SmallVector<Value *> WorkList;
181 SmallPtrSet<Value *, 8> DeletedValues;
183 while (!WorkList.empty()) {
184 Value *V = WorkList.pop_back_val();
185 if (DeletedValues.contains(V))
186 continue;
187 auto *I = dyn_cast<Instruction>(V);
188 if (!I || I == Root || !isInsertedInstruction(I) ||
190 continue;
191 append_range(WorkList, I->operands());
192 InsertedValues.erase(I);
193 InsertedPostIncValues.erase(I);
194 DeletedValues.insert(I);
195 I->eraseFromParent();
196 }
197}
198
200SCEVExpander::GetOptimalInsertionPointForCastOf(Value *V) const {
201 // Cast the argument at the beginning of the entry block, after
202 // any bitcasts of other arguments.
203 if (Argument *A = dyn_cast<Argument>(V)) {
204 BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
205 while ((isa<BitCastInst>(IP) &&
206 isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
207 cast<BitCastInst>(IP)->getOperand(0) != A))
208 ++IP;
209 return IP;
210 }
211
212 // Cast the instruction immediately after the instruction.
213 if (auto *I = dyn_cast<Instruction>(V))
214 return findInsertPointAfter(I, &*Builder.GetInsertPoint());
215
216 // Otherwise, this must be some kind of a constant,
217 // so let's plop this cast into the function's entry block.
219 "Expected the cast argument to be a global/constant");
220 return Builder.GetInsertBlock()
221 ->getParent()
222 ->getEntryBlock()
223 .getFirstInsertionPt();
224}
225
226/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
227/// which must be possible with a noop cast, doing what we can to share
228/// the casts.
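///
/// For example, an int-to-pointer no-op cast into a non-integral address
/// space is emitted as a "getelementptr i8, ptr null, <index>" rather than an
/// inttoptr, as described in the body below.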
229Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
230 Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
231 assert((Op == Instruction::BitCast ||
232 Op == Instruction::PtrToInt ||
233 Op == Instruction::IntToPtr) &&
234 "InsertNoopCastOfTo cannot perform non-noop casts!");
235 assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
236 "InsertNoopCastOfTo cannot change sizes!");
237
238 // inttoptr only works for integral pointers. For non-integral pointers, we
239 // can create a GEP on null with the integral value as index. Note that
240 // it is safe to use GEP of null instead of inttoptr here, because only
241 // expressions already based on a GEP of null should be converted to pointers
242 // during expansion.
243 if (Op == Instruction::IntToPtr) {
244 auto *PtrTy = cast<PointerType>(Ty);
245 if (DL.isNonIntegralPointerType(PtrTy))
246 return Builder.CreatePtrAdd(Constant::getNullValue(PtrTy), V, "scevgep");
247 }
248 // Short-circuit unnecessary bitcasts.
249 if (Op == Instruction::BitCast) {
250 if (V->getType() == Ty)
251 return V;
252 if (CastInst *CI = dyn_cast<CastInst>(V)) {
253 if (CI->getOperand(0)->getType() == Ty)
254 return CI->getOperand(0);
255 }
256 }
257 // Short-circuit unnecessary inttoptr<->ptrtoint casts.
258 if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
259 SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
260 if (CastInst *CI = dyn_cast<CastInst>(V))
261 if ((CI->getOpcode() == Instruction::PtrToInt ||
262 CI->getOpcode() == Instruction::IntToPtr) &&
263 SE.getTypeSizeInBits(CI->getType()) ==
264 SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
265 return CI->getOperand(0);
266 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
267 if ((CE->getOpcode() == Instruction::PtrToInt ||
268 CE->getOpcode() == Instruction::IntToPtr) &&
269 SE.getTypeSizeInBits(CE->getType()) ==
270 SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
271 return CE->getOperand(0);
272 }
273
274 // Fold a cast of a constant.
275 if (Constant *C = dyn_cast<Constant>(V))
276 return ConstantExpr::getCast(Op, C, Ty);
277
278 // Try to reuse existing cast, or insert one.
279 return ReuseOrCreateCast(V, Ty, Op, GetOptimalInsertionPointForCastOf(V));
280}
281
282/// InsertBinop - Insert the specified binary operator, doing a small amount
283/// of work to avoid inserting an obviously redundant operation, and hoisting
284/// to an outer loop when the opportunity is there and it is safe.
285Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
286 Value *LHS, Value *RHS,
287 SCEV::NoWrapFlags Flags, bool IsSafeToHoist) {
288 // Fold a binop with constant operands.
289 if (Constant *CLHS = dyn_cast<Constant>(LHS))
290 if (Constant *CRHS = dyn_cast<Constant>(RHS))
291 if (Constant *Res = ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, DL))
292 return Res;
293
294 // Do a quick scan to see if we have this binop nearby. If so, reuse it.
295 unsigned ScanLimit = 6;
296 BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
297 // Scanning starts from the last instruction before the insertion point.
298 BasicBlock::iterator IP = Builder.GetInsertPoint();
299 if (IP != BlockBegin) {
300 --IP;
301 for (; ScanLimit; --IP, --ScanLimit) {
302 auto canGenerateIncompatiblePoison = [&Flags](Instruction *I) {
303 // Ensure that no-wrap flags match.
304 if (isa<OverflowingBinaryOperator>(I)) {
305 if (I->hasNoSignedWrap() != (Flags & SCEV::FlagNSW))
306 return true;
307 if (I->hasNoUnsignedWrap() != (Flags & SCEV::FlagNUW))
308 return true;
309 }
311 // Conservatively, do not use any instruction which has the exact flag
312 // set.
312 if (isa<PossiblyExactOperator>(I) && I->isExact())
313 return true;
314 return false;
315 };
316 if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
317 IP->getOperand(1) == RHS && !canGenerateIncompatiblePoison(&*IP))
318 return &*IP;
319 if (IP == BlockBegin) break;
320 }
321 }
322
323 // Save the original insertion point so we can restore it when we're done.
324 DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
325 SCEVInsertPointGuard Guard(Builder, this);
326
327 if (IsSafeToHoist) {
328 // Move the insertion point out of as many loops as we can.
329 while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
330 if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
331 BasicBlock *Preheader = L->getLoopPreheader();
332 if (!Preheader) break;
333
334 // Ok, move up a level.
335 Builder.SetInsertPoint(Preheader->getTerminator());
336 }
337 }
338
339 // If we haven't found this binop, insert it.
340 // TODO: Use the Builder, which will make CreateBinOp below fold with
341 // InstSimplifyFolder.
342 Instruction *BO = Builder.Insert(BinaryOperator::Create(Opcode, LHS, RHS));
343 BO->setDebugLoc(Loc);
344 if (Flags & SCEV::FlagNUW)
345 BO->setHasNoUnsignedWrap();
346 if (Flags & SCEV::FlagNSW)
347 BO->setHasNoSignedWrap();
348
349 return BO;
350}
351
352/// expandAddToGEP - Expand an addition expression with a pointer type into
353/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
354/// BasicAliasAnalysis and other passes analyze the result. See the rules
355/// for getelementptr vs. inttoptr in
356/// http://llvm.org/docs/LangRef.html#pointeraliasing
357/// for details.
358///
359/// Design note: The correctness of using getelementptr here depends on
360/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
361/// they may introduce pointer arithmetic which may not be safely converted
362/// into getelementptr.
363///
364/// Design note: It might seem desirable for this function to be more
365/// loop-aware. If some of the indices are loop-invariant while others
366/// aren't, it might seem desirable to emit multiple GEPs, keeping the
367/// loop-invariant portions of the overall computation outside the loop.
368/// However, there are a few reasons this is not done here. Hoisting simple
369/// arithmetic is a low-level optimization that often isn't very
370/// important until late in the optimization process. In fact, passes
371/// like InstructionCombining will combine GEPs, even if it means
372/// pushing loop-invariant computation down into loops, so even if the
373/// GEPs were split here, the work would quickly be undone. The
374/// LoopStrengthReduction pass, which is usually run quite late (and
375/// after the last InstructionCombining pass), takes care of hoisting
376/// loop-invariant portions of expressions, after considering what
377/// can be folded using target addressing modes.
378///
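/// Illustrative example: expanding "%base + %off" for a pointer-typed base
/// produces "%scevgep = getelementptr i8, ptr %base, i64 %off" rather than a
/// ptrtoint/add/inttoptr sequence.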
379Value *SCEVExpander::expandAddToGEP(const SCEV *Offset, Value *V,
380 SCEV::NoWrapFlags Flags) {
381 assert(!isa<Instruction>(V) ||
382 SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));
383
384 Value *Idx = expand(Offset);
385 GEPNoWrapFlags NW = (Flags & SCEV::FlagNUW) ? GEPNoWrapFlags::noUnsignedWrap()
386 : GEPNoWrapFlags::none();
387
388 // Fold a GEP with constant operands.
389 if (Constant *CLHS = dyn_cast<Constant>(V))
390 if (Constant *CRHS = dyn_cast<Constant>(Idx))
391 return Builder.CreatePtrAdd(CLHS, CRHS, "", NW);
392
393 // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
394 unsigned ScanLimit = 6;
395 BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
396 // Scanning starts from the last instruction before the insertion point.
397 BasicBlock::iterator IP = Builder.GetInsertPoint();
398 if (IP != BlockBegin) {
399 --IP;
400 for (; ScanLimit; --IP, --ScanLimit) {
401 if (auto *GEP = dyn_cast<GetElementPtrInst>(IP)) {
402 if (GEP->getPointerOperand() == V &&
403 GEP->getSourceElementType() == Builder.getInt8Ty() &&
404 GEP->getOperand(1) == Idx) {
405 rememberFlags(GEP);
406 GEP->setNoWrapFlags(GEP->getNoWrapFlags() & NW);
407 return &*IP;
408 }
409 }
410 if (IP == BlockBegin) break;
411 }
412 }
413
414 // Save the original insertion point so we can restore it when we're done.
415 SCEVInsertPointGuard Guard(Builder, this);
416
417 // Move the insertion point out of as many loops as we can.
418 while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
419 if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
420 BasicBlock *Preheader = L->getLoopPreheader();
421 if (!Preheader) break;
422
423 // Ok, move up a level.
424 Builder.SetInsertPoint(Preheader->getTerminator());
425 }
426
427 // Emit a GEP.
428 return Builder.CreatePtrAdd(V, Idx, "scevgep", NW);
429}
430
431/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
432/// SCEV expansion. If they are nested, this is the most nested. If they are
433/// neighboring, pick the later.
434static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
435 DominatorTree &DT) {
436 if (!A) return B;
437 if (!B) return A;
438 if (A->contains(B)) return B;
439 if (B->contains(A)) return A;
440 if (DT.dominates(A->getHeader(), B->getHeader())) return B;
441 if (DT.dominates(B->getHeader(), A->getHeader())) return A;
442 return A; // Arbitrarily break the tie.
443}
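// For example, if one operand is an addrec of an inner loop and another is
// defined in an enclosing loop, the inner (most nested) loop is the relevant
// one for placing the expansion.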
444
445/// getRelevantLoop - Get the most relevant loop associated with the given
446/// expression, according to PickMostRelevantLoop.
447const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
448 // Test whether we've already computed the most relevant loop for this SCEV.
449 auto Pair = RelevantLoops.try_emplace(S);
450 if (!Pair.second)
451 return Pair.first->second;
452
453 switch (S->getSCEVType()) {
454 case scConstant:
455 case scVScale:
456 return nullptr; // A constant has no relevant loops.
457 case scTruncate:
458 case scZeroExtend:
459 case scSignExtend:
460 case scPtrToAddr:
461 case scPtrToInt:
462 case scAddExpr:
463 case scMulExpr:
464 case scUDivExpr:
465 case scAddRecExpr:
466 case scUMaxExpr:
467 case scSMaxExpr:
468 case scUMinExpr:
469 case scSMinExpr:
470 case scSequentialUMinExpr: {
471 const Loop *L = nullptr;
472 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
473 L = AR->getLoop();
474 for (const SCEV *Op : S->operands())
475 L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
476 return RelevantLoops[S] = L;
477 }
478 case scUnknown: {
479 const SCEVUnknown *U = cast<SCEVUnknown>(S);
480 if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
481 return Pair.first->second = SE.LI.getLoopFor(I->getParent());
482 // A non-instruction has no relevant loops.
483 return nullptr;
484 }
485 case scCouldNotCompute:
486 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
487 }
488 llvm_unreachable("Unexpected SCEV type!");
489}
490
491namespace {
492
493/// LoopCompare - Compare loops by PickMostRelevantLoop.
494class LoopCompare {
495 DominatorTree &DT;
496public:
497 explicit LoopCompare(DominatorTree &dt) : DT(dt) {}
498
499 bool operator()(std::pair<const Loop *, const SCEV *> LHS,
500 std::pair<const Loop *, const SCEV *> RHS) const {
501 // Keep pointer operands sorted at the end.
502 if (LHS.second->getType()->isPointerTy() !=
503 RHS.second->getType()->isPointerTy())
504 return LHS.second->getType()->isPointerTy();
505
506 // Compare loops with PickMostRelevantLoop.
507 if (LHS.first != RHS.first)
508 return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;
509
510 // If one operand is a non-constant negative and the other is not,
511 // put the non-constant negative on the right so that a sub can
512 // be used instead of a negate and add.
513 if (LHS.second->isNonConstantNegative()) {
514 if (!RHS.second->isNonConstantNegative())
515 return false;
516 } else if (RHS.second->isNonConstantNegative())
517 return true;
518
519 // Otherwise they are equivalent according to this comparison.
520 return false;
521 }
522};
523
524}
525
526Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
527 // Recognize the canonical representation of an unsimplified urem.
528 const SCEV *URemLHS = nullptr;
529 const SCEV *URemRHS = nullptr;
530 if (match(S, m_scev_URem(m_SCEV(URemLHS), m_SCEV(URemRHS), SE))) {
531 Value *LHS = expand(URemLHS);
532 Value *RHS = expand(URemRHS);
533 return InsertBinop(Instruction::URem, LHS, RHS, SCEV::FlagAnyWrap,
534 /*IsSafeToHoist*/ false);
535 }
536
537 // Collect all the add operands in a loop, along with their associated loops.
538 // Iterate in reverse so that constants are emitted last, all else equal, and
539 // so that pointer operands are inserted first, which the code below relies on
540 // to form more involved GEPs.
541 SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
542 for (const SCEV *Op : reverse(S->operands()))
543 OpsAndLoops.push_back(std::make_pair(getRelevantLoop(Op), Op));
544
545 // Sort by loop. Use a stable sort so that constants follow non-constants and
546 // pointer operands precede non-pointer operands.
547 llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));
548
549 // Emit instructions to add all the operands. Hoist as much as possible
550 // out of loops, and form meaningful getelementptrs where possible.
551 Value *Sum = nullptr;
552 for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
553 const Loop *CurLoop = I->first;
554 const SCEV *Op = I->second;
555 if (!Sum) {
556 // This is the first operand. Just expand it.
557 Sum = expand(Op);
558 ++I;
559 continue;
560 }
561
562 assert(!Op->getType()->isPointerTy() && "Only first op can be pointer");
563 if (isa<PointerType>(Sum->getType())) {
564 // The running sum expression is a pointer. Try to form a getelementptr
565 // at this level with that as the base.
566 SmallVector<const SCEV *, 4> NewOps;
567 for (; I != E && I->first == CurLoop; ++I) {
568 // If the operand is a SCEVUnknown and not an instruction, peek through
569 // it, to enable more of it to be folded into the GEP.
570 const SCEV *X = I->second;
571 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
572 if (!isa<Instruction>(U->getValue()))
573 X = SE.getSCEV(U->getValue());
574 NewOps.push_back(X);
575 }
576 Sum = expandAddToGEP(SE.getAddExpr(NewOps), Sum, S->getNoWrapFlags());
577 } else if (Op->isNonConstantNegative()) {
578 // Instead of doing a negate and add, just do a subtract.
579 Value *W = expand(SE.getNegativeSCEV(Op));
580 Sum = InsertBinop(Instruction::Sub, Sum, W, SCEV::FlagAnyWrap,
581 /*IsSafeToHoist*/ true);
582 ++I;
583 } else {
584 // A simple add.
585 Value *W = expand(Op);
586 // Canonicalize a constant to the RHS.
587 if (isa<Constant>(Sum))
588 std::swap(Sum, W);
589 Sum = InsertBinop(Instruction::Add, Sum, W, S->getNoWrapFlags(),
590 /*IsSafeToHoist*/ true);
591 ++I;
592 }
593 }
594
595 return Sum;
596}
597
598Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
599 Type *Ty = S->getType();
600
601 // Collect all the mul operands in a loop, along with their associated loops.
602 // Iterate in reverse so that constants are emitted last, all else equal.
603 SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
604 for (const SCEV *Op : reverse(S->operands()))
605 OpsAndLoops.push_back(std::make_pair(getRelevantLoop(Op), Op));
606
607 // Sort by loop. Use a stable sort so that constants follow non-constants.
608 llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));
609
610 // Emit instructions to mul all the operands. Hoist as much as possible
611 // out of loops.
612 Value *Prod = nullptr;
613 auto I = OpsAndLoops.begin();
614
615 // Expand the calculation of X pow N in the following manner:
616 // Let N = P1 + P2 + ... + PK, where all P are powers of 2. Then:
617 // X pow N = (X pow P1) * (X pow P2) * ... * (X pow PK).
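 // For instance, N = 5 decomposes as 4 + 1, so X pow 5 is emitted as two
 // squarings (X*X, then (X*X)*(X*X)) followed by one multiply by X itself.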
618 const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops]() {
619 auto E = I;
620 // Calculate how many times the same operand from the same loop is included
621 // into this power.
622 uint64_t Exponent = 0;
623 const uint64_t MaxExponent = UINT64_MAX >> 1;
624 // No one sane will ever try to calculate such huge exponents, but if we
625 // need this, we stop on UINT64_MAX / 2 because we need to exit the loop
626 // below when the power of 2 exceeds our Exponent, and we want it to be
627 // 1u << 31 at most to not deal with unsigned overflow.
628 while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
629 ++Exponent;
630 ++E;
631 }
632 assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");
633
634 // Calculate powers with exponents 1, 2, 4, 8, etc., and include those that
635 // are needed in the result.
636 Value *P = expand(I->second);
637 Value *Result = nullptr;
638 if (Exponent & 1)
639 Result = P;
640 for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
641 P = InsertBinop(Instruction::Mul, P, P, SCEV::FlagAnyWrap,
642 /*IsSafeToHoist*/ true);
643 if (Exponent & BinExp)
644 Result = Result ? InsertBinop(Instruction::Mul, Result, P,
645 SCEV::FlagAnyWrap,
646 /*IsSafeToHoist*/ true)
647 : P;
648 }
649
650 I = E;
651 assert(Result && "Nothing was expanded?");
652 return Result;
653 };
654
655 while (I != OpsAndLoops.end()) {
656 if (!Prod) {
657 // This is the first operand. Just expand it.
658 Prod = ExpandOpBinPowN();
659 } else if (I->second->isAllOnesValue()) {
660 // Instead of doing a multiply by negative one, just do a negate.
661 Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod,
662 SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
663 ++I;
664 } else {
665 // A simple mul.
666 Value *W = ExpandOpBinPowN();
667 // Canonicalize a constant to the RHS.
668 if (isa<Constant>(Prod)) std::swap(Prod, W);
669 const APInt *RHS;
670 if (match(W, m_Power2(RHS))) {
671 // Canonicalize Prod*(1<<C) to Prod<<C.
672 assert(!Ty->isVectorTy() && "vector types are not SCEVable");
673 auto NWFlags = S->getNoWrapFlags();
674 // clear nsw flag if shl will produce poison value.
675 if (RHS->logBase2() == RHS->getBitWidth() - 1)
676 NWFlags = ScalarEvolution::clearFlags(NWFlags, SCEV::FlagNSW);
677 Prod = InsertBinop(Instruction::Shl, Prod,
678 ConstantInt::get(Ty, RHS->logBase2()), NWFlags,
679 /*IsSafeToHoist*/ true);
680 } else {
681 Prod = InsertBinop(Instruction::Mul, Prod, W, S->getNoWrapFlags(),
682 /*IsSafeToHoist*/ true);
683 }
684 }
685 }
686
687 return Prod;
688}
689
690Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
691 Value *LHS = expand(S->getLHS());
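 // A udiv by a constant power of two is strength-reduced to a logical shift
 // right; e.g. a division by 8 becomes "lshr %lhs, 3".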
692 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
693 const APInt &RHS = SC->getAPInt();
694 if (RHS.isPowerOf2())
695 return InsertBinop(Instruction::LShr, LHS,
696 ConstantInt::get(SC->getType(), RHS.logBase2()),
697 SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
698 }
699
700 const SCEV *RHSExpr = S->getRHS();
701 Value *RHS = expand(RHSExpr);
702 if (SafeUDivMode) {
703 bool GuaranteedNotPoison =
704 ScalarEvolution::isGuaranteedNotToBePoison(RHSExpr);
705 if (!GuaranteedNotPoison)
706 RHS = Builder.CreateFreeze(RHS);
707
708 // We need an umax if either RHSExpr is not known to be non-zero, or if it
709 // is not guaranteed to be non-poison. In the latter case, the frozen
710 // poison may be 0.
711 if (!SE.isKnownNonZero(RHSExpr) || !GuaranteedNotPoison)
712 RHS = Builder.CreateIntrinsic(RHS->getType(), Intrinsic::umax,
713 {RHS, ConstantInt::get(RHS->getType(), 1)});
714 }
715 return InsertBinop(Instruction::UDiv, LHS, RHS, SCEV::FlagAnyWrap,
716 /*IsSafeToHoist*/ SE.isKnownNonZero(S->getRHS()));
717}
718
719/// Determine if this is a well-behaved chain of instructions leading back to
720/// the PHI. If so, it may be reused by expanded expressions.
721bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
722 const Loop *L) {
723 if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
724 (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
725 return false;
726 // If any of the operands don't dominate the insert position, bail.
727 // Addrec operands are always loop-invariant, so this can only happen
728 // if there are instructions which haven't been hoisted.
729 if (L == IVIncInsertLoop) {
730 for (Use &Op : llvm::drop_begin(IncV->operands()))
731 if (Instruction *OInst = dyn_cast<Instruction>(Op))
732 if (!SE.DT.dominates(OInst, IVIncInsertPos))
733 return false;
734 }
735 // Advance to the next instruction.
736 IncV = dyn_cast<Instruction>(IncV->getOperand(0));
737 if (!IncV)
738 return false;
739
740 if (IncV->mayHaveSideEffects())
741 return false;
742
743 if (IncV == PN)
744 return true;
745
746 return isNormalAddRecExprPHI(PN, IncV, L);
747}
748
749/// getIVIncOperand returns an induction variable increment's induction
750/// variable operand.
751///
752/// If allowScale is set, any type of GEP is allowed as long as the nonIV
753/// operands dominate InsertPos.
754///
755/// If allowScale is not set, ensure that a GEP increment conforms to one of the
756/// simple patterns generated by getAddRecExprPHILiterally and
757/// expandAddtoGEP. If the pattern isn't recognized, return NULL.
758Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
759 Instruction *InsertPos,
760 bool allowScale) {
761 if (IncV == InsertPos)
762 return nullptr;
763
764 switch (IncV->getOpcode()) {
765 default:
766 return nullptr;
767 // Check for a simple Add/Sub or GEP of a loop invariant step.
768 case Instruction::Add:
769 case Instruction::Sub: {
770 Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
771 if (!OInst || SE.DT.dominates(OInst, InsertPos))
772 return dyn_cast<Instruction>(IncV->getOperand(0));
773 return nullptr;
774 }
775 case Instruction::BitCast:
776 return dyn_cast<Instruction>(IncV->getOperand(0));
777 case Instruction::GetElementPtr:
778 for (Use &U : llvm::drop_begin(IncV->operands())) {
779 if (isa<Constant>(U))
780 continue;
781 if (Instruction *OInst = dyn_cast<Instruction>(U)) {
782 if (!SE.DT.dominates(OInst, InsertPos))
783 return nullptr;
784 }
785 if (allowScale) {
786 // allow any kind of GEP as long as it can be hoisted.
787 continue;
788 }
789 // GEPs produced by SCEVExpander use i8 element type.
790 if (!cast<GEPOperator>(IncV)->getSourceElementType()->isIntegerTy(8))
791 return nullptr;
792 break;
793 }
794 return dyn_cast<Instruction>(IncV->getOperand(0));
795 }
796}
797
798/// If the insert point of the current builder or any of the builders on the
799/// stack of saved builders has 'I' as its insert point, update it to point to
800/// the instruction after 'I'. This is intended to be used when the instruction
801/// 'I' is being moved. If this fixup is not done and 'I' is moved to a
802/// different block, the inconsistent insert point (with a mismatched
803/// Instruction and Block) can lead to an instruction being inserted in a block
804/// other than its parent.
805void SCEVExpander::fixupInsertPoints(Instruction *I) {
806 BasicBlock::iterator It(*I);
807 BasicBlock::iterator NewInsertPt = std::next(It);
808 if (Builder.GetInsertPoint() == It)
809 Builder.SetInsertPoint(&*NewInsertPt);
810 for (auto *InsertPtGuard : InsertPointGuards)
811 if (InsertPtGuard->GetInsertPoint() == It)
812 InsertPtGuard->SetInsertPoint(NewInsertPt);
813}
814
815/// hoistStep - Attempt to hoist a simple IV increment above InsertPos to make
816/// it available to other uses in this loop. Recursively hoist any operands,
817/// until we reach a value that dominates InsertPos.
818bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos,
819 bool RecomputePoisonFlags) {
820 auto FixupPoisonFlags = [this](Instruction *I) {
821 // Drop flags that are potentially inferred from old context and infer flags
822 // in new context.
823 rememberFlags(I);
824 I->dropPoisonGeneratingFlags();
825 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(I))
826 if (auto Flags = SE.getStrengthenedNoWrapFlagsFromBinOp(OBO)) {
827 auto *BO = cast<BinaryOperator>(I);
828 BO->setHasNoUnsignedWrap(
829 ScalarEvolution::maskFlags(*Flags, SCEV::FlagNUW) == SCEV::FlagNUW);
830 BO->setHasNoSignedWrap(
831 ScalarEvolution::maskFlags(*Flags, SCEV::FlagNSW) == SCEV::FlagNSW);
832 }
833 };
834
835 if (SE.DT.dominates(IncV, InsertPos)) {
836 if (RecomputePoisonFlags)
837 FixupPoisonFlags(IncV);
838 return true;
839 }
840
841 // InsertPos must itself dominate IncV so that IncV's new position satisfies
842 // its existing users.
843 if (isa<PHINode>(InsertPos) ||
844 !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
845 return false;
846
847 if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
848 return false;
849
850 // Check that the chain of IV operands leading back to Phi can be hoisted.
851 SmallVector<Instruction *, 4> IVIncs;
852 for(;;) {
853 Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
854 if (!Oper)
855 return false;
856 // IncV is safe to hoist.
857 IVIncs.push_back(IncV);
858 IncV = Oper;
859 if (SE.DT.dominates(IncV, InsertPos))
860 break;
861 }
862 for (Instruction *I : llvm::reverse(IVIncs)) {
863 fixupInsertPoints(I);
864 I->moveBefore(InsertPos->getIterator());
865 if (RecomputePoisonFlags)
866 FixupPoisonFlags(I);
867 }
868 return true;
869}
870
871bool SCEVExpander::canReuseFlagsFromOriginalIVInc(PHINode *OrigPhi,
872 PHINode *WidePhi,
873 Instruction *OrigInc,
874 Instruction *WideInc) {
875 return match(OrigInc, m_c_BinOp(m_Specific(OrigPhi), m_Value())) &&
876 match(WideInc, m_c_BinOp(m_Specific(WidePhi), m_Value())) &&
877 OrigInc->getOpcode() == WideInc->getOpcode();
878}
879
880/// Determine if this cyclic phi is in a form that would have been generated by
881/// LSR. We don't care if the phi was actually expanded in this pass, as long
882/// as it is in a low-cost form, for example, no implied multiplication. This
883/// should match any patterns generated by getAddRecExprPHILiterally and
884/// expandAddtoGEP.
885bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
886 const Loop *L) {
887 for(Instruction *IVOper = IncV;
888 (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
889 /*allowScale=*/false));) {
890 if (IVOper == PN)
891 return true;
892 }
893 return false;
894}
895
896/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
897/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
898/// need to materialize IV increments elsewhere to handle difficult situations.
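///
/// For an integer PHI this emits "%<name>.iv.next = add/sub %<name>.iv, %step";
/// for a pointer PHI it emits an i8 getelementptr named "scevgep".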
899Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
900 bool useSubtract) {
901 Value *IncV;
902 // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
903 if (PN->getType()->isPointerTy()) {
904 // TODO: Change name to IVName.iv.next.
905 IncV = Builder.CreatePtrAdd(PN, StepV, "scevgep");
906 } else {
907 IncV = useSubtract ?
908 Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
909 Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
910 }
911 return IncV;
912}
913
914/// Check whether we can cheaply express the requested SCEV in terms of
915/// the available PHI SCEV by truncation and/or inversion of the step.
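/// For example, a requested i32 {0,+,1} can reuse an existing i64 {0,+,1} phi
/// through a truncate, and {R,+,-1} can reuse {0,+,1} by computing R - phi.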
916static bool canBeCheaplyTransformed(ScalarEvolution &SE,
917 const SCEVAddRecExpr *Phi,
918 const SCEVAddRecExpr *Requested,
919 bool &InvertStep) {
920 // We can't transform to match a pointer PHI.
921 Type *PhiTy = Phi->getType();
922 Type *RequestedTy = Requested->getType();
923 if (PhiTy->isPointerTy() || RequestedTy->isPointerTy())
924 return false;
925
926 if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
927 return false;
928
929 // Try truncate it if necessary.
930 Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
931 if (!Phi)
932 return false;
933
934 // Check whether truncation will help.
935 if (Phi == Requested) {
936 InvertStep = false;
937 return true;
938 }
939
940 // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
941 if (SE.getMinusSCEV(Requested->getStart(), Requested) == Phi) {
942 InvertStep = true;
943 return true;
944 }
945
946 return false;
947}
948
949static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
950 if (!isa<IntegerType>(AR->getType()))
951 return false;
952
953 unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
954 Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
955 const SCEV *Step = AR->getStepRecurrence(SE);
956 const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
957 SE.getSignExtendExpr(AR, WideTy));
958 const SCEV *ExtendAfterOp =
959 SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
960 return ExtendAfterOp == OpAfterExtend;
961}
962
963static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
964 if (!isa<IntegerType>(AR->getType()))
965 return false;
966
967 unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
968 Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
969 const SCEV *Step = AR->getStepRecurrence(SE);
970 const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
971 SE.getZeroExtendExpr(AR, WideTy));
972 const SCEV *ExtendAfterOp =
973 SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
974 return ExtendAfterOp == OpAfterExtend;
975}
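// Both helpers above prove the absence of self-wrap by redoing the increment
// in a type twice as wide: if extending AR and Step first and then adding
// yields the same SCEV as adding first and then extending, the narrow
// addition cannot wrap.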
976
977/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
978/// the base addrec, which is the addrec without any non-loop-dominating
979/// values, and return the PHI.
980PHINode *
981SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
982 const Loop *L, Type *&TruncTy,
983 bool &InvertStep) {
984 assert((!IVIncInsertLoop || IVIncInsertPos) &&
985 "Uninitialized insert position");
986
987 // Reuse a previously-inserted PHI, if present.
988 BasicBlock *LatchBlock = L->getLoopLatch();
989 if (LatchBlock) {
990 PHINode *AddRecPhiMatch = nullptr;
991 Instruction *IncV = nullptr;
992 TruncTy = nullptr;
993 InvertStep = false;
994
995 // Only try partially matching scevs that need truncation and/or
996 // step-inversion if we know this loop is outside the current loop.
997 bool TryNonMatchingSCEV =
998 IVIncInsertLoop &&
999 SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());
1000
1001 for (PHINode &PN : L->getHeader()->phis()) {
1002 if (!SE.isSCEVable(PN.getType()))
1003 continue;
1004
1005 // We should not look for an incomplete PHI. Getting SCEV for an incomplete
1006 // PHI has no meaning at all.
1007 if (!PN.isComplete()) {
1008 SCEV_DEBUG_WITH_TYPE(
1009 DebugType, dbgs() << "One incomplete PHI is found: " << PN << "\n");
1010 continue;
1011 }
1012
1013 const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
1014 if (!PhiSCEV)
1015 continue;
1016
1017 bool IsMatchingSCEV = PhiSCEV == Normalized;
1018 // We only handle truncation and inversion of phi recurrences for the
1019 // expanded expression if the expanded expression's loop dominates the
1020 // loop we insert to. Check now, so we can bail out early.
1021 if (!IsMatchingSCEV && !TryNonMatchingSCEV)
1022 continue;
1023
1024 // TODO: this possibly can be reworked to avoid this cast at all.
1025 Instruction *TempIncV =
1026 dyn_cast<Instruction>(PN.getIncomingValueForBlock(LatchBlock));
1027 if (!TempIncV)
1028 continue;
1029
1030 // Check whether we can reuse this PHI node.
1031 if (LSRMode) {
1032 if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
1033 continue;
1034 } else {
1035 if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
1036 continue;
1037 }
1038
1039 // Stop if we have found an exact match SCEV.
1040 if (IsMatchingSCEV) {
1041 IncV = TempIncV;
1042 TruncTy = nullptr;
1043 InvertStep = false;
1044 AddRecPhiMatch = &PN;
1045 break;
1046 }
1047
1048 // Try whether the phi can be translated into the requested form
1049 // (truncated and/or offset by a constant).
1050 if ((!TruncTy || InvertStep) &&
1051 canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
1052 // Record the phi node. But don't stop; we might find an exact match
1053 // later.
1054 AddRecPhiMatch = &PN;
1055 IncV = TempIncV;
1056 TruncTy = Normalized->getType();
1057 }
1058 }
1059
1060 if (AddRecPhiMatch) {
1061 // Ok, the add recurrence looks usable.
1062 // Remember this PHI, even in post-inc mode.
1063 InsertedValues.insert(AddRecPhiMatch);
1064 // Remember the increment.
1065 rememberInstruction(IncV);
1066 // Those values were not actually inserted but re-used.
1067 ReusedValues.insert(AddRecPhiMatch);
1068 ReusedValues.insert(IncV);
1069 return AddRecPhiMatch;
1070 }
1071 }
1072
1073 // Save the original insertion point so we can restore it when we're done.
1074 SCEVInsertPointGuard Guard(Builder, this);
1075
1076 // Another AddRec may need to be recursively expanded below. For example, if
1077 // this AddRec is quadratic, the StepV may itself be an AddRec in this
1078 // loop. Remove this loop from the PostIncLoops set before expanding such
1079 // AddRecs. Otherwise, we cannot find a valid position for the step
1080 // (i.e. StepV can never dominate its loop header). Ideally, we could do
1081 // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
1082 // so it's not worth implementing SmallPtrSet::swap.
1083 PostIncLoopSet SavedPostIncLoops = PostIncLoops;
1084 PostIncLoops.clear();
1085
1086 // Expand code for the start value into the loop preheader.
1087 assert(L->getLoopPreheader() &&
1088 "Can't expand add recurrences without a loop preheader!");
1089 Value *StartV =
1090 expand(Normalized->getStart(), L->getLoopPreheader()->getTerminator());
1091
1092 // StartV must have been inserted into L's preheader to dominate the new
1093 // phi.
1094 assert(!isa<Instruction>(StartV) ||
1095 SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
1096 L->getHeader()));
1097
1098 // Expand code for the step value. Do this before creating the PHI so that PHI
1099 // reuse code doesn't see an incomplete PHI.
1100 const SCEV *Step = Normalized->getStepRecurrence(SE);
1101 Type *ExpandTy = Normalized->getType();
1102 // If the stride is negative, insert a sub instead of an add for the increment
1103 // (unless it's a constant, because subtracts of constants are canonicalized
1104 // to adds).
1105 bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
1106 if (useSubtract)
1107 Step = SE.getNegativeSCEV(Step);
1108 // Expand the step somewhere that dominates the loop header.
1109 Value *StepV = expand(Step, L->getHeader()->getFirstInsertionPt());
1110
1111 // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
1112 // we actually do emit an addition. It does not apply if we emit a
1113 // subtraction.
1114 bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
1115 bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);
1116
1117 // Create the PHI.
1118 BasicBlock *Header = L->getHeader();
1119 Builder.SetInsertPoint(Header, Header->begin());
1120 PHINode *PN =
1121 Builder.CreatePHI(ExpandTy, pred_size(Header), Twine(IVName) + ".iv");
1122
1123 // Create the step instructions and populate the PHI.
1124 for (BasicBlock *Pred : predecessors(Header)) {
1125 // Add a start value.
1126 if (!L->contains(Pred)) {
1127 PN->addIncoming(StartV, Pred);
1128 continue;
1129 }
1130
1131 // Create a step value and add it to the PHI.
1132 // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
1133 // instructions at IVIncInsertPos.
1134 Instruction *InsertPos = L == IVIncInsertLoop ?
1135 IVIncInsertPos : Pred->getTerminator();
1136 Builder.SetInsertPoint(InsertPos);
1137 Value *IncV = expandIVInc(PN, StepV, L, useSubtract);
1138
1139 if (isa<OverflowingBinaryOperator>(IncV)) {
1140 if (IncrementIsNUW)
1141 cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
1142 if (IncrementIsNSW)
1143 cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
1144 }
1145 PN->addIncoming(IncV, Pred);
1146 }
1147
1148 // After expanding subexpressions, restore the PostIncLoops set so the caller
1149 // can ensure that IVIncrement dominates the current uses.
1150 PostIncLoops = SavedPostIncLoops;
1151
1152 // Remember this PHI, even in post-inc mode. LSR SCEV-based salvaging is most
1153 // effective when we are able to use an IV inserted here, so record it.
1154 InsertedValues.insert(PN);
1155 InsertedIVs.push_back(PN);
1156 return PN;
1157}
1158
1159Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
1160 const Loop *L = S->getLoop();
1161
1162 // Determine a normalized form of this expression, which is the expression
1163 // before any post-inc adjustment is made.
1164 const SCEVAddRecExpr *Normalized = S;
1165 if (PostIncLoops.count(L)) {
1166 PostIncLoopSet Loops;
1167 Loops.insert(L);
1168 Normalized = cast<SCEVAddRecExpr>(
1169 normalizeForPostIncUse(S, Loops, SE, /*CheckInvertible=*/false));
1170 }
1171
1172 [[maybe_unused]] const SCEV *Start = Normalized->getStart();
1173 const SCEV *Step = Normalized->getStepRecurrence(SE);
1174 assert(SE.properlyDominates(Start, L->getHeader()) &&
1175 "Start does not properly dominate loop header");
1176 assert(SE.dominates(Step, L->getHeader()) && "Step not dominate loop header");
1177
1178 // In some cases, we decide to reuse an existing phi node but need to truncate
1179 // it and/or invert the step.
1180 Type *TruncTy = nullptr;
1181 bool InvertStep = false;
1182 PHINode *PN = getAddRecExprPHILiterally(Normalized, L, TruncTy, InvertStep);
1183
1184 // Accommodate post-inc mode, if necessary.
1185 Value *Result;
1186 if (!PostIncLoops.count(L))
1187 Result = PN;
1188 else {
1189 // In PostInc mode, use the post-incremented value.
1190 BasicBlock *LatchBlock = L->getLoopLatch();
1191 assert(LatchBlock && "PostInc mode requires a unique loop latch!");
1192 Result = PN->getIncomingValueForBlock(LatchBlock);
1193
1194 // We might be introducing a new use of the post-inc IV that is not poison
1195 // safe, in which case we should drop poison generating flags. Only keep
1196 // those flags for which SCEV has proven that they always hold.
1197 if (isa<OverflowingBinaryOperator>(Result)) {
1198 auto *I = cast<Instruction>(Result);
1199 if (!S->hasNoUnsignedWrap())
1200 I->setHasNoUnsignedWrap(false);
1201 if (!S->hasNoSignedWrap())
1202 I->setHasNoSignedWrap(false);
1203 }
1204
1205 // For an expansion to use the postinc form, the client must call
1206 // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
1207 // or dominated by IVIncInsertPos.
1208 if (isa<Instruction>(Result) &&
1209 !SE.DT.dominates(cast<Instruction>(Result),
1210 &*Builder.GetInsertPoint())) {
1211 // The induction variable's postinc expansion does not dominate this use.
1212 // IVUsers tries to prevent this case, so it is rare. However, it can
1213 // happen when an IVUser outside the loop is not dominated by the latch
1214 // block. Adjusting IVIncInsertPos before expansion begins cannot handle
1215 // all cases. Consider a phi outside whose operand is replaced during
1216 // expansion with the value of the postinc user. Without fundamentally
1217 // changing the way postinc users are tracked, the only remedy is
1218 // inserting an extra IV increment. StepV might fold into PostLoopOffset,
1219 // but hopefully expandCodeFor handles that.
1220 bool useSubtract =
1221 !S->getType()->isPointerTy() && Step->isNonConstantNegative();
1222 if (useSubtract)
1223 Step = SE.getNegativeSCEV(Step);
1224 Value *StepV;
1225 {
1226 // Expand the step somewhere that dominates the loop header.
1227 SCEVInsertPointGuard Guard(Builder, this);
1228 StepV = expand(Step, L->getHeader()->getFirstInsertionPt());
1229 }
1230 Result = expandIVInc(PN, StepV, L, useSubtract);
1231 }
1232 }
1233
1234 // We have decided to reuse an induction variable of a dominating loop. Apply
1235 // truncation and/or inversion of the step.
1236 if (TruncTy) {
1237 // Truncate the result.
1238 if (TruncTy != Result->getType())
1239 Result = Builder.CreateTrunc(Result, TruncTy);
1240
1241 // Invert the result.
1242 if (InvertStep)
1243 Result = Builder.CreateSub(expand(Normalized->getStart()), Result);
1244 }
1245
1246 return Result;
1247}
1248
1249Value *SCEVExpander::tryToReuseLCSSAPhi(const SCEVAddRecExpr *S) {
1250 Type *STy = S->getType();
1251 const Loop *L = S->getLoop();
1252 BasicBlock *EB = L->getExitBlock();
1253 if (!EB || !EB->getSinglePredecessor() ||
1254 !SE.DT.dominates(EB, Builder.GetInsertBlock()))
1255 return nullptr;
1256
1257 for (auto &PN : EB->phis()) {
1258 if (!SE.isSCEVable(PN.getType()))
1259 continue;
1260 auto *ExitSCEV = SE.getSCEV(&PN);
1261 if (!isa<SCEVAddRecExpr>(ExitSCEV))
1262 continue;
1263 Type *PhiTy = PN.getType();
1264 if (STy->isIntegerTy() && PhiTy->isPointerTy()) {
1265 ExitSCEV = SE.getPtrToIntExpr(ExitSCEV, STy);
1266 if (isa<SCEVCouldNotCompute>(ExitSCEV))
1267 continue;
1268 } else if (S->getType() != PN.getType()) {
1269 continue;
1270 }
1271
1272 // Check if we can re-use the existing PN, by adjusting it with an expanded
1273 // offset, if the offset is simpler.
1274 const SCEV *Diff = SE.getMinusSCEV(S, ExitSCEV);
1275 const SCEV *Op = Diff;
1279 if (!isa<SCEVConstant, SCEVUnknown>(Op))
1280 continue;
1281
1282 assert(Diff->getType()->isIntegerTy() &&
1283 "difference must be of integer type");
1284 Value *DiffV = expand(Diff);
1285 Value *BaseV = fixupLCSSAFormFor(&PN);
1286 if (PhiTy->isPointerTy()) {
1287 if (STy->isPointerTy())
1288 return Builder.CreatePtrAdd(BaseV, DiffV);
1289 BaseV = Builder.CreatePtrToInt(BaseV, DiffV->getType());
1290 }
1291 return Builder.CreateAdd(BaseV, DiffV);
1292 }
1293
1294 return nullptr;
1295}
1296
1297Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
1298 // In canonical mode we compute the addrec as an expression of a canonical IV
1299 // using evaluateAtIteration and expand the resulting SCEV expression. This
1300 // way we avoid introducing new IVs to carry on the computation of the addrec
1301 // throughout the loop.
1302 //
1303 // For nested addrecs evaluateAtIteration might need a canonical IV of a
1304 // type wider than the addrec itself. Emitting a canonical IV of the
1305 // proper type might produce non-legal types, for example expanding an i64
1306 // {0,+,2,+,1} addrec would need an i65 canonical IV. To avoid this just fall
1307 // back to non-canonical mode for nested addrecs.
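 //
 // For example, in canonical mode an affine addrec {A,+,S} is rewritten as
 // A + i*S, where i is the canonical {0,+,1} induction variable of the loop,
 // instead of creating a fresh PHI for the addrec itself.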
1308 if (!CanonicalMode || (S->getNumOperands() > 2))
1309 return expandAddRecExprLiterally(S);
1310
1311 Type *Ty = SE.getEffectiveSCEVType(S->getType());
1312 const Loop *L = S->getLoop();
1313
1314 // First check for an existing canonical IV in a suitable type.
1315 PHINode *CanonicalIV = nullptr;
1316 if (PHINode *PN = L->getCanonicalInductionVariable())
1317 if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
1318 CanonicalIV = PN;
1319
1320 // Rewrite an AddRec in terms of the canonical induction variable, if
1321 // its type is more narrow.
1322 if (CanonicalIV &&
1323 SE.getTypeSizeInBits(CanonicalIV->getType()) > SE.getTypeSizeInBits(Ty) &&
1324 !S->getType()->isPointerTy()) {
1325 SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
1326 for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
1327 NewOps[i] = SE.getAnyExtendExpr(S->getOperand(i), CanonicalIV->getType());
1328 Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
1329 S->getNoWrapFlags(SCEV::FlagNW)));
1330 BasicBlock::iterator NewInsertPt =
1331 findInsertPointAfter(cast<Instruction>(V), &*Builder.GetInsertPoint());
1332 V = expand(SE.getTruncateExpr(SE.getUnknown(V), Ty), NewInsertPt);
1333 return V;
1334 }
1335
1336 // If S is expanded outside the defining loop, check if there is a
1337 // matching LCSSA phi node for it.
1338 if (Value *V = tryToReuseLCSSAPhi(S))
1339 return V;
1340
1341 // {X,+,F} --> X + {0,+,F}
1342 if (!S->getStart()->isZero()) {
1343 if (isa<PointerType>(S->getType())) {
1344 Value *StartV = expand(SE.getPointerBase(S));
1345 return expandAddToGEP(SE.removePointerBase(S), StartV,
1346 S->getNoWrapFlags(SCEV::FlagNUW));
1347 }
1348
1349 SmallVector<const SCEV *, 4> NewOps(S->operands());
1350 NewOps[0] = SE.getConstant(Ty, 0);
1351 const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
1352 S->getNoWrapFlags(SCEV::FlagNW));
1353
1354 // Just do a normal add. Pre-expand the operands to suppress folding.
1355 //
1356 // The LHS and RHS values are factored out of the expand call to make the
1357 // output independent of the argument evaluation order.
1358 const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
1359 const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
1360 return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
1361 }
1362
1363 // If we don't yet have a canonical IV, create one.
1364 if (!CanonicalIV) {
1365 // Create and insert the PHI node for the induction variable in the
1366 // specified loop.
1367 BasicBlock *Header = L->getHeader();
1368 pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1369 CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar");
1370 CanonicalIV->insertBefore(Header->begin());
1371 rememberInstruction(CanonicalIV);
1372
1373 SmallPtrSet<BasicBlock *, 4> PredSeen;
1374 Constant *One = ConstantInt::get(Ty, 1);
1375 for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1376 BasicBlock *HP = *HPI;
1377 if (!PredSeen.insert(HP).second) {
1378 // There must be an incoming value for each predecessor, even the
1379 // duplicates!
1380 CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
1381 continue;
1382 }
1383
1384 if (L->contains(HP)) {
1385 // Insert a unit add instruction right before the terminator
1386 // corresponding to the back-edge.
1387 Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
1388 "indvar.next",
1389 HP->getTerminator()->getIterator());
1390 Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
1391 rememberInstruction(Add);
1392 CanonicalIV->addIncoming(Add, HP);
1393 } else {
1394 CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
1395 }
1396 }
1397 }
1398
1399 // {0,+,1} --> Insert a canonical induction variable into the loop!
1400 if (S->isAffine() && S->getOperand(1)->isOne()) {
1401 assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
1402 "IVs with types different from the canonical IV should "
1403 "already have been handled!");
1404 return CanonicalIV;
1405 }
1406
1407 // {0,+,F} --> {0,+,1} * F
1408
1409 // If this is a simple linear addrec, emit it now as a special case.
1410 if (S->isAffine()) // {0,+,F} --> i*F
1411 return
1412 expand(SE.getTruncateOrNoop(
1413 SE.getMulExpr(SE.getUnknown(CanonicalIV),
1414 SE.getNoopOrAnyExtend(S->getOperand(1),
1415 CanonicalIV->getType())),
1416 Ty));
1417
1418 // If this is a chain of recurrences, turn it into a closed form, using the
1419 // folders, then expandCodeFor the closed form. This allows the folders to
1420 // simplify the expression without having to build a bunch of special code
1421 // into this folder.
1422 const SCEV *IH = SE.getUnknown(CanonicalIV); // Get I as a "symbolic" SCEV.
1423
1424 // Promote S up to the canonical IV type, if the cast is foldable.
1425 const SCEV *NewS = S;
1426 const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
1427 if (isa<SCEVAddRecExpr>(Ext))
1428 NewS = Ext;
1429
1430 const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
1431
1432 // Truncate the result down to the original type, if needed.
1433 const SCEV *T = SE.getTruncateOrNoop(V, Ty);
1434 return expand(T);
1435}
1436
1437Value *SCEVExpander::visitPtrToAddrExpr(const SCEVPtrToAddrExpr *S) {
1438 Value *V = expand(S->getOperand());
1439 return ReuseOrCreateCast(V, S->getType(), CastInst::PtrToAddr,
1440 GetOptimalInsertionPointForCastOf(V));
1441}
1442
1443Value *SCEVExpander::visitPtrToIntExpr(const SCEVPtrToIntExpr *S) {
1444 Value *V = expand(S->getOperand());
1445 return ReuseOrCreateCast(V, S->getType(), CastInst::PtrToInt,
1446 GetOptimalInsertionPointForCastOf(V));
1447}
1448
1449Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
1450 Value *V = expand(S->getOperand());
1451 return Builder.CreateTrunc(V, S->getType());
1452}
1453
1454Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
1455 Value *V = expand(S->getOperand());
1456 return Builder.CreateZExt(V, S->getType(), "",
1457 SE.isKnownNonNegative(S->getOperand()));
1458}
1459
1460Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
1461 Value *V = expand(S->getOperand());
1462 return Builder.CreateSExt(V, S->getType());
1463}
1464
1465Value *SCEVExpander::expandMinMaxExpr(const SCEVNAryExpr *S,
1466 Intrinsic::ID IntrinID, Twine Name,
1467 bool IsSequential) {
1468 bool PrevSafeMode = SafeUDivMode;
1469 SafeUDivMode |= IsSequential;
1470 Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
1471 Type *Ty = LHS->getType();
1472 if (IsSequential)
1473 LHS = Builder.CreateFreeze(LHS);
1474 for (int i = S->getNumOperands() - 2; i >= 0; --i) {
1475 SafeUDivMode = (IsSequential && i != 0) || PrevSafeMode;
1476 Value *RHS = expand(S->getOperand(i));
1477 if (IsSequential && i != 0)
1478 RHS = Builder.CreateFreeze(RHS);
1479 Value *Sel;
1480 if (Ty->isIntegerTy())
1481 Sel = Builder.CreateIntrinsic(IntrinID, {Ty}, {LHS, RHS},
1482 /*FMFSource=*/nullptr, Name);
1483 else {
1484 Value *ICmp =
1485 Builder.CreateICmp(MinMaxIntrinsic::getPredicate(IntrinID), LHS, RHS);
1486 Sel = Builder.CreateSelect(ICmp, LHS, RHS, Name);
1487 }
1488 LHS = Sel;
1489 }
1490 SafeUDivMode = PrevSafeMode;
1491 return LHS;
1492}
1493
1494Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
1495 return expandMinMaxExpr(S, Intrinsic::smax, "smax");
1496}
1497
1498Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
1499 return expandMinMaxExpr(S, Intrinsic::umax, "umax");
1500}
1501
1502Value *SCEVExpander::visitSMinExpr(const SCEVSMinExpr *S) {
1503 return expandMinMaxExpr(S, Intrinsic::smin, "smin");
1504}
1505
1506Value *SCEVExpander::visitUMinExpr(const SCEVUMinExpr *S) {
1507 return expandMinMaxExpr(S, Intrinsic::umin, "umin");
1508}
1509
1510Value *SCEVExpander::visitSequentialUMinExpr(const SCEVSequentialUMinExpr *S) {
1511 return expandMinMaxExpr(S, Intrinsic::umin, "umin", /*IsSequential*/true);
1512}
1513
1514Value *SCEVExpander::visitVScale(const SCEVVScale *S) {
1515 return Builder.CreateVScale(S->getType());
1516}
1517
1518Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
1519 BasicBlock::iterator IP) {
1520 setInsertPoint(IP);
1521 Value *V = expandCodeFor(SH, Ty);
1522 return V;
1523}
1524
1525Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
1526 // Expand the code for this SCEV.
1527 Value *V = expand(SH);
1528
1529 if (Ty && Ty != V->getType()) {
1530 assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
1531 "non-trivial casts should be done with the SCEVs directly!");
1532 V = InsertNoopCastOfTo(V, Ty);
1533 }
1534 return V;
1535}
1536
1537Value *SCEVExpander::FindValueInExprValueMap(
1538 const SCEV *S, const Instruction *InsertPt,
1539 SmallVectorImpl<Instruction *> &DropPoisonGeneratingInsts) {
1540 // If the expansion is not in CanonicalMode, and the SCEV contains any
1541 // sub scAddRecExpr type SCEV, it is required to expand the SCEV literally.
1542 if (!CanonicalMode && SE.containsAddRecurrence(S))
1543 return nullptr;
1544
1545 // If S is a constant or unknown, it may be worse to reuse an existing Value.
1546 if (isa<SCEVConstant>(S) || isa<SCEVUnknown>(S))
1547 return nullptr;
1548
1549 for (Value *V : SE.getSCEVValues(S)) {
1550 Instruction *EntInst = dyn_cast<Instruction>(V);
1551 if (!EntInst)
1552 continue;
1553
1554 // Choose a Value from the set which dominates the InsertPt.
1555 // InsertPt should be inside the Value's parent loop so as not to break
1556 // the LCSSA form.
1557 assert(EntInst->getFunction() == InsertPt->getFunction());
1558 if (S->getType() != V->getType() || !SE.DT.dominates(EntInst, InsertPt) ||
1559 !(SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
1560 SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
1561 continue;
1562
1563 // Make sure reusing the instruction is poison-safe.
1564 if (SE.canReuseInstruction(S, EntInst, DropPoisonGeneratingInsts))
1565 return V;
1566 DropPoisonGeneratingInsts.clear();
1567 }
1568 return nullptr;
1569}
1570
1571// The expansion of SCEV will either reuse a previous Value in ExprValueMap,
1572// or expand the SCEV literally. Specifically, if the expansion is in LSRMode,
1573// and the SCEV contains any sub scAddRecExpr type SCEV, it will be expanded
1574// literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
1575// the expansion will try to reuse Value from ExprValueMap, and only when it
1576// fails, expand the SCEV literally.
1577Value *SCEVExpander::expand(const SCEV *S) {
1578 // Compute an insertion point for this SCEV object. Hoist the instructions
1579 // as far out in the loop nest as possible.
1580 BasicBlock::iterator InsertPt = Builder.GetInsertPoint();
1581
1582 // We can move the insertion point only if there are no div or rem operations;
1583 // otherwise we risk moving it past the check for a zero denominator.
1584 auto SafeToHoist = [](const SCEV *S) {
1585 return !SCEVExprContains(S, [](const SCEV *S) {
1586 if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
1587 if (const auto *SC = dyn_cast<SCEVConstant>(D->getRHS()))
1588 // Division by non-zero constants can be hoisted.
1589 return SC->getValue()->isZero();
1590 // All other divisions should not be moved as they may be
1591 // divisions by zero and should be kept within the
1592 // conditions of the surrounding loops that guard their
1593 // execution (see PR35406).
1594 return true;
1595 }
1596 return false;
1597 });
1598 };
1599 if (SafeToHoist(S)) {
1600 for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1601 L = L->getParentLoop()) {
1602 if (SE.isLoopInvariant(S, L)) {
1603 if (!L) break;
1604 if (BasicBlock *Preheader = L->getLoopPreheader()) {
1605 InsertPt = Preheader->getTerminator()->getIterator();
1606 } else {
1607 // LSR sets the insertion point for AddRec start/step values to the
1608 // block start to simplify value reuse, even though it's an invalid
1609 // position. SCEVExpander must correct for this in all cases.
1610 InsertPt = L->getHeader()->getFirstInsertionPt();
1611 }
1612 } else {
1613 // If the SCEV is computable at this level, insert it into the header
1614 // after the PHIs (and after any other instructions that we've inserted
1615 // there) so that it is guaranteed to dominate any user inside the loop.
1616 if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1617 InsertPt = L->getHeader()->getFirstInsertionPt();
1618
1619 while (InsertPt != Builder.GetInsertPoint() &&
1620 (isInsertedInstruction(&*InsertPt))) {
1621 InsertPt = std::next(InsertPt);
1622 }
1623 break;
1624 }
1625 }
1626 }
1627
1628 // Check to see if we already expanded this here.
1629 auto I = InsertedExpressions.find(std::make_pair(S, &*InsertPt));
1630 if (I != InsertedExpressions.end())
1631 return I->second;
1632
1633 SCEVInsertPointGuard Guard(Builder, this);
1634 Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
1635
1636 // Expand the expression into instructions.
1637 SmallVector<Instruction *> DropPoisonGeneratingInsts;
1638 Value *V = FindValueInExprValueMap(S, &*InsertPt, DropPoisonGeneratingInsts);
1639 if (!V) {
1640 V = visit(S);
1641 V = fixupLCSSAFormFor(V);
1642 } else {
1643 for (Instruction *I : DropPoisonGeneratingInsts) {
1644 rememberFlags(I);
1645 I->dropPoisonGeneratingAnnotations();
1646 // See if we can re-infer from first principles any of the flags we just
1647 // dropped.
1648 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(I))
1649 if (auto Flags = SE.getStrengthenedNoWrapFlagsFromBinOp(OBO)) {
1650 auto *BO = cast<BinaryOperator>(I);
1651 BO->setHasNoUnsignedWrap(
1652 ScalarEvolution::maskFlags(*Flags, SCEV::FlagNUW) == SCEV::FlagNUW);
1653 BO->setHasNoSignedWrap(
1654 ScalarEvolution::maskFlags(*Flags, SCEV::FlagNSW) == SCEV::FlagNSW);
1655 }
1656 if (auto *NNI = dyn_cast<PossiblyNonNegInst>(I)) {
1657 auto *Src = NNI->getOperand(0);
1658 if (isImpliedByDomCondition(ICmpInst::ICMP_SGE, Src,
1659 Constant::getNullValue(Src->getType()), I,
1660 DL).value_or(false))
1661 NNI->setNonNeg(true);
1662 }
1663 }
1664 }
1665 // Remember the expanded value for this SCEV at this location.
1666 //
1667 // This is independent of PostIncLoops. The mapped value simply materializes
1668 // the expression at this insertion point. If the mapped value happened to be
1669 // a postinc expansion, it could be reused by a non-postinc user, but only if
1670 // its insertion point was already at the head of the loop.
1671 InsertedExpressions[std::make_pair(S, &*InsertPt)] = V;
1672 return V;
1673}
1674
1675void SCEVExpander::rememberInstruction(Value *I) {
1676 auto DoInsert = [this](Value *V) {
1677 if (!PostIncLoops.empty())
1678 InsertedPostIncValues.insert(V);
1679 else
1680 InsertedValues.insert(V);
1681 };
1682 DoInsert(I);
1683}
1684
1685void SCEVExpander::rememberFlags(Instruction *I) {
1686 // If we already have flags for the instruction, keep the existing ones.
1687 OrigFlags.try_emplace(I, PoisonFlags(I));
1688}
1689
1690void SCEVExpander::replaceCongruentIVInc(
1691 PHINode *&Phi, PHINode *&OrigPhi, Loop *L, const DominatorTree *DT,
1692 SmallVectorImpl<WeakTrackingVH> &DeadInsts) {
1693 BasicBlock *LatchBlock = L->getLoopLatch();
1694 if (!LatchBlock)
1695 return;
1696
1697 Instruction *OrigInc =
1698 dyn_cast<Instruction>(OrigPhi->getIncomingValueForBlock(LatchBlock));
1699 Instruction *IsomorphicInc =
1700 dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
1701 if (!OrigInc || !IsomorphicInc)
1702 return;
1703
1704 // If this phi has the same width but is more canonical, replace the
1705 // original with it. As part of the "more canonical" determination,
1706 // respect a prior decision to use an IV chain.
1707 if (OrigPhi->getType() == Phi->getType()) {
1708 bool Chained = ChainedPhis.contains(Phi);
1709 if (!(Chained || isExpandedAddRecExprPHI(OrigPhi, OrigInc, L)) &&
1710 (Chained || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
1711 std::swap(OrigPhi, Phi);
1712 std::swap(OrigInc, IsomorphicInc);
1713 }
1714 }
1715
1716 // Replacing the congruent phi is sufficient because acyclic
1717 // redundancy elimination, CSE/GVN, should handle the
1718 // rest. However, once SCEV proves that a phi is congruent,
1719 // it's often the head of an IV user cycle that is isomorphic
1720 // with the original phi. It's worth eagerly cleaning up the
1721 // common case of a single IV increment so that DeleteDeadPHIs
1722 // can remove cycles that had postinc uses.
1723 // Because we may potentially introduce a new use of OrigIV that didn't
1724 // exist before at this point, its poison flags need readjustment.
1725 const SCEV *TruncExpr =
1726 SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
1727 if (OrigInc == IsomorphicInc || TruncExpr != SE.getSCEV(IsomorphicInc) ||
1728 !SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc))
1729 return;
1730
1731 bool BothHaveNUW = false;
1732 bool BothHaveNSW = false;
1733 auto *OBOIncV = dyn_cast<OverflowingBinaryOperator>(OrigInc);
1734 auto *OBOIsomorphic = dyn_cast<OverflowingBinaryOperator>(IsomorphicInc);
1735 if (OBOIncV && OBOIsomorphic) {
1736 BothHaveNUW =
1737 OBOIncV->hasNoUnsignedWrap() && OBOIsomorphic->hasNoUnsignedWrap();
1738 BothHaveNSW =
1739 OBOIncV->hasNoSignedWrap() && OBOIsomorphic->hasNoSignedWrap();
1740 }
1741
1742 if (!hoistIVInc(OrigInc, IsomorphicInc,
1743 /*RecomputePoisonFlags*/ true))
1744 return;
1745
1746 // We are replacing with a wider increment. If both OrigInc and IsomorphicInc
1747 // are NUW/NSW, then we can preserve them on the wider increment; the narrower
1748 // IsomorphicInc would wrap before the wider OrigInc, so the replacement won't
1749 // make IsomorphicInc's uses more poisonous.
1750 assert(OrigInc->getType()->getScalarSizeInBits() >=
1751 IsomorphicInc->getType()->getScalarSizeInBits() &&
1752 "Should only replace an increment with a wider one.");
1753 if (BothHaveNUW || BothHaveNSW) {
1754 OrigInc->setHasNoUnsignedWrap(OBOIncV->hasNoUnsignedWrap() || BothHaveNUW);
1755 OrigInc->setHasNoSignedWrap(OBOIncV->hasNoSignedWrap() || BothHaveNSW);
1756 }
1757
1758 SCEV_DEBUG_WITH_TYPE(DebugType,
1759 dbgs() << "INDVARS: Eliminated congruent iv.inc: "
1760 << *IsomorphicInc << '\n');
1761 Value *NewInc = OrigInc;
1762 if (OrigInc->getType() != IsomorphicInc->getType()) {
1763 BasicBlock::iterator IP;
1764 if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
1765 IP = PN->getParent()->getFirstInsertionPt();
1766 else
1767 IP = OrigInc->getNextNode()->getIterator();
1768
1769 IRBuilder<> Builder(IP->getParent(), IP);
1770 Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
1771 NewInc =
1772 Builder.CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
1773 }
1774 IsomorphicInc->replaceAllUsesWith(NewInc);
1775 DeadInsts.emplace_back(IsomorphicInc);
1776}
1777
1778/// replaceCongruentIVs - Check for congruent phis in this loop header and
1779/// replace them with their most canonical representative. Return the number of
1780/// phis eliminated.
1781///
1782/// This does not depend on any SCEVExpander state but should be used in
1783/// the same context that SCEVExpander is used.
1784unsigned
1785SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
1786 SmallVectorImpl<WeakTrackingVH> &DeadInsts,
1787 const TargetTransformInfo *TTI) {
1788 // Find integer phis in order of increasing width.
1789 SmallVector<PHINode *, 8> Phis(
1790 llvm::make_pointer_range(L->getHeader()->phis()));
1791
1792 if (TTI)
1793 // Use stable_sort to preserve order of equivalent PHIs, so the order
1794 // of the sorted Phis is the same from run to run on the same loop.
1795 llvm::stable_sort(Phis, [](Value *LHS, Value *RHS) {
1796 // Put pointers at the back and make sure pointer < pointer = false.
1797 if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
1798 return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
1799 return RHS->getType()->getPrimitiveSizeInBits().getFixedValue() <
1800 LHS->getType()->getPrimitiveSizeInBits().getFixedValue();
1801 });
1802
1803 unsigned NumElim = 0;
1804 DenseMap<const SCEV *, PHINode *> ExprToIVMap;
1805 // Process phis from wide to narrow. Map wide phis to their truncation
1806 // so narrow phis can reuse them.
1807 for (PHINode *Phi : Phis) {
1808 auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
1809 if (Value *V = simplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
1810 return V;
1811 if (!SE.isSCEVable(PN->getType()))
1812 return nullptr;
1813 auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
1814 if (!Const)
1815 return nullptr;
1816 return Const->getValue();
1817 };
1818
1819 // Fold constant phis. They may be congruent to other constant phis and
1820 // would confuse the logic below that expects proper IVs.
1821 if (Value *V = SimplifyPHINode(Phi)) {
1822 if (V->getType() != Phi->getType())
1823 continue;
1824 SE.forgetValue(Phi);
1825 Phi->replaceAllUsesWith(V);
1826 DeadInsts.emplace_back(Phi);
1827 ++NumElim;
1828 SCEV_DEBUG_WITH_TYPE(DebugType,
1829 dbgs() << "INDVARS: Eliminated constant iv: " << *Phi
1830 << '\n');
1831 continue;
1832 }
1833
1834 if (!SE.isSCEVable(Phi->getType()))
1835 continue;
1836
1837 PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
1838 if (!OrigPhiRef) {
1839 OrigPhiRef = Phi;
1840 if (Phi->getType()->isIntegerTy() && TTI &&
1841 TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
1842 // Make sure we only rewrite using simple induction variables;
1843 // otherwise, we can make the trip count of a loop unanalyzable
1844 // to SCEV.
1845 const SCEV *PhiExpr = SE.getSCEV(Phi);
1846 if (isa<SCEVAddRecExpr>(PhiExpr)) {
1847 // This phi can be freely truncated to the narrowest phi type. Map the
1848 // truncated expression to it so it will be reused for narrow types.
1849 const SCEV *TruncExpr =
1850 SE.getTruncateExpr(PhiExpr, Phis.back()->getType());
1851 ExprToIVMap[TruncExpr] = Phi;
1852 }
1853 }
1854 continue;
1855 }
1856
1857 // Replacing a pointer phi with an integer phi or vice-versa doesn't make
1858 // sense.
1859 if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
1860 continue;
1861
1862 replaceCongruentIVInc(Phi, OrigPhiRef, L, DT, DeadInsts);
1863 SCEV_DEBUG_WITH_TYPE(DebugType,
1864 dbgs() << "INDVARS: Eliminated congruent iv: " << *Phi
1865 << '\n');
1866 SCEV_DEBUG_WITH_TYPE(
1867 DebugType, dbgs() << "INDVARS: Original iv: " << *OrigPhiRef << '\n');
1868 ++NumElim;
1869 Value *NewIV = OrigPhiRef;
1870 if (OrigPhiRef->getType() != Phi->getType()) {
1871 IRBuilder<> Builder(L->getHeader(),
1872 L->getHeader()->getFirstInsertionPt());
1873 Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
1874 NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
1875 }
1876 Phi->replaceAllUsesWith(NewIV);
1877 DeadInsts.emplace_back(Phi);
1878 }
1879 return NumElim;
1880}
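A hedged sketch of the driver pattern around replaceCongruentIVs, roughly in the style of indvars-type callers; the cleanup loop mirrors how the queued WeakTrackingVH entries are usually drained, and all identifiers are illustrative:

#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
using namespace llvm;

static unsigned eliminateCongruentIVs(Loop *L, ScalarEvolution &SE,
                                      DominatorTree &DT, const DataLayout &DL,
                                      const TargetTransformInfo &TTI) {
  SCEVExpander Rewriter(SE, DL, "iv-rewrite");
  SmallVector<WeakTrackingVH, 16> DeadInsts;
  unsigned NumElim = Rewriter.replaceCongruentIVs(L, &DT, DeadInsts, &TTI);
  // Queued values become trivially dead only after their uses were rewritten.
  while (!DeadInsts.empty())
    if (auto *Inst = dyn_cast_or_null<Instruction>(
            static_cast<Value *>(DeadInsts.pop_back_val())))
      RecursivelyDeleteTriviallyDeadInstructions(Inst);
  return NumElim;
}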
1881
1882bool SCEVExpander::hasRelatedExistingExpansion(const SCEV *S,
1883 const Instruction *At,
1884 Loop *L) {
1885 using namespace llvm::PatternMatch;
1886
1887 SmallVector<BasicBlock *, 4> ExitingBlocks;
1888 L->getExitingBlocks(ExitingBlocks);
1889
1890 // Look for suitable value in simple conditions at the loop exits.
1891 for (BasicBlock *BB : ExitingBlocks) {
1892 CmpPredicate Pred;
1893 Instruction *LHS, *RHS;
1894
1895 if (!match(BB->getTerminator(),
1896 m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
1897 m_BasicBlock(), m_BasicBlock())))
1898 continue;
1899
1900 if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
1901 return true;
1902
1903 if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
1904 return true;
1905 }
1906
1907 // Use expand's logic which is used for reusing a previous Value in
1908 // ExprValueMap. Note that we don't currently model the cost of
1909 // needing to drop poison generating flags on the instruction if we
1910 // want to reuse it. We effectively assume that has zero cost.
1911 SmallVector<Instruction *> DropPoisonGeneratingInsts;
1912 return FindValueInExprValueMap(S, At, DropPoisonGeneratingInsts) != nullptr;
1913}
1914
1915template<typename T> static InstructionCost costAndCollectOperands(
1916 const SCEVOperand &WorkItem, const TargetTransformInfo &TTI,
1917 TargetTransformInfo::TargetCostKind CostKind,
1918 SmallVectorImpl<SCEVOperand> &Worklist) {
1919
1920 const T *S = cast<T>(WorkItem.S);
1921 InstructionCost Cost = 0;
1922 // Object to help map SCEV operands to expanded IR instructions.
1923 struct OperationIndices {
1924 OperationIndices(unsigned Opc, size_t min, size_t max) :
1925 Opcode(Opc), MinIdx(min), MaxIdx(max) { }
1926 unsigned Opcode;
1927 size_t MinIdx;
1928 size_t MaxIdx;
1929 };
1930
1931 // Collect the operations of all the instructions that will be needed to
1932 // expand the SCEVExpr. This is so that when we come to cost the operands,
1933 // we know what the generated user(s) will be.
1934 SmallVector<OperationIndices, 2> Operations;
1935
1936 auto CastCost = [&](unsigned Opcode) -> InstructionCost {
1937 Operations.emplace_back(Opcode, 0, 0);
1938 return TTI.getCastInstrCost(Opcode, S->getType(),
1939 S->getOperand(0)->getType(),
1940 TTI::CastContextHint::None, CostKind);
1941 };
1942
1943 auto ArithCost = [&](unsigned Opcode, unsigned NumRequired,
1944 unsigned MinIdx = 0,
1945 unsigned MaxIdx = 1) -> InstructionCost {
1946 Operations.emplace_back(Opcode, MinIdx, MaxIdx);
1947 return NumRequired *
1948 TTI.getArithmeticInstrCost(Opcode, S->getType(), CostKind);
1949 };
1950
1951 auto CmpSelCost = [&](unsigned Opcode, unsigned NumRequired, unsigned MinIdx,
1952 unsigned MaxIdx) -> InstructionCost {
1953 Operations.emplace_back(Opcode, MinIdx, MaxIdx);
1954 Type *OpType = S->getType();
1955 return NumRequired * TTI.getCmpSelInstrCost(
1956 Opcode, OpType, CmpInst::makeCmpResultType(OpType),
1957 CmpInst::BAD_ICMP_PREDICATE, CostKind);
1958 };
1959
1960 switch (S->getSCEVType()) {
1961 case scCouldNotCompute:
1962 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
1963 case scUnknown:
1964 case scConstant:
1965 case scVScale:
1966 return 0;
1967 case scPtrToAddr:
1968 Cost = CastCost(Instruction::PtrToAddr);
1969 break;
1970 case scPtrToInt:
1971 Cost = CastCost(Instruction::PtrToInt);
1972 break;
1973 case scTruncate:
1974 Cost = CastCost(Instruction::Trunc);
1975 break;
1976 case scZeroExtend:
1977 Cost = CastCost(Instruction::ZExt);
1978 break;
1979 case scSignExtend:
1980 Cost = CastCost(Instruction::SExt);
1981 break;
1982 case scUDivExpr: {
1983 unsigned Opcode = Instruction::UDiv;
1984 if (auto *SC = dyn_cast<SCEVConstant>(S->getOperand(1)))
1985 if (SC->getAPInt().isPowerOf2())
1986 Opcode = Instruction::LShr;
1987 Cost = ArithCost(Opcode, 1);
1988 break;
1989 }
1990 case scAddExpr:
1991 Cost = ArithCost(Instruction::Add, S->getNumOperands() - 1);
1992 break;
1993 case scMulExpr:
1994 // TODO: this is a very pessimistic cost modelling for Mul,
1995 // because of Bin Pow algorithm actually used by the expander,
1996 // see SCEVExpander::visitMulExpr(), ExpandOpBinPowN().
1997 Cost = ArithCost(Instruction::Mul, S->getNumOperands() - 1);
1998 break;
1999 case scSMaxExpr:
2000 case scUMaxExpr:
2001 case scSMinExpr:
2002 case scUMinExpr:
2003 case scSequentialUMinExpr: {
2004 // FIXME: should this ask the cost for Intrinsic's?
2005 // The reduction tree.
2006 Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 1);
2007 Cost += CmpSelCost(Instruction::Select, S->getNumOperands() - 1, 0, 2);
2008 switch (S->getSCEVType()) {
2009 case scSequentialUMinExpr: {
2010 // The safety net against poison.
2011 // FIXME: this is broken.
2012 Cost += CmpSelCost(Instruction::ICmp, S->getNumOperands() - 1, 0, 0);
2013 Cost += ArithCost(Instruction::Or,
2014 S->getNumOperands() > 2 ? S->getNumOperands() - 2 : 0);
2015 Cost += CmpSelCost(Instruction::Select, 1, 0, 1);
2016 break;
2017 }
2018 default:
2019 assert(!isa<SCEVSequentialMinMaxExpr>(S) &&
2020 "Unhandled SCEV expression type?");
2021 break;
2022 }
2023 break;
2024 }
2025 case scAddRecExpr: {
2026 // Addrec expands to a phi and add per recurrence.
2027 unsigned NumRecurrences = S->getNumOperands() - 1;
2028 Cost += TTI.getCFInstrCost(Instruction::PHI, CostKind) * NumRecurrences;
2029 Cost +=
2030 TTI.getArithmeticInstrCost(Instruction::Add, S->getType(), CostKind) *
2031 NumRecurrences;
2032 // AR start is used in phi.
2033 Worklist.emplace_back(Instruction::PHI, 0, S->getOperand(0));
2034 // Other operands are used in add.
2035 for (const SCEV *Op : S->operands().drop_front())
2036 Worklist.emplace_back(Instruction::Add, 1, Op);
2037 break;
2038 }
2039 }
2040
2041 for (auto &CostOp : Operations) {
2042 for (auto SCEVOp : enumerate(S->operands())) {
2043 // Clamp the index to account for multiple IR operations being chained.
2044 size_t MinIdx = std::max(SCEVOp.index(), CostOp.MinIdx);
2045 size_t OpIdx = std::min(MinIdx, CostOp.MaxIdx);
2046 Worklist.emplace_back(CostOp.Opcode, OpIdx, SCEVOp.value());
2047 }
2048 }
2049 return Cost;
2050}
2051
2052bool SCEVExpander::isHighCostExpansionHelper(
2053 const SCEVOperand &WorkItem, Loop *L, const Instruction &At,
2054 InstructionCost &Cost, unsigned Budget, const TargetTransformInfo &TTI,
2056 SmallVectorImpl<SCEVOperand> &Worklist) {
2057 if (Cost > Budget)
2058 return true; // Already run out of budget, give up.
2059
2060 const SCEV *S = WorkItem.S;
2061 // Was the cost of expansion of this expression already accounted for?
2062 if (!isa<SCEVConstant>(S) && !Processed.insert(S).second)
2063 return false; // We have already accounted for this expression.
2064
2065 // If we can find an existing value for this scev available at the point "At"
2066 // then consider the expression cheap.
2067 if (hasRelatedExistingExpansion(S, &At, L))
2068 return false; // Consider the expression to be free.
2069
2070 TargetTransformInfo::TargetCostKind CostKind =
2071 L->getHeader()->getParent()->hasMinSize()
2072 ? TargetTransformInfo::TCK_CodeSize
2073 : TargetTransformInfo::TCK_RecipThroughput;
2074
2075 switch (S->getSCEVType()) {
2076 case scCouldNotCompute:
2077 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
2078 case scUnknown:
2079 case scVScale:
2080 // Assume to be zero-cost.
2081 return false;
2082 case scConstant: {
2083 // Only evaluate the costs of constants when optimizing for size.
2084 if (CostKind != TargetTransformInfo::TCK_CodeSize)
2085 return false;
2086 const APInt &Imm = cast<SCEVConstant>(S)->getAPInt();
2087 Type *Ty = S->getType();
2088 Cost += TTI.getIntImmCostInst(
2089 WorkItem.ParentOpcode, WorkItem.OperandIdx, Imm, Ty, CostKind);
2090 return Cost > Budget;
2091 }
2092 case scTruncate:
2093 case scPtrToAddr:
2094 case scPtrToInt:
2095 case scZeroExtend:
2096 case scSignExtend: {
2097 Cost +=
2098 costAndCollectOperands<SCEVCastExpr>(WorkItem, TTI, CostKind, Worklist);
2099 return false; // Will answer upon next entry into this function.
2100 }
2101 case scUDivExpr: {
2102 // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
2103 // HowManyLessThans produced to compute a precise expression, rather than a
2104 // UDiv from the user's code. If we can't find a UDiv in the code with some
2105 // simple searching, we need to account for its cost.
2106
2107 // At the beginning of this function we already tried to find existing
2108 // value for plain 'S'. Now try to lookup 'S + 1' since it is common
2109 // pattern involving division. This is just a simple search heuristic.
2110 if (hasRelatedExistingExpansion(
2111 SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), &At, L))
2112 return false; // Consider it to be free.
2113
2114 Cost +=
2115 costAndCollectOperands<SCEVUDivExpr>(WorkItem, TTI, CostKind, Worklist);
2116 return false; // Will answer upon next entry into this function.
2117 }
2118 case scAddExpr:
2119 case scMulExpr:
2120 case scUMaxExpr:
2121 case scSMaxExpr:
2122 case scUMinExpr:
2123 case scSMinExpr:
2124 case scSequentialUMinExpr: {
2125 assert(cast<SCEVNAryExpr>(S)->getNumOperands() > 1 &&
2126 "Nary expr should have more than 1 operand.");
2127 // The simple nary expr will require one less op (or pair of ops)
2128 // than the number of its terms.
2129 Cost +=
2130 costAndCollectOperands<SCEVNAryExpr>(WorkItem, TTI, CostKind, Worklist);
2131 return Cost > Budget;
2132 }
2133 case scAddRecExpr: {
2134 assert(cast<SCEVAddRecExpr>(S)->getNumOperands() >= 2 &&
2135 "Polynomial should be at least linear");
2136 Cost += costAndCollectOperands<SCEVAddRecExpr>(
2137 WorkItem, TTI, CostKind, Worklist);
2138 return Cost > Budget;
2139 }
2140 }
2141 llvm_unreachable("Unknown SCEV kind!");
2142}
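A hedged caller-side sketch of how this helper is typically reached: clients go through the public isHighCostExpansion entry point declared in the header, which walks a worklist of SCEVOperands and stops as soon as the accumulated cost exceeds the budget. The wrapper below assumes the ArrayRef-based overload of that entry point, and its identifiers are illustrative only:

// Hedged sketch; SCEVCheapExpansionBudget is the cl::opt declared at the top
// of this file, everything else is an assumed caller-provided object.
static bool tooExpensiveToExpand(SCEVExpander &Expander, const SCEV *Expr,
                                 Loop *L, const Instruction *At,
                                 const TargetTransformInfo *TTI) {
  return Expander.isHighCostExpansion({Expr}, L, SCEVCheapExpansionBudget,
                                      TTI, At);
}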
2143
2144Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
2145 Instruction *IP) {
2146 assert(IP);
2147 switch (Pred->getKind()) {
2148 case SCEVPredicate::P_Union:
2149 return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
2150 case SCEVPredicate::P_Compare:
2151 return expandComparePredicate(cast<SCEVComparePredicate>(Pred), IP);
2152 case SCEVPredicate::P_Wrap: {
2153 auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
2154 return expandWrapPredicate(AddRecPred, IP);
2155 }
2156 }
2157 llvm_unreachable("Unknown SCEV predicate type");
2158}
2159
2160Value *SCEVExpander::expandComparePredicate(const SCEVComparePredicate *Pred,
2161 Instruction *IP) {
2162 Value *Expr0 = expand(Pred->getLHS(), IP);
2163 Value *Expr1 = expand(Pred->getRHS(), IP);
2164
2165 Builder.SetInsertPoint(IP);
2166 auto InvPred = ICmpInst::getInversePredicate(Pred->getPredicate());
2167 auto *I = Builder.CreateICmp(InvPred, Expr0, Expr1, "ident.check");
2168 return I;
2169}
2170
2171Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
2172 Instruction *Loc, bool Signed) {
2173 assert(AR->isAffine() && "Cannot generate RT check for "
2174 "non-affine expression");
2175
2176 // FIXME: It is highly suspicious that we're ignoring the predicates here.
2177 SmallVector<const SCEVPredicate *, 4> Pred;
2178 const SCEV *ExitCount =
2179 SE.getPredicatedSymbolicMaxBackedgeTakenCount(AR->getLoop(), Pred);
2180
2181 assert(!isa<SCEVCouldNotCompute>(ExitCount) && "Invalid loop count");
2182
2183 const SCEV *Step = AR->getStepRecurrence(SE);
2184 const SCEV *Start = AR->getStart();
2185
2186 Type *ARTy = AR->getType();
2187 unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
2188 unsigned DstBits = SE.getTypeSizeInBits(ARTy);
2189
2190 // The expression {Start,+,Step} has nusw/nssw if
2191 // Step < 0, Start - |Step| * Backedge <= Start
2192 // Step >= 0, Start + |Step| * Backedge > Start
2193 // and |Step| * Backedge doesn't unsigned overflow.
2194
2195 Builder.SetInsertPoint(Loc);
2196 Value *TripCountVal = expand(ExitCount, Loc);
2197
2198 IntegerType *Ty =
2199 IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(ARTy));
2200
2201 Value *StepValue = expand(Step, Loc);
2202 Value *NegStepValue = expand(SE.getNegativeSCEV(Step), Loc);
2203 Value *StartValue = expand(Start, Loc);
2204
2205 ConstantInt *Zero =
2206 ConstantInt::get(Loc->getContext(), APInt::getZero(DstBits));
2207
2208 Builder.SetInsertPoint(Loc);
2209 // Compute |Step|
2210 Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
2211 Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2212
2213 // Compute |Step| * Backedge
2214 // Compute:
2215 // 1. Start + |Step| * Backedge < Start
2216 // 2. Start - |Step| * Backedge > Start
2217 //
2218 // And select either 1. or 2. depending on whether step is positive or
2219 // negative. If Step is known to be positive or negative, only create
2220 // either 1. or 2.
2221 auto ComputeEndCheck = [&]() -> Value * {
2222 // Get the backedge taken count and truncate or extend it to the AR type.
2223 Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
2224
2225 CallInst *Mul = Builder.CreateIntrinsic(Intrinsic::umul_with_overflow, Ty,
2226 {AbsStep, TruncTripCount},
2227 /*FMFSource=*/nullptr, "mul");
2228 Value *MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
2229 Value *OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");
2230
2231 Value *Add = nullptr, *Sub = nullptr;
2232 bool NeedPosCheck = !SE.isKnownNegative(Step);
2233 bool NeedNegCheck = !SE.isKnownPositive(Step);
2234
2235 if (isa<PointerType>(ARTy)) {
2236 Value *NegMulV = Builder.CreateNeg(MulV);
2237 if (NeedPosCheck)
2238 Add = Builder.CreatePtrAdd(StartValue, MulV);
2239 if (NeedNegCheck)
2240 Sub = Builder.CreatePtrAdd(StartValue, NegMulV);
2241 } else {
2242 if (NeedPosCheck)
2243 Add = Builder.CreateAdd(StartValue, MulV);
2244 if (NeedNegCheck)
2245 Sub = Builder.CreateSub(StartValue, MulV);
2246 }
2247
2248 Value *EndCompareLT = nullptr;
2249 Value *EndCompareGT = nullptr;
2250 Value *EndCheck = nullptr;
2251 if (NeedPosCheck)
2252 EndCheck = EndCompareLT = Builder.CreateICmp(
2253 Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);
2254 if (NeedNegCheck)
2255 EndCheck = EndCompareGT = Builder.CreateICmp(
2256 Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);
2257 if (NeedPosCheck && NeedNegCheck) {
2258 // Select the answer based on the sign of Step.
2259 EndCheck = Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2260 }
2261 return Builder.CreateOr(EndCheck, OfMul);
2262 };
2263 Value *EndCheck = ComputeEndCheck();
2264
2265 // If the backedge taken count type is larger than the AR type,
2266 // check that we don't drop any bits by truncating it. If we are
2267 // dropping bits, then we have overflow (unless the step is zero).
2268 if (SrcBits > DstBits) {
2269 auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
2270 auto *BackedgeCheck =
2271 Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
2272 ConstantInt::get(Loc->getContext(), MaxVal));
2273 BackedgeCheck = Builder.CreateAnd(
2274 BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));
2275
2276 EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2277 }
2278
2279 return EndCheck;
2280}
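Read as straight-line arithmetic, the predicate built above is roughly the following for the unsigned (Signed == false) flavour, assuming a 64-bit AR type and a trip count that already fits in 64 bits. This is a hedged restatement, not the emitted IR; when the sign of Step is known, only one of the two comparisons is actually generated:

#include <cstdint>

// Hedged C++ restatement of the runtime check; __builtin_mul_overflow stands
// in for the llvm.umul.with.overflow intrinsic used by the real expansion.
static bool addRecMayWrap(uint64_t Start, int64_t Step, uint64_t BTC) {
  uint64_t AbsStep = Step < 0 ? 0 - (uint64_t)Step : (uint64_t)Step;
  uint64_t Mul;
  bool OfMul = __builtin_mul_overflow(AbsStep, BTC, &Mul);
  bool EndCheck = Step < 0 ? (Start - Mul > Start)   // negative step wraps down
                           : (Start + Mul < Start);  // positive step wraps up
  return EndCheck || OfMul; // true => the no-wrap assumption does not hold
}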
2281
2282Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
2283 Instruction *IP) {
2284 const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
2285 Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;
2286
2287 // Add a check for NUSW
2288 if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
2289 NUSWCheck = generateOverflowCheck(A, IP, false);
2290
2291 // Add a check for NSSW
2292 if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
2293 NSSWCheck = generateOverflowCheck(A, IP, true);
2294
2295 if (NUSWCheck && NSSWCheck)
2296 return Builder.CreateOr(NUSWCheck, NSSWCheck);
2297
2298 if (NUSWCheck)
2299 return NUSWCheck;
2300
2301 if (NSSWCheck)
2302 return NSSWCheck;
2303
2304 return ConstantInt::getFalse(IP->getContext());
2305}
2306
2307Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
2308 Instruction *IP) {
2309 // Loop over all checks in this set.
2310 SmallVector<Value *> Checks;
2311 for (const auto *Pred : Union->getPredicates()) {
2312 Checks.push_back(expandCodeForPredicate(Pred, IP));
2313 Builder.SetInsertPoint(IP);
2314 }
2315
2316 if (Checks.empty())
2317 return ConstantInt::getFalse(IP->getContext());
2318 return Builder.CreateOr(Checks);
2319}
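A hedged sketch of how these predicate expanders are commonly driven when versioning a loop on its SCEV assumptions; the branch wiring is omitted and the identifiers are illustrative:

// Hedged sketch: materialize the runtime check for a predicate in the
// preheader. The returned value is true when at least one assumption fails,
// i.e. it is the condition for taking the conservative fall-back path.
static Value *emitSCEVChecks(SCEVExpander &Expander, const SCEVPredicate &Pred,
                             BasicBlock *Preheader) {
  Instruction *Loc = Preheader->getTerminator();
  return Expander.expandCodeForPredicate(&Pred, Loc);
}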
2320
2321Value *SCEVExpander::fixupLCSSAFormFor(Value *V) {
2322 auto *DefI = dyn_cast<Instruction>(V);
2323 if (!PreserveLCSSA || !DefI)
2324 return V;
2325
2326 BasicBlock::iterator InsertPt = Builder.GetInsertPoint();
2327 Loop *DefLoop = SE.LI.getLoopFor(DefI->getParent());
2328 Loop *UseLoop = SE.LI.getLoopFor(InsertPt->getParent());
2329 if (!DefLoop || UseLoop == DefLoop || DefLoop->contains(UseLoop))
2330 return V;
2331
2332 // Create a temporary instruction at the current insertion point, so we
2333 // can hand it off to the helper to create LCSSA PHIs if required for the
2334 // new use.
2335 // FIXME: Ideally formLCSSAForInstructions (used in fixupLCSSAFormFor)
2336 // would accept an insertion point and return an LCSSA phi for that
2337 // insertion point, so there is no need to insert & remove the temporary
2338 // instruction.
2339 Type *ToTy;
2340 if (DefI->getType()->isIntegerTy())
2341 ToTy = PointerType::get(DefI->getContext(), 0);
2342 else
2343 ToTy = Type::getInt32Ty(DefI->getContext());
2344 Instruction *User =
2345 CastInst::CreateBitOrPointerCast(DefI, ToTy, "tmp.lcssa.user", InsertPt);
2346 llvm::scope_exit RemoveUserOnExit([User]() { User->eraseFromParent(); });
2347
2348 SmallVector<Instruction *, 1> ToUpdate;
2349 ToUpdate.push_back(DefI);
2350 SmallVector<PHINode *, 16> PHIsToRemove;
2351 SmallVector<PHINode *, 16> InsertedPHIs;
2352 formLCSSAForInstructions(ToUpdate, SE.DT, SE.LI, &SE, &PHIsToRemove,
2353 &InsertedPHIs);
2354 for (PHINode *PN : InsertedPHIs)
2355 rememberInstruction(PN);
2356 for (PHINode *PN : PHIsToRemove) {
2357 if (!PN->use_empty())
2358 continue;
2359 InsertedValues.erase(PN);
2360 InsertedPostIncValues.erase(PN);
2361 PN->eraseFromParent();
2362 }
2363
2364 return User->getOperand(0);
2365}
2366
2367namespace {
2368// Search for a SCEV subexpression that is not safe to expand. Any expression
2369// that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
2370// UDiv expressions. We don't know if the UDiv is derived from an IR divide
2371// instruction, but the important thing is that we prove the denominator is
2372// nonzero before expansion.
2373//
2374// IVUsers already checks that IV-derived expressions are safe. So this check is
2375// only needed when the expression includes some subexpression that is not IV
2376// derived.
2377//
2378// Currently, we only allow division by a value provably non-zero here.
2379//
2380// We cannot generally expand recurrences unless the step dominates the loop
2381// header. The expander handles the special case of affine recurrences by
2382// scaling the recurrence outside the loop, but this technique isn't generally
2383// applicable. Expanding a nested recurrence outside a loop requires computing
2384// binomial coefficients. This could be done, but the recurrence has to be in a
2385// perfectly reduced form, which can't be guaranteed.
2386struct SCEVFindUnsafe {
2387 ScalarEvolution &SE;
2388 bool CanonicalMode;
2389 bool IsUnsafe = false;
2390
2391 SCEVFindUnsafe(ScalarEvolution &SE, bool CanonicalMode)
2392 : SE(SE), CanonicalMode(CanonicalMode) {}
2393
2394 bool follow(const SCEV *S) {
2395 if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
2396 if (!SE.isKnownNonZero(D->getRHS())) {
2397 IsUnsafe = true;
2398 return false;
2399 }
2400 }
2401 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
2402 // For non-affine addrecs or in non-canonical mode we need a preheader
2403 // to insert into.
2404 if (!AR->getLoop()->getLoopPreheader() &&
2405 (!CanonicalMode || !AR->isAffine())) {
2406 IsUnsafe = true;
2407 return false;
2408 }
2409 }
2410 return true;
2411 }
2412 bool isDone() const { return IsUnsafe; }
2413};
2414} // namespace
2415
2416bool SCEVExpander::isSafeToExpand(const SCEV *S) const {
2417 SCEVFindUnsafe Search(SE, CanonicalMode);
2418 visitAll(S, Search);
2419 return !Search.IsUnsafe;
2420}
2421
2422bool SCEVExpander::isSafeToExpandAt(const SCEV *S,
2423 const Instruction *InsertionPoint) const {
2424 if (!isSafeToExpand(S))
2425 return false;
2426 // We have to prove that the expanded site of S dominates InsertionPoint.
2427 // This is easy when not in the same block, but hard when S is an instruction
2428 // to be expanded somewhere inside the same block as our insertion point.
2429 // What we really need here is something analogous to an OrderedBasicBlock,
2430 // but for the moment, we paper over the problem by handling two common and
2431 // cheap to check cases.
2432 if (SE.properlyDominates(S, InsertionPoint->getParent()))
2433 return true;
2434 if (SE.dominates(S, InsertionPoint->getParent())) {
2435 if (InsertionPoint->getParent()->getTerminator() == InsertionPoint)
2436 return true;
2437 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
2438 if (llvm::is_contained(InsertionPoint->operand_values(), U->getValue()))
2439 return true;
2440 }
2441 return false;
2442}
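Callers usually pair these safety queries with the actual expansion, along the lines of the hedged sketch below (identifiers are illustrative):

// Hedged sketch: refuse to materialize S at InsertPt if that could introduce
// a division by a possibly-zero denominator or an unexpandable recurrence.
static Value *tryExpandAt(SCEVExpander &Expander, const SCEV *S,
                          Instruction *InsertPt) {
  if (!Expander.isSafeToExpandAt(S, InsertPt))
    return nullptr;
  return Expander.expandCodeFor(S, S->getType(), InsertPt->getIterator());
}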
2443
2444SCEVExpanderCleaner::~SCEVExpanderCleaner() {
2445 // Result is used, nothing to remove.
2446 if (ResultUsed)
2447 return;
2448
2449 // Restore original poison flags.
2450 for (auto [I, Flags] : Expander.OrigFlags)
2451 Flags.apply(I);
2452
2453 auto InsertedInstructions = Expander.getAllInsertedInstructions();
2454#ifndef NDEBUG
2455 SmallPtrSet<Instruction *, 8> InsertedSet(llvm::from_range,
2456 InsertedInstructions);
2457 (void)InsertedSet;
2458#endif
2459 // Remove sets with value handles.
2460 Expander.clear();
2461
2462 // Remove all inserted instructions.
2463 for (Instruction *I : reverse(InsertedInstructions)) {
2464#ifndef NDEBUG
2465 assert(all_of(I->users(),
2466 [&InsertedSet](Value *U) {
2467 return InsertedSet.contains(cast<Instruction>(U));
2468 }) &&
2469 "removed instruction should only be used by instructions inserted "
2470 "during expansion");
2471#endif
2472 assert(!I->getType()->isVoidTy() &&
2473 "inserted instruction should have non-void types");
2474 I->replaceAllUsesWith(PoisonValue::get(I->getType()));
2475 I->eraseFromParent();
2476 }
2477}
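The destructor above is what makes the speculative-expansion pattern work. A hedged sketch of that pattern follows, assuming the header-side markResultUsed() toggle for ResultUsed; all other identifiers are illustrative:

// Hedged sketch: expand first, decide later. If the transform is abandoned,
// ~SCEVExpanderCleaner restores poison flags and erases every inserted
// instruction; otherwise markResultUsed() keeps the expansion.
static bool speculativelyRewrite(SCEVExpander &Expander, const SCEV *S,
                                 Instruction *OldValue, bool Profitable) {
  SCEVExpanderCleaner Cleaner(Expander);
  Value *NewV =
      Expander.expandCodeFor(S, S->getType(), OldValue->getIterator());
  if (!Profitable)
    return false;
  OldValue->replaceAllUsesWith(NewV);
  Cleaner.markResultUsed();
  return true;
}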