//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace PatternMatch;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = nullptr;

  // Check to see if there is already a cast!
  for (User *U : V->users())
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point.
            Ret = CastInst::Create(Op, V, Ty, "", &*IP);
            Ret->takeName(CI);
            CI->replaceAllUsesWith(Ret);
            break;
          }
          Ret = CI;
          break;
        }

  // Create a new cast.
  if (!Ret)
    Ret = CastInst::Create(Op, V, Ty, V->getName(), &*IP);

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(SE.DT.dominates(Ret, &*BIP));

  rememberInstruction(Ret);
  return Ret;
}

static BasicBlock::iterator findInsertPointAfter(Instruction *I,
                                                 BasicBlock *MustDominate) {
  BasicBlock::iterator IP = ++I->getIterator();
  if (auto *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();

  while (isa<PHINode>(IP))
    ++IP;

  if (isa<FuncletPadInst>(IP) || isa<LandingPadInst>(IP)) {
    ++IP;
  } else if (isa<CatchSwitchInst>(IP)) {
    IP = MustDominate->getFirstInsertionPt();
  } else {
    assert(!IP->isEHPad() && "unexpected eh pad!");
  }

  return IP;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
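///
/// For example (assuming a target with 64-bit pointers), ptrtoint i8* to i64
/// and bitcast i32* to i8* merely reinterpret the bits of the value and are
/// noop casts, whereas zext i32 to i64 changes the value's width and would
/// trip the asserts below.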
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = findInsertPointAfter(I, Builder.GetInsertBlock());
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation, and hoisting
/// to an outer loop when the opportunity is there and it is safe.
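///
/// For example, if an `add i64 %a, %b` with matching no-wrap flags already
/// appears among the few instructions (ScanLimit below) just before the
/// insertion point, that instruction is returned instead of emitting a
/// duplicate.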
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS,
                                 SCEV::NoWrapFlags Flags, bool IsSafeToHoist) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;

      auto canGenerateIncompatiblePoison = [&Flags](Instruction *I) {
        // Ensure that no-wrap flags match.
        if (isa<OverflowingBinaryOperator>(I)) {
          if (I->hasNoSignedWrap() != (Flags & SCEV::FlagNSW))
            return true;
          if (I->hasNoUnsignedWrap() != (Flags & SCEV::FlagNUW))
            return true;
        }
        // Conservatively, do not use any instruction which has the 'exact'
        // flag set.
        if (isa<PossiblyExactOperator>(I) && I->isExact())
          return true;
        return false;
      };
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS && !canGenerateIncompatiblePoison(&*IP))
        return &*IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  DebugLoc Loc = Builder.GetInsertPoint()->getDebugLoc();
  SCEVInsertPointGuard Guard(Builder, this);

  if (IsSafeToHoist) {
    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(Loc);
  if (Flags & SCEV::FlagNUW)
    BO->setHasNoUnsignedWrap();
  if (Flags & SCEV::FlagNSW)
    BO->setHasNoSignedWrap();
  rememberInstruction(BO);

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
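///
/// For instance, with Factor = 4 a constant S = 6 is rewritten to S = 1 with
/// 2 added to Remainder (6 = 1*4 + 2), and a product S = (4 * %x) is
/// rewritten to S = %x with Remainder left untouched.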
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder,
                              const SCEV *Factor, ScalarEvolution &SE,
                              const DataLayout &DL) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
          ConstantInt::get(SE.getContext(), C->getAPInt().sdiv(FC->getAPInt()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder = SE.getAddExpr(
            Remainder, SE.getConstant(C->getAPInt().srem(FC->getAPInt())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // Size is known, check if there is a constant operand which is a multiple
    // of the given factor. If so, we can factor it.
    const SCEVConstant *FC = cast<SCEVConstant>(Factor);
    if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
      if (!C->getAPInt().srem(FC->getAPInt())) {
        SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
        NewMulOps[0] = SE.getConstant(C->getAPInt().sdiv(FC->getAPInt()));
        S = SE.getMulExpr(NewMulOps);
        return true;
      }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
      return false;
    S = SE.getAddRecExpr(Start, Step, A->getLoop(),
                         A->getNoWrapFlags(SCEV::FlagNW));
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
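/// For example, given Ops = [5, %x, {0,+,1}<L>], the non-addrec prefix
/// [5, %x] is handed to SE.getAddExpr for sorting and simplification, and
/// the addrec is re-appended at the tail.
///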
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         A->getNoWrapFlags(SCEV::FlagNW)));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
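/// As a rough illustration (names hypothetical), instead of emitting
///   %pi = ptrtoint i32* %base to i64
///   %ai = add i64 %pi, %offset
///   %p  = inttoptr i64 %ai to i32*
/// this function aims to produce
///   %p = getelementptr i32, i32* %base, i64 %idx
/// where %idx is %offset with the element size factored out, a form that
/// alias analysis can reason about much more precisely.
///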
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *OriginalElTy = PTy->getElementType();
  Type *ElTy = OriginalElTy;
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  Type *IntPtrTy = DL.getIntPtrType(PTy);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (const SCEV *Op : Ops) {
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, DL)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Op);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      // Field offsets are known. See if a constant offset falls within any of
      // the struct fields.
      if (Ops.empty())
        break;
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
        if (SE.getTypeSizeInBits(C->getType()) <= 64) {
          const StructLayout &SL = *DL.getStructLayout(STy);
          uint64_t FullOffset = C->getValue()->getZExtValue();
          if (FullOffset < SL.getSizeInBytes()) {
            unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
            GepIndices.push_back(
                ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
            ElTy = STy->getTypeAtIndex(ElIdx);
            Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
            AnyNonZeroIndices = true;
            FoundFieldNo = true;
          }
        }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
            Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
        Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(Type::getInt8Ty(Ty->getContext()),
                                              CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return &*IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(Builder.getInt8Ty(), V, Idx, "uglygep");
    rememberInstruction(GEP);

    return GEP;
  }

  {
    SCEVInsertPointGuard Guard(Builder, this);

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V)) break;

      bool AnyIndexNotLoopInvariant = any_of(
          GepIndices, [L](Value *Op) { return !L->isLoopInvariant(Op); });

      if (AnyIndexNotLoopInvariant)
        break;

      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader->getTerminator());
    }

    // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
    // because ScalarEvolution may have changed the address arithmetic to
    // compute a value which is beyond the end of the allocated object.
    Value *Casted = V;
    if (V->getType() != PTy)
      Casted = InsertNoopCastOfTo(Casted, PTy);
    Value *GEP = Builder.CreateGEP(OriginalElTy, Casted, GepIndices, "scevgep");
    Ops.push_back(SE.getUnknown(GEP));
    rememberInstruction(GEP);
  }

  return expand(SE.getAddExpr(Ops));
}

Value *SCEVExpander::expandAddToGEP(const SCEV *Op, PointerType *PTy, Type *Ty,
                                    Value *V) {
  const SCEV *const Ops[1] = {Op};
  return expandAddToGEP(Ops, Ops + 1, PTy, Ty, V);
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  auto Pair = RelevantLoops.insert(std::make_pair(S, nullptr));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return nullptr;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI.getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return nullptr;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = nullptr;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (const SCEV *Op : N->operands())
      L = PickMostRelevantLoop(L, getRelevantLoop(Op), SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result = PickMostRelevantLoop(
        getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the front; visitAddExpr relies on
    // pointer operands preceding non-pointer operands.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = nullptr;
  for (auto I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E;) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown that is not an instruction, peek
        // through it to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W, SCEV::FlagAnyWrap,
                        /*IsSafeToHoist*/ true);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W, S->getNoWrapFlags(),
                        /*IsSafeToHoist*/ true);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  llvm::stable_sort(OpsAndLoops, LoopCompare(SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = nullptr;
  auto I = OpsAndLoops.begin();

  // Expand the calculation of X pow N in the following manner:
  // Let N = P1 + P2 + ... + PK, where all P are powers of 2. Then:
  // X pow N = (X pow P1) * (X pow P2) * ... * (X pow PK).
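  // For example, N = 13 decomposes as 8 + 4 + 1, so X^13 is computed as
  // X^8 * X^4 * X, taking 3 squarings plus 2 combining multiplies (5 in
  // total) instead of the 12 multiplies of the naive product.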
  const auto ExpandOpBinPowN = [this, &I, &OpsAndLoops, &Ty]() {
    auto E = I;
    // Calculate how many times the same operand from the same loop is included
    // into this power.
    uint64_t Exponent = 0;
    const uint64_t MaxExponent = UINT64_MAX >> 1;
    // No one sane will ever try to calculate such huge exponents, but if we
    // need this, we stop on UINT64_MAX / 2 because we need to exit the loop
    // below when the power of 2 exceeds our Exponent, and we want it to be
    // 1u << 31 at most to not deal with unsigned overflow.
    while (E != OpsAndLoops.end() && *I == *E && Exponent != MaxExponent) {
      ++Exponent;
      ++E;
    }
    assert(Exponent > 0 && "Trying to calculate a zeroth exponent of operand?");

    // Calculate powers with exponents 1, 2, 4, 8, etc. and include those that
    // are needed in the result.
    Value *P = expandCodeFor(I->second, Ty);
    Value *Result = nullptr;
    if (Exponent & 1)
      Result = P;
    for (uint64_t BinExp = 2; BinExp <= Exponent; BinExp <<= 1) {
      P = InsertBinop(Instruction::Mul, P, P, SCEV::FlagAnyWrap,
                      /*IsSafeToHoist*/ true);
      if (Exponent & BinExp)
        Result = Result ? InsertBinop(Instruction::Mul, Result, P,
                                      SCEV::FlagAnyWrap,
                                      /*IsSafeToHoist*/ true)
                        : P;
    }

    I = E;
    assert(Result && "Nothing was expanded?");
    return Result;
  };

  while (I != OpsAndLoops.end()) {
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = ExpandOpBinPowN();
    } else if (I->second->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod,
                         SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
      ++I;
    } else {
      // A simple mul.
      Value *W = ExpandOpBinPowN();
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      const APInt *RHS;
      if (match(W, m_Power2(RHS))) {
        // Canonicalize Prod*(1<<C) to Prod<<C.
        assert(!Ty->isVectorTy() && "vector types are not SCEVable");
        auto NWFlags = S->getNoWrapFlags();
        // Clear the nsw flag if the shl will produce a poison value.
        if (RHS->logBase2() == RHS->getBitWidth() - 1)
          NWFlags = ScalarEvolution::clearFlags(NWFlags, SCEV::FlagNSW);
        Prod = InsertBinop(Instruction::Shl, Prod,
                           ConstantInt::get(Ty, RHS->logBase2()), NWFlags,
                           /*IsSafeToHoist*/ true);
      } else {
        Prod = InsertBinop(Instruction::Mul, Prod, W, S->getNoWrapFlags(),
                           /*IsSafeToHoist*/ true);
      }
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
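  // A udiv by a constant power of two can be emitted as a cheaper logical
  // shift right; for example, dividing by 8 becomes a lshr by 3.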
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getAPInt();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()),
                         SCEV::FlagAnyWrap, /*IsSafeToHoist*/ true);
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS, SCEV::FlagAnyWrap,
                     /*IsSafeToHoist*/ SE.isKnownNonZero(S->getRHS()));
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
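///
/// For example (shapes hypothetical), a Base of {%p + %n,+,4} is peeled
/// down to Base = %p, with %n and the offset recurrence {0,+,4} folded
/// into Rest.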
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          A->getNoWrapFlags(SCEV::FlagNW)));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
         OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT.dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV == PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}

/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP. If the pattern isn't recognized, return NULL.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return nullptr;

  switch (IncV->getOpcode()) {
  default:
    return nullptr;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT.dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return nullptr;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (auto I = IncV->op_begin() + 1, E = IncV->op_end(); I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT.dominates(OInst, InsertPos))
          return nullptr;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return nullptr;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return nullptr;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}

/// If the insert point of the current builder or any of the builders on the
/// stack of saved builders has 'I' as its insert point, update it to point to
/// the instruction after 'I'. This is intended to be used when the instruction
/// 'I' is being moved. If this fixup is not done and 'I' is moved to a
/// different block, the inconsistent insert point (with a mismatched
/// Instruction and Block) can lead to an instruction being inserted in a block
/// other than its parent.
void SCEVExpander::fixupInsertPoints(Instruction *I) {
  BasicBlock::iterator It(*I);
  BasicBlock::iterator NewInsertPt = std::next(It);
  if (Builder.GetInsertPoint() == It)
    Builder.SetInsertPoint(&*NewInsertPt);
  for (auto *InsertPtGuard : InsertPointGuards)
    if (InsertPtGuard->GetInsertPoint() == It)
      InsertPtGuard->SetInsertPoint(NewInsertPt);
}

/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT.dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (isa<PHINode>(InsertPos) ||
      !SE.DT.dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (!SE.LI.movementPreservesLCSSAForm(IncV, InsertPos))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for(;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT.dominates(IncV, InsertPos))
      break;
  }
  for (auto I = IVIncs.rbegin(), E = IVIncs.rend(); I != E; ++I) {
    fixupInsertPoints(*I);
    (*I)->moveBefore(InsertPos);
  }
  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddtoGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for(Instruction *IVOper = IncV;
      (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}

/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    IncV = expandAddToGEP(SE.getSCEV(StepV), GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}

/// Hoist the addrec instruction chain rooted in the loop phi above the
/// position. This routine assumes that this is possible (has been checked).
void SCEVExpander::hoistBeforePos(DominatorTree *DT, Instruction *InstToHoist,
                                  Instruction *Pos, PHINode *LoopPhi) {
  do {
    if (DT->dominates(InstToHoist, Pos))
      break;
    // Make sure the increment is where we want it. But don't move it
    // down past a potential existing post-inc user.
    fixupInsertPoints(InstToHoist);
    InstToHoist->moveBefore(Pos);
    Pos = InstToHoist;
    InstToHoist = cast<Instruction>(InstToHoist->getOperand(0));
  } while (InstToHoist != LoopPhi);
}

/// Check whether we can cheaply express the requested SCEV in terms of
/// the available PHI SCEV by truncation and/or inversion of the step.
static bool canBeCheaplyTransformed(ScalarEvolution &SE,
                                    const SCEVAddRecExpr *Phi,
                                    const SCEVAddRecExpr *Requested,
                                    bool &InvertStep) {
  Type *PhiTy = SE.getEffectiveSCEVType(Phi->getType());
  Type *RequestedTy = SE.getEffectiveSCEVType(Requested->getType());

  if (RequestedTy->getIntegerBitWidth() > PhiTy->getIntegerBitWidth())
    return false;

  // Try truncate it if necessary.
  Phi = dyn_cast<SCEVAddRecExpr>(SE.getTruncateOrNoop(Phi, RequestedTy));
  if (!Phi)
    return false;

  // Check whether truncation will help.
  if (Phi == Requested) {
    InvertStep = false;
    return true;
  }

  // Check whether inverting will help: {R,+,-1} == R - {0,+,1}.
  if (SE.getAddExpr(Requested->getStart(),
                    SE.getNegativeSCEV(Requested)) == Phi) {
    InvertStep = true;
    return true;
  }

  return false;
}

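// The two helpers below prove no-wrap for the IV increment by a widening
// argument: if adding AR and Step in a type twice as wide (extending each
// operand first) yields the same SCEV as extending the narrow sum, the
// narrow add cannot have wrapped. For nsw this amounts to checking
//   sext(AR + Step) == sext(AR) + sext(Step)   at 2 * BitWidth.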
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getSignExtendExpr(Step, WideTy),
                                            SE.getSignExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
      SE.getSignExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR) {
  if (!isa<IntegerType>(AR->getType()))
    return false;

  unsigned BitWidth = cast<IntegerType>(AR->getType())->getBitWidth();
  Type *WideTy = IntegerType::get(AR->getType()->getContext(), BitWidth * 2);
  const SCEV *Step = AR->getStepRecurrence(SE);
  const SCEV *OpAfterExtend = SE.getAddExpr(SE.getZeroExtendExpr(Step, WideTy),
                                            SE.getZeroExtendExpr(AR, WideTy));
  const SCEV *ExtendAfterOp =
      SE.getZeroExtendExpr(SE.getAddExpr(AR, Step), WideTy);
  return ExtendAfterOp == OpAfterExtend;
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy,
                                        Type *&TruncTy,
                                        bool &InvertStep) {
  assert((!IVIncInsertLoop || IVIncInsertPos) &&
         "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    PHINode *AddRecPhiMatch = nullptr;
    Instruction *IncV = nullptr;
    TruncTy = nullptr;
    InvertStep = false;

    // Only try partially matching scevs that need truncation and/or
    // step-inversion if we know this loop is outside the current loop.
    bool TryNonMatchingSCEV =
        IVIncInsertLoop &&
        SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader());

    for (PHINode &PN : L->getHeader()->phis()) {
      if (!SE.isSCEVable(PN.getType()))
        continue;

      const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
      if (!PhiSCEV)
        continue;

      bool IsMatchingSCEV = PhiSCEV == Normalized;
      // We only handle truncation and inversion of phi recurrences for the
      // expanded expression if the expanded expression's loop dominates the
      // loop we insert to. Check now, so we can bail out early.
      if (!IsMatchingSCEV && !TryNonMatchingSCEV)
        continue;

      // TODO: this possibly can be reworked to avoid this cast at all.
      Instruction *TempIncV =
          dyn_cast<Instruction>(PN.getIncomingValueForBlock(LatchBlock));
      if (!TempIncV)
        continue;

      // Check whether we can reuse this PHI node.
      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(&PN, TempIncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(TempIncV, IVIncInsertPos))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(&PN, TempIncV, L))
          continue;
      }

      // Stop if we have found an exact match SCEV.
      if (IsMatchingSCEV) {
        IncV = TempIncV;
        TruncTy = nullptr;
        InvertStep = false;
        AddRecPhiMatch = &PN;
        break;
      }

      // Try whether the phi can be translated into the requested form
      // (truncated and/or offset by a constant).
      if ((!TruncTy || InvertStep) &&
          canBeCheaplyTransformed(SE, PhiSCEV, Normalized, InvertStep)) {
        // Record the phi node. But don't stop; we might find an exact match
        // later.
        AddRecPhiMatch = &PN;
        IncV = TempIncV;
        TruncTy = SE.getEffectiveSCEVType(Normalized->getType());
      }
    }

    if (AddRecPhiMatch) {
      // Potentially, move the increment. We have made sure in
      // isExpandedAddRecExprPHI or hoistIVInc that this is possible.
      if (L == IVIncInsertLoop)
        hoistBeforePos(&SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch);

      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(AddRecPhiMatch);
      // Remember the increment.
      rememberInstruction(IncV);
      return AddRecPhiMatch;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  SCEVInsertPointGuard Guard(Builder, this);

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value into the loop preheader.
  assert(L->getLoopPreheader() &&
         "Can't expand add recurrences without a loop preheader!");
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getLoopPreheader()->getTerminator());

  // StartV must have been inserted into L's preheader to dominate the new
  // phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT.properlyDominates(cast<Instruction>(StartV)->getParent(),
                                 L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that PHI
  // reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the increment
  // (unless it's a constant, because subtracts of constants are canonicalized
  // to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());

  // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
  // we actually do emit an addition. It does not apply if we emit a
  // subtraction.
  bool IncrementIsNUW = !useSubtract && IsIncrementNUW(SE, Normalized);
  bool IncrementIsNSW = !useSubtract && IsIncrementNSW(SE, Normalized);

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert the
    // instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    if (isa<OverflowingBinaryOperator>(IncV)) {
      if (IncrementIsNUW)
        cast<BinaryOperator>(IncV)->setHasNoUnsignedWrap();
      if (IncrementIsNSW)
        cast<BinaryOperator>(IncV)->setHasNoSignedWrap();
    }
    PN->addIncoming(IncV, Pred);
  }

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized = cast<SCEVAddRecExpr>(normalizeForPostIncUse(S, Loops, SE));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = nullptr;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
        SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                         Normalized->getLoop(),
                         Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = nullptr;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    if (!Start->isZero()) {
      // The normalization below assumes that Start is constant zero, so if
      // it isn't, re-associate Start to PostLoopOffset.
      assert(!PostLoopOffset && "Start not-null but PostLoopOffset set?");
      PostLoopOffset = Start;
      Start = SE.getConstant(Normalized->getType(), 0);
    }
    Normalized =
        cast<SCEVAddRecExpr>(SE.getAddRecExpr(
            Start, Step, Normalized->getLoop(),
            Normalized->getNoWrapFlags(SCEV::FlagNW)));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  // We can't use a pointer type for the addrec if the pointer type is
  // non-integral.
  Type *AddRecPHIExpandTy =
      DL.isNonIntegralPointerType(STy) ? Normalized->getType() : ExpandTy;

  // In some cases, we decide to reuse an existing phi node but need to truncate
  // it and/or invert the step.
  Type *TruncTy = nullptr;
  bool InvertStep = false;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
                                          IntTy, TruncTy, InvertStep);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result) &&
        !SE.DT.dominates(cast<Instruction>(Result),
                         &*Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside whose operand is replaced during
      // expansion with the value of the postinc user. Without fundamentally
      // changing the way postinc users are tracked, the only remedy is
      // inserting an extra IV increment. StepV might fold into PostLoopOffset,
      // but hopefully expandCodeFor handles that.
      bool useSubtract =
          !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      Value *StepV;
      {
        // Expand the step somewhere that dominates the loop header.
        SCEVInsertPointGuard Guard(Builder, this);
        StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());
      }
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // We have decided to reuse an induction variable of a dominating loop. Apply
  // truncation and/or inversion of the step.
  if (TruncTy) {
    Type *ResTy = Result->getType();
    // Normalize the result type.
    if (ResTy != SE.getEffectiveSCEVType(ResTy))
      Result = InsertNoopCastOfTo(Result, SE.getEffectiveSCEVType(ResTy));
    // Truncate the result.
    if (TruncTy != Result->getType()) {
      Result = Builder.CreateTrunc(Result, TruncTy);
      rememberInstruction(Result);
    }
    // Invert the result.
    if (InvertStep) {
      Result = Builder.CreateSub(expandCodeFor(Normalized->getStart(), TruncTy),
                                 Result);
      rememberInstruction(Result);
    }
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    assert(S->isAffine() && "Can't linearly scale non-affine recurrences.");
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      if (Result->getType()->isIntegerTy()) {
        Value *Base = expandCodeFor(PostLoopOffset, ExpandTy);
        Result = expandAddToGEP(SE.getUnknown(Result), PTy, IntTy, Base);
      } else {
        Result = expandAddToGEP(PostLoopOffset, PTy, IntTy, Result);
      }
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}

1485 Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
1486  // In canonical mode we compute the addrec as an expression of a canonical IV
1487  // using evaluateAtIteration and expand the resulting SCEV expression. This
1488  // way we avoid introducing new IVs to carry the computation of the addrec
1489  // throughout the loop.
1490  //
1491  // For nested addrecs evaluateAtIteration might need a canonical IV of a
1492  // type wider than the addrec itself. Emitting a canonical IV of the
1493  // proper type might produce non-legal types, for example expanding an i64
1494  // {0,+,2,+,1} addrec would need an i65 canonical IV. To avoid this just fall
1495  // back to non-canonical mode for nested addrecs.
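 // As an illustrative aside (not part of the original source): the closed
 // form of {0,+,2,+,1}<L> at iteration n is 2*n + n*(n-1)/2, and forming
 // the intermediate product n*(n-1) without overflow is what would force
 // the wider i65 canonical IV mentioned above.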
1496  if (!CanonicalMode || (S->getNumOperands() > 2))
1497  return expandAddRecExprLiterally(S);
1498 
1499  Type *Ty = SE.getEffectiveSCEVType(S->getType());
1500  const Loop *L = S->getLoop();
1501 
1502  // First check for an existing canonical IV in a suitable type.
1503  PHINode *CanonicalIV = nullptr;
1504  if (PHINode *PN = L->getCanonicalInductionVariable())
1505  if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
1506  CanonicalIV = PN;
1507 
1508  // Rewrite an AddRec in terms of the canonical induction variable, if
1509  // its type is narrower.
1510  if (CanonicalIV &&
1511  SE.getTypeSizeInBits(CanonicalIV->getType()) >
1512  SE.getTypeSizeInBits(Ty)) {
1513  SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
1514  for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
1515  NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
1516  Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
1517  S->getNoWrapFlags(SCEV::FlagNW)));
1518  BasicBlock::iterator NewInsertPt =
1519  findInsertPointAfter(cast<Instruction>(V), Builder.GetInsertBlock());
1520  V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
1521  &*NewInsertPt);
1522  return V;
1523  }
1524 
1525  // {X,+,F} --> X + {0,+,F}
1526  if (!S->getStart()->isZero()) {
1527  SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
1528  NewOps[0] = SE.getConstant(Ty, 0);
1529  const SCEV *Rest = SE.getAddRecExpr(NewOps, L,
1530  S->getNoWrapFlags(SCEV::FlagNW));
1531 
1532  // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
1533  // comments on expandAddToGEP for details.
1534  const SCEV *Base = S->getStart();
1535  // Dig into the expression to find the pointer base for a GEP.
1536  const SCEV *ExposedRest = Rest;
1537  ExposePointerBase(Base, ExposedRest, SE);
1538  // If we found a pointer, expand the AddRec with a GEP.
1539  if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
1540  // Make sure the Base isn't something exotic, such as a multiplied
1541  // or divided pointer value. In those cases, the result type isn't
1542  // actually a pointer type.
1543  if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
1544  Value *StartV = expand(Base);
1545  assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
1546  return expandAddToGEP(ExposedRest, PTy, Ty, StartV);
1547  }
1548  }
1549 
1550  // Just do a normal add. Pre-expand the operands to suppress folding.
1551  //
1552  // The LHS and RHS values are factored out of the expand call to make the
1553  // output independent of the argument evaluation order.
1554  const SCEV *AddExprLHS = SE.getUnknown(expand(S->getStart()));
1555  const SCEV *AddExprRHS = SE.getUnknown(expand(Rest));
1556  return expand(SE.getAddExpr(AddExprLHS, AddExprRHS));
1557  }
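 // Illustrative example (hypothetical values): {%p,+,4}<L> with a
 // pointer-typed %p splits into %p + {0,+,4}<L>, which the code above
 // emits as a GEP off %p rather than ptrtoint/add/inttoptr arithmetic.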
1558 
1559  // If we don't yet have a canonical IV, create one.
1560  if (!CanonicalIV) {
1561  // Create and insert the PHI node for the induction variable in the
1562  // specified loop.
1563  BasicBlock *Header = L->getHeader();
1564  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
1565  CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
1566  &Header->front());
1567  rememberInstruction(CanonicalIV);
1568 
1569  SmallSet<BasicBlock *, 4> PredSeen;
1570  Constant *One = ConstantInt::get(Ty, 1);
1571  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
1572  BasicBlock *HP = *HPI;
1573  if (!PredSeen.insert(HP).second) {
1574  // There must be an incoming value for each predecessor, even the
1575  // duplicates!
1576  CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
1577  continue;
1578  }
1579 
1580  if (L->contains(HP)) {
1581  // Insert a unit add instruction right before the terminator
1582  // corresponding to the back-edge.
1583  Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
1584  "indvar.next",
1585  HP->getTerminator());
1586  Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
1587  rememberInstruction(Add);
1588  CanonicalIV->addIncoming(Add, HP);
1589  } else {
1590  CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
1591  }
1592  }
1593  }
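 // The IR created above follows the familiar pattern (sketch, assuming a
 // single preheader and latch):
 //   %indvar = phi iN [ 0, %preheader ], [ %indvar.next, %latch ]
 //   ...
 //   %indvar.next = add iN %indvar, 1   ; emitted before the latch terminator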
1594 
1595  // {0,+,1} --> Insert a canonical induction variable into the loop!
1596  if (S->isAffine() && S->getOperand(1)->isOne()) {
1597  assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
1598  "IVs with types different from the canonical IV should "
1599  "already have been handled!");
1600  return CanonicalIV;
1601  }
1602 
1603  // {0,+,F} --> {0,+,1} * F
1604 
1605  // If this is a simple linear addrec, emit it now as a special case.
1606  if (S->isAffine()) // {0,+,F} --> i*F
1607  return
1608  expand(SE.getTruncateOrNoop(
1609  SE.getMulExpr(SE.getUnknown(CanonicalIV),
1610  SE.getNoopOrAnyExtend(S->getOperand(1),
1611  CanonicalIV->getType())),
1612  Ty));
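 // E.g. (illustrative) {0,+,8}<L> expands as the canonical IV multiplied
 // by 8 in the IV's type, truncated back to Ty when the IV is wider.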
1613 
1614  // If this is a chain of recurrences, turn it into a closed form, using the
1615  // folders, then expandCodeFor the closed form. This allows the folders to
1616  // simplify the expression without having to build a bunch of special code
1617  // into this folder.
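 // As a worked illustration (not from the original source): a chrec
 // {A,+,B,+,C}<L> at iteration i evaluates to the binomial sum
 //   A + B*i + C*(i*(i-1)/2),
 // which is the kind of closed form evaluateAtIteration produces here.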
1618  const SCEV *IH = SE.getUnknown(CanonicalIV); // Get I as a "symbolic" SCEV.
1619 
1620  // Promote S up to the canonical IV type, if the cast is foldable.
1621  const SCEV *NewS = S;
1622  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
1623  if (isa<SCEVAddRecExpr>(Ext))
1624  NewS = Ext;
1625 
1626  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
1627  //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";
1628 
1629  // Truncate the result down to the original type, if needed.
1630  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
1631  return expand(T);
1632 }
1633 
1634 Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
1635  Type *Ty = SE.getEffectiveSCEVType(S->getType());
1636  Value *V = expandCodeFor(S->getOperand(),
1637  SE.getEffectiveSCEVType(S->getOperand()->getType()));
1638  Value *I = Builder.CreateTrunc(V, Ty);
1639  rememberInstruction(I);
1640  return I;
1641 }
1642 
1643 Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
1644  Type *Ty = SE.getEffectiveSCEVType(S->getType());
1645  Value *V = expandCodeFor(S->getOperand(),
1646  SE.getEffectiveSCEVType(S->getOperand()->getType()));
1647  Value *I = Builder.CreateZExt(V, Ty);
1648  rememberInstruction(I);
1649  return I;
1650 }
1651 
1652 Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
1653  Type *Ty = SE.getEffectiveSCEVType(S->getType());
1654  Value *V = expandCodeFor(S->getOperand(),
1655  SE.getEffectiveSCEVType(S->getOperand()->getType()));
1656  Value *I = Builder.CreateSExt(V, Ty);
1657  rememberInstruction(I);
1658  return I;
1659 }
1660 
1661 Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
1662  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
1663  Type *Ty = LHS->getType();
1664  for (int i = S->getNumOperands()-2; i >= 0; --i) {
1665  // In the case of mixed integer and pointer types, do the
1666  // rest of the comparisons as integer.
1667  Type *OpTy = S->getOperand(i)->getType();
1668  if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
1669  Ty = SE.getEffectiveSCEVType(Ty);
1670  LHS = InsertNoopCastOfTo(LHS, Ty);
1671  }
1672  Value *RHS = expandCodeFor(S->getOperand(i), Ty);
1673  Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
1674  rememberInstruction(ICmp);
1675  Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
1676  rememberInstruction(Sel);
1677  LHS = Sel;
1678  }
1679  // In the case of mixed integer and pointer types, cast the
1680  // final result back to the pointer type.
1681  if (LHS->getType() != S->getType())
1682  LHS = InsertNoopCastOfTo(LHS, S->getType());
1683  return LHS;
1684 }
1685 
1686 Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
1687  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
1688  Type *Ty = LHS->getType();
1689  for (int i = S->getNumOperands()-2; i >= 0; --i) {
1690  // In the case of mixed integer and pointer types, do the
1691  // rest of the comparisons as integer.
1692  Type *OpTy = S->getOperand(i)->getType();
1693  if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
1694  Ty = SE.getEffectiveSCEVType(Ty);
1695  LHS = InsertNoopCastOfTo(LHS, Ty);
1696  }
1697  Value *RHS = expandCodeFor(S->getOperand(i), Ty);
1698  Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
1699  rememberInstruction(ICmp);
1700  Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
1701  rememberInstruction(Sel);
1702  LHS = Sel;
1703  }
1704  // In the case of mixed integer and pointer types, cast the
1705  // final result back to the pointer type.
1706  if (LHS->getType() != S->getType())
1707  LHS = InsertNoopCastOfTo(LHS, S->getType());
1708  return LHS;
1709 }
1710 
1711 Value *SCEVExpander::visitSMinExpr(const SCEVSMinExpr *S) {
1712  Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
1713  Type *Ty = LHS->getType();
1714  for (int i = S->getNumOperands() - 2; i >= 0; --i) {
1715  // In the case of mixed integer and pointer types, do the
1716  // rest of the comparisons as integer.
1717  Type *OpTy = S->getOperand(i)->getType();
1718  if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
1719  Ty = SE.getEffectiveSCEVType(Ty);
1720  LHS = InsertNoopCastOfTo(LHS, Ty);
1721  }
1722  Value *RHS = expandCodeFor(S->getOperand(i), Ty);
1723  Value *ICmp = Builder.CreateICmpSLT(LHS, RHS);
1724  rememberInstruction(ICmp);
1725  Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smin");
1726  rememberInstruction(Sel);
1727  LHS = Sel;
1728  }
1729  // In the case of mixed integer and pointer types, cast the
1730  // final result back to the pointer type.
1731  if (LHS->getType() != S->getType())
1732  LHS = InsertNoopCastOfTo(LHS, S->getType());
1733  return LHS;
1734 }
1735 
1736 Value *SCEVExpander::visitUMinExpr(const SCEVUMinExpr *S) {
1737  Value *LHS = expand(S->getOperand(S->getNumOperands() - 1));
1738  Type *Ty = LHS->getType();
1739  for (int i = S->getNumOperands() - 2; i >= 0; --i) {
1740  // In the case of mixed integer and pointer types, do the
1741  // rest of the comparisons as integer.
1742  Type *OpTy = S->getOperand(i)->getType();
1743  if (OpTy->isIntegerTy() != Ty->isIntegerTy()) {
1744  Ty = SE.getEffectiveSCEVType(Ty);
1745  LHS = InsertNoopCastOfTo(LHS, Ty);
1746  }
1747  Value *RHS = expandCodeFor(S->getOperand(i), Ty);
1748  Value *ICmp = Builder.CreateICmpULT(LHS, RHS);
1749  rememberInstruction(ICmp);
1750  Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umin");
1751  rememberInstruction(Sel);
1752  LHS = Sel;
1753  }
1754  // In the case of mixed integer and pointer types, cast the
1755  // final result back to the pointer type.
1756  if (LHS->getType() != S->getType())
1757  LHS = InsertNoopCastOfTo(LHS, S->getType());
1758  return LHS;
1759 }
1760 
1761 Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
1762  Instruction *IP) {
1763  setInsertPoint(IP);
1764  return expandCodeFor(SH, Ty);
1765 }
1766 
1767 Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
1768  // Expand the code for this SCEV.
1769  Value *V = expand(SH);
1770  if (Ty) {
1771  assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
1772  "non-trivial casts should be done with the SCEVs directly!");
1773  V = InsertNoopCastOfTo(V, Ty);
1774  }
1775  return V;
1776 }
1777 
1778 ScalarEvolution::ValueOffsetPair
1779 SCEVExpander::FindValueInExprValueMap(const SCEV *S,
1780  const Instruction *InsertPt) {
1781  SetVector<ScalarEvolution::ValueOffsetPair> *Set = SE.getSCEVValues(S);
1782  // If the expansion is not in CanonicalMode, and the SCEV contains any
1783  // scAddRecExpr-type subexpression, the SCEV must be expanded literally.
1784  if (CanonicalMode || !SE.containsAddRecurrence(S)) {
1785  // If S is scConstant, it may be worse to reuse an existing Value.
1786  if (S->getSCEVType() != scConstant && Set) {
1787  // Choose a Value from the set which dominates the insertPt.
1788  // insertPt should be inside the Value's parent loop so as not to break
1789  // the LCSSA form.
1790  for (auto const &VOPair : *Set) {
1791  Value *V = VOPair.first;
1792  ConstantInt *Offset = VOPair.second;
1793  Instruction *EntInst = nullptr;
1794  if (V && isa<Instruction>(V) && (EntInst = cast<Instruction>(V)) &&
1795  S->getType() == V->getType() &&
1796  EntInst->getFunction() == InsertPt->getFunction() &&
1797  SE.DT.dominates(EntInst, InsertPt) &&
1798  (SE.LI.getLoopFor(EntInst->getParent()) == nullptr ||
1799  SE.LI.getLoopFor(EntInst->getParent())->contains(InsertPt)))
1800  return {V, Offset};
1801  }
1802  }
1803  }
1804  return {nullptr, nullptr};
1805 }
1806 
1807 // The expansion of SCEV will either reuse a previous Value in ExprValueMap,
1808 // or expand the SCEV literally. Specifically, if the expansion is in LSRMode,
1809 // and the SCEV contains any sub scAddRecExpr type SCEV, it will be expanded
1810 // literally, to prevent LSR's transformed SCEV from being reverted. Otherwise,
1811 // the expansion will try to reuse Value from ExprValueMap, and only when it
1812 // fails, expand the SCEV literally.
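// E.g. (illustrative) if %sum = add i64 %a, %b already exists and
// dominates the insertion point, expanding the SCEV (%a + %b) can return
// %sum instead of materializing a second add.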
1813 Value *SCEVExpander::expand(const SCEV *S) {
1814  // Compute an insertion point for this SCEV object. Hoist the instructions
1815  // as far out in the loop nest as possible.
1816  Instruction *InsertPt = &*Builder.GetInsertPoint();
1817 
1818  // We can move the insertion point only if there are no div or rem
1819  // operations; otherwise we risk moving it across a zero-denominator check.
1820  auto SafeToHoist = [](const SCEV *S) {
1821  return !SCEVExprContains(S, [](const SCEV *S) {
1822  if (const auto *D = dyn_cast<SCEVUDivExpr>(S)) {
1823  if (const auto *SC = dyn_cast<SCEVConstant>(D->getRHS()))
1824  // Division by non-zero constants can be hoisted.
1825  return SC->getValue()->isZero();
1826  // All other divisions should not be moved as they may be
1827  // divisions by zero and should be kept within the
1828  // conditions of the surrounding loops that guard their
1829  // execution (see PR35406).
1830  return true;
1831  }
1832  return false;
1833  });
1834  };
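 // E.g. (illustrative, in the spirit of PR35406): given
 //   if (d != 0) { ... x /u d ... }
 // hoisting the udiv above the guard could introduce a division by zero,
 // while a udiv by a non-zero constant has no such hazard.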
1835  if (SafeToHoist(S)) {
1836  for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
1837  L = L->getParentLoop()) {
1838  if (SE.isLoopInvariant(S, L)) {
1839  if (!L) break;
1840  if (BasicBlock *Preheader = L->getLoopPreheader())
1841  InsertPt = Preheader->getTerminator();
1842  else
1843  // LSR sets the insertion point for AddRec start/step values to the
1844  // block start to simplify value reuse, even though it's an invalid
1845  // position. SCEVExpander must correct for this in all cases.
1846  InsertPt = &*L->getHeader()->getFirstInsertionPt();
1847  } else {
1848  // If the SCEV is computable at this level, insert it into the header
1849  // after the PHIs (and after any other instructions that we've inserted
1850  // there) so that it is guaranteed to dominate any user inside the loop.
1851  if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
1852  InsertPt = &*L->getHeader()->getFirstInsertionPt();
1853  while (InsertPt->getIterator() != Builder.GetInsertPoint() &&
1854  (isInsertedInstruction(InsertPt) ||
1855  isa<DbgInfoIntrinsic>(InsertPt)))
1856  InsertPt = &*std::next(InsertPt->getIterator());
1857  break;
1858  }
1859  }
1860  }
1861 
1862  // IndVarSimplify sometimes sets the insertion point at the block start, even
1863  // when there are PHIs at that point. We must correct for this.
1864  if (isa<PHINode>(*InsertPt))
1865  InsertPt = &*InsertPt->getParent()->getFirstInsertionPt();
1866 
1867  // Check to see if we already expanded this here.
1868  auto I = InsertedExpressions.find(std::make_pair(S, InsertPt));
1869  if (I != InsertedExpressions.end())
1870  return I->second;
1871 
1872  SCEVInsertPointGuard Guard(Builder, this);
1873  Builder.SetInsertPoint(InsertPt);
1874 
1875  // Expand the expression into instructions.
1876  ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, InsertPt);
1877  Value *V = VO.first;
1878 
1879  if (!V)
1880  V = visit(S);
1881  else if (VO.second) {
1882  if (PointerType *Vty = dyn_cast<PointerType>(V->getType())) {
1883  Type *Ety = Vty->getPointerElementType();
1884  int64_t Offset = VO.second->getSExtValue();
1885  int64_t ESize = SE.getTypeSizeInBits(Ety);
1886  if ((Offset * 8) % ESize == 0) {
1887  ConstantInt *Idx =
1888  ConstantInt::getSigned(VO.second->getType(), -(Offset * 8) / ESize);
1889  V = Builder.CreateGEP(Ety, V, Idx, "scevgep");
1890  } else {
1891  ConstantInt *Idx =
1892  ConstantInt::getSigned(VO.second->getType(), -Offset);
1893  unsigned AS = Vty->getAddressSpace();
1894  V = Builder.CreateBitCast(V, Type::getInt8PtrTy(SE.getContext(), AS));
1895  V = Builder.CreateGEP(Type::getInt8Ty(SE.getContext()), V, Idx,
1896  "uglygep");
1897  V = Builder.CreateBitCast(V, Vty);
1898  }
1899  } else {
1900  V = Builder.CreateSub(V, VO.second);
1901  }
1902  }
1903  // Remember the expanded value for this SCEV at this location.
1904  //
1905  // This is independent of PostIncLoops. The mapped value simply materializes
1906  // the expression at this insertion point. If the mapped value happened to be
1907  // a postinc expansion, it could be reused by a non-postinc user, but only if
1908  // its insertion point was already at the head of the loop.
1909  InsertedExpressions[std::make_pair(S, InsertPt)] = V;
1910  return V;
1911 }
1912 
1913 void SCEVExpander::rememberInstruction(Value *I) {
1914  if (!PostIncLoops.empty())
1915  InsertedPostIncValues.insert(I);
1916  else
1917  InsertedValues.insert(I);
1918 }
1919 
1920 /// getOrInsertCanonicalInductionVariable - This method returns the
1921 /// canonical induction variable of the specified type for the specified
1922 /// loop (inserting one if there is none). A canonical induction variable
1923 /// starts at zero and steps by one on each iteration.
1924 PHINode *
1925 SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
1926  Type *Ty) {
1927  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");
1928 
1929  // Build a SCEV for {0,+,1}<L>.
1930  // Conservatively use FlagAnyWrap for now.
1931  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
1932  SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);
1933 
1934  // Emit code for it.
1935  SCEVInsertPointGuard Guard(Builder, this);
1936  PHINode *V =
1937  cast<PHINode>(expandCodeFor(H, nullptr, &L->getHeader()->front()));
1938 
1939  return V;
1940 }
1941 
1942 /// replaceCongruentIVs - Check for congruent phis in this loop header and
1943 /// replace them with their most canonical representative. Return the number of
1944 /// phis eliminated.
1945 ///
1946 /// This does not depend on any SCEVExpander state but should be used in
1947 /// the same context that SCEVExpander is used.
1948 unsigned
1949 SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
1950  SmallVectorImpl<WeakTrackingVH> &DeadInsts,
1951  const TargetTransformInfo *TTI) {
1952  // Find integer phis in order of increasing width.
1953  SmallVector<PHINode*, 8> Phis;
1954  for (PHINode &PN : L->getHeader()->phis())
1955  Phis.push_back(&PN);
1956 
1957  if (TTI)
1958  llvm::sort(Phis, [](Value *LHS, Value *RHS) {
1959  // Put pointers at the back and make sure pointer < pointer = false.
1960  if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
1961  return RHS->getType()->isIntegerTy() && !LHS->getType()->isIntegerTy();
1962  return RHS->getType()->getPrimitiveSizeInBits() <
1963  LHS->getType()->getPrimitiveSizeInBits();
1964  });
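 // E.g. (illustrative) header phis of types {i8*, i32, i64} are visited
 // in the order [i64, i32, i8*]: integers from wide to narrow, pointers
 // last.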
1965 
1966  unsigned NumElim = 0;
1967  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
1968  // Process phis from wide to narrow. Map wide phis to their truncation
1969  // so narrow phis can reuse them.
1970  for (PHINode *Phi : Phis) {
1971  auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
1972  if (Value *V = SimplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
1973  return V;
1974  if (!SE.isSCEVable(PN->getType()))
1975  return nullptr;
1976  auto *Const = dyn_cast<SCEVConstant>(SE.getSCEV(PN));
1977  if (!Const)
1978  return nullptr;
1979  return Const->getValue();
1980  };
1981 
1982  // Fold constant phis. They may be congruent to other constant phis and
1983  // would confuse the logic below that expects proper IVs.
1984  if (Value *V = SimplifyPHINode(Phi)) {
1985  if (V->getType() != Phi->getType())
1986  continue;
1987  Phi->replaceAllUsesWith(V);
1988  DeadInsts.emplace_back(Phi);
1989  ++NumElim;
1990  DEBUG_WITH_TYPE(DebugType, dbgs()
1991  << "INDVARS: Eliminated constant iv: " << *Phi << '\n');
1992  continue;
1993  }
1994 
1995  if (!SE.isSCEVable(Phi->getType()))
1996  continue;
1997 
1998  PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
1999  if (!OrigPhiRef) {
2000  OrigPhiRef = Phi;
2001  if (Phi->getType()->isIntegerTy() && TTI &&
2002  TTI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
2003  // This phi can be freely truncated to the narrowest phi type. Map the
2004  // truncated expression to it so it will be reused for narrow types.
2005  const SCEV *TruncExpr =
2006  SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
2007  ExprToIVMap[TruncExpr] = Phi;
2008  }
2009  continue;
2010  }
2011 
2012  // Replacing a pointer phi with an integer phi or vice-versa doesn't make
2013  // sense.
2014  if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
2015  continue;
2016 
2017  if (BasicBlock *LatchBlock = L->getLoopLatch()) {
2018  Instruction *OrigInc = dyn_cast<Instruction>(
2019  OrigPhiRef->getIncomingValueForBlock(LatchBlock));
2020  Instruction *IsomorphicInc =
2021  dyn_cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));
2022 
2023  if (OrigInc && IsomorphicInc) {
2024  // If this phi has the same width but is more canonical, replace the
2025  // original with it. As part of the "more canonical" determination,
2026  // respect a prior decision to use an IV chain.
2027  if (OrigPhiRef->getType() == Phi->getType() &&
2028  !(ChainedPhis.count(Phi) ||
2029  isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)) &&
2030  (ChainedPhis.count(Phi) ||
2031  isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
2032  std::swap(OrigPhiRef, Phi);
2033  std::swap(OrigInc, IsomorphicInc);
2034  }
2035  // Replacing the congruent phi is sufficient because acyclic
2036  // redundancy elimination, CSE/GVN, should handle the
2037  // rest. However, once SCEV proves that a phi is congruent,
2038  // it's often the head of an IV user cycle that is isomorphic
2039  // with the original phi. It's worth eagerly cleaning up the
2040  // common case of a single IV increment so that DeleteDeadPHIs
2041  // can remove cycles that had postinc uses.
2042  const SCEV *TruncExpr =
2043  SE.getTruncateOrNoop(SE.getSCEV(OrigInc), IsomorphicInc->getType());
2044  if (OrigInc != IsomorphicInc &&
2045  TruncExpr == SE.getSCEV(IsomorphicInc) &&
2046  SE.LI.replacementPreservesLCSSAForm(IsomorphicInc, OrigInc) &&
2047  hoistIVInc(OrigInc, IsomorphicInc)) {
2048  DEBUG_WITH_TYPE(DebugType,
2049  dbgs() << "INDVARS: Eliminated congruent iv.inc: "
2050  << *IsomorphicInc << '\n');
2051  Value *NewInc = OrigInc;
2052  if (OrigInc->getType() != IsomorphicInc->getType()) {
2053  Instruction *IP = nullptr;
2054  if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
2055  IP = &*PN->getParent()->getFirstInsertionPt();
2056  else
2057  IP = OrigInc->getNextNode();
2058 
2059  IRBuilder<> Builder(IP);
2060  Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
2061  NewInc = Builder.CreateTruncOrBitCast(
2062  OrigInc, IsomorphicInc->getType(), IVName);
2063  }
2064  IsomorphicInc->replaceAllUsesWith(NewInc);
2065  DeadInsts.emplace_back(IsomorphicInc);
2066  }
2067  }
2068  }
2069  DEBUG_WITH_TYPE(DebugType, dbgs() << "INDVARS: Eliminated congruent iv: "
2070  << *Phi << '\n');
2071  ++NumElim;
2072  Value *NewIV = OrigPhiRef;
2073  if (OrigPhiRef->getType() != Phi->getType()) {
2074  IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
2075  Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
2076  NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
2077  }
2078  Phi->replaceAllUsesWith(NewIV);
2079  DeadInsts.emplace_back(Phi);
2080  }
2081  return NumElim;
2082 }
2083 
2084 Value *SCEVExpander::getExactExistingExpansion(const SCEV *S,
2085  const Instruction *At, Loop *L) {
2086  Optional<ScalarEvolution::ValueOffsetPair> VO =
2087  getRelatedExistingExpansion(S, At, L);
2088  if (VO && VO.getValue().second == nullptr)
2089  return VO.getValue().first;
2090  return nullptr;
2091 }
2092 
2093 Optional<ScalarEvolution::ValueOffsetPair>
2094 SCEVExpander::getRelatedExistingExpansion(const SCEV *S, const Instruction *At,
2095  Loop *L) {
2096  using namespace llvm::PatternMatch;
2097 
2098  SmallVector<BasicBlock *, 4> ExitingBlocks;
2099  L->getExitingBlocks(ExitingBlocks);
2100 
2101  // Look for a suitable value in simple conditions at the loop exits.
2102  for (BasicBlock *BB : ExitingBlocks) {
2103  ICmpInst::Predicate Pred;
2104  Instruction *LHS, *RHS;
2105 
2106  if (!match(BB->getTerminator(),
2107  m_Br(m_ICmp(Pred, m_Instruction(LHS), m_Instruction(RHS)),
2108  m_BasicBlock(), m_BasicBlock())))
2109  continue;
2110 
2111  if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At))
2112  return ScalarEvolution::ValueOffsetPair(LHS, nullptr);
2113 
2114  if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At))
2115  return ScalarEvolution::ValueOffsetPair(RHS, nullptr);
2116  }
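 // E.g. (illustrative IR) an exiting block ending in
 //   %cmp = icmp slt i64 %iv, %n
 //   br i1 %cmp, label %body, label %exit
 // lets %iv or %n be reused here whenever its SCEV equals S.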
2117 
2118  // Reuse the same logic expand uses for finding a previous Value in
2119  // ExprValueMap.
2120  ScalarEvolution::ValueOffsetPair VO = FindValueInExprValueMap(S, At);
2121  if (VO.first)
2122  return VO;
2123 
2124  // There is potential to make this significantly smarter, but this simple
2125  // heuristic already gets some interesting cases.
2126 
2127  // Cannot find a suitable value.
2128  return None;
2129 }
2130 
2131 bool SCEVExpander::isHighCostExpansionHelper(
2132  const SCEV *S, Loop *L, const Instruction *At,
2133  SmallPtrSetImpl<const SCEV *> &Processed) {
2134 
2135  // If we can find an existing value for this scev available at the point "At"
2136  // then consider the expression cheap.
2137  if (At && getRelatedExistingExpansion(S, At, L))
2138  return false;
2139 
2140  // Zero/One operand expressions
2141  switch (S->getSCEVType()) {
2142  case scUnknown:
2143  case scConstant:
2144  return false;
2145  case scTruncate:
2146  return isHighCostExpansionHelper(cast<SCEVTruncateExpr>(S)->getOperand(),
2147  L, At, Processed);
2148  case scZeroExtend:
2149  return isHighCostExpansionHelper(cast<SCEVZeroExtendExpr>(S)->getOperand(),
2150  L, At, Processed);
2151  case scSignExtend:
2152  return isHighCostExpansionHelper(cast<SCEVSignExtendExpr>(S)->getOperand(),
2153  L, At, Processed);
2154  }
2155 
2156  if (!Processed.insert(S).second)
2157  return false;
2158 
2159  if (auto *UDivExpr = dyn_cast<SCEVUDivExpr>(S)) {
2160  // If the divisor is a power of two and the SCEV type fits in a native
2161  // integer (and the LHS is not expensive), consider the division cheap
2162  // irrespective of whether it occurs in the user code since it can be
2163  // lowered into a right shift.
2164  if (auto *SC = dyn_cast<SCEVConstant>(UDivExpr->getRHS()))
2165  if (SC->getAPInt().isPowerOf2()) {
2166  if (isHighCostExpansionHelper(UDivExpr->getLHS(), L, At, Processed))
2167  return true;
2168  const DataLayout &DL =
2169  L->getHeader()->getParent()->getParent()->getDataLayout();
2170  unsigned Width = cast<IntegerType>(UDivExpr->getType())->getBitWidth();
2171  return DL.isIllegalInteger(Width);
2172  }
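 // E.g. (illustrative) a udiv by 8 lowers to a right shift by 3, so the
 // division is cheap whenever i<Width> is legal for the target.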
2173 
2174  // UDivExpr is very likely a UDiv that ScalarEvolution's HowFarToZero or
2175  // HowManyLessThans produced to compute a precise expression, rather than a
2176  // UDiv from the user's code. If we can't find a UDiv in the code with some
2177  // simple searching, assume the former and consider UDivExpr expensive
2178  // to compute.
2179  BasicBlock *ExitingBB = L->getExitingBlock();
2180  if (!ExitingBB)
2181  return true;
2182 
2183  // At the beginning of this function we already tried to find an existing
2184  // value for plain 'S'. Now try to look up 'S + 1', a common pattern
2185  // involving division. This is just a simple search heuristic.
2186  if (!At)
2187  At = &ExitingBB->back();
2188  if (!getRelatedExistingExpansion(
2189  SE.getAddExpr(S, SE.getConstant(S->getType(), 1)), At, L))
2190  return true;
2191  }
2192 
2193  // HowManyLessThans uses a Max expression whenever the loop is not guarded by
2194  // the exit condition.
2195  if (isa<SCEVMinMaxExpr>(S))
2196  return true;
2197 
2198  // Recurse past nary expressions, which commonly occur in the
2199  // BackedgeTakenCount. They may already exist in program code, and if not,
2200  // they are not too expensive to rematerialize.
2201  if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(S)) {
2202  for (auto *Op : NAry->operands())
2203  if (isHighCostExpansionHelper(Op, L, At, Processed))
2204  return true;
2205  }
2206 
2207  // If we haven't recognized an expensive SCEV pattern, assume it's an
2208  // expression produced by program code.
2209  return false;
2210 }
2211 
2212 Value *SCEVExpander::expandCodeForPredicate(const SCEVPredicate *Pred,
2213  Instruction *IP) {
2214  assert(IP);
2215  switch (Pred->getKind()) {
2216  case SCEVPredicate::P_Union:
2217  return expandUnionPredicate(cast<SCEVUnionPredicate>(Pred), IP);
2218  case SCEVPredicate::P_Equal:
2219  return expandEqualPredicate(cast<SCEVEqualPredicate>(Pred), IP);
2220  case SCEVPredicate::P_Wrap: {
2221  auto *AddRecPred = cast<SCEVWrapPredicate>(Pred);
2222  return expandWrapPredicate(AddRecPred, IP);
2223  }
2224  }
2225  llvm_unreachable("Unknown SCEV predicate type");
2226 }
2227 
2228 Value *SCEVExpander::expandEqualPredicate(const SCEVEqualPredicate *Pred,
2229  Instruction *IP) {
2230  Value *Expr0 = expandCodeFor(Pred->getLHS(), Pred->getLHS()->getType(), IP);
2231  Value *Expr1 = expandCodeFor(Pred->getRHS(), Pred->getRHS()->getType(), IP);
2232 
2233  Builder.SetInsertPoint(IP);
2234  auto *I = Builder.CreateICmpNE(Expr0, Expr1, "ident.check");
2235  return I;
2236 }
2237 
2238 Value *SCEVExpander::generateOverflowCheck(const SCEVAddRecExpr *AR,
2239  Instruction *Loc, bool Signed) {
2240  assert(AR->isAffine() && "Cannot generate RT check for "
2241  "non-affine expression");
2242 
2243  SCEVUnionPredicate Pred;
2244  const SCEV *ExitCount =
2245  SE.getPredicatedBackedgeTakenCount(AR->getLoop(), Pred);
2246 
2247  assert(ExitCount != SE.getCouldNotCompute() && "Invalid loop count");
2248 
2249  const SCEV *Step = AR->getStepRecurrence(SE);
2250  const SCEV *Start = AR->getStart();
2251 
2252  Type *ARTy = AR->getType();
2253  unsigned SrcBits = SE.getTypeSizeInBits(ExitCount->getType());
2254  unsigned DstBits = SE.getTypeSizeInBits(ARTy);
2255 
2256  // The expression {Start,+,Step} has nusw/nssw if
2257  // Step < 0, Start - |Step| * Backedge <= Start
2258  // Step >= 0, Start + |Step| * Backedge > Start
2259  // and |Step| * Backedge doesn't unsigned overflow.
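 // Worked instance (illustrative): for {0,+,-1} with a backedge-taken
 // count of 3, |Step| * Backedge = 3 and Start - 3 = -3 <= 0 = Start,
 // so the "Step < 0" condition above holds without signed wrap.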
2260 
2261  IntegerType *CountTy = IntegerType::get(Loc->getContext(), SrcBits);
2262  Builder.SetInsertPoint(Loc);
2263  Value *TripCountVal = expandCodeFor(ExitCount, CountTy, Loc);
2264 
2265  IntegerType *Ty =
2266  IntegerType::get(Loc->getContext(), SE.getTypeSizeInBits(ARTy));
2267  Type *ARExpandTy = DL.isNonIntegralPointerType(ARTy) ? ARTy : Ty;
2268 
2269  Value *StepValue = expandCodeFor(Step, Ty, Loc);
2270  Value *NegStepValue = expandCodeFor(SE.getNegativeSCEV(Step), Ty, Loc);
2271  Value *StartValue = expandCodeFor(Start, ARExpandTy, Loc);
2272 
2273  ConstantInt *Zero =
2274  ConstantInt::get(Loc->getContext(), APInt::getNullValue(DstBits));
2275 
2276  Builder.SetInsertPoint(Loc);
2277  // Compute |Step|
2278  Value *StepCompare = Builder.CreateICmp(ICmpInst::ICMP_SLT, StepValue, Zero);
2279  Value *AbsStep = Builder.CreateSelect(StepCompare, NegStepValue, StepValue);
2280 
2281  // Get the backedge taken count and truncate or extend it to the AR type.
2282  Value *TruncTripCount = Builder.CreateZExtOrTrunc(TripCountVal, Ty);
2283  auto *MulF = Intrinsic::getDeclaration(Loc->getModule(),
2284  Intrinsic::umul_with_overflow, Ty);
2285 
2286  // Compute |Step| * Backedge
2287  CallInst *Mul = Builder.CreateCall(MulF, {AbsStep, TruncTripCount}, "mul");
2288  Value *MulV = Builder.CreateExtractValue(Mul, 0, "mul.result");
2289  Value *OfMul = Builder.CreateExtractValue(Mul, 1, "mul.overflow");
2290 
2291  // Compute:
2292  // Start + |Step| * Backedge < Start
2293  // Start - |Step| * Backedge > Start
2294  Value *Add = nullptr, *Sub = nullptr;
2295  if (PointerType *ARPtrTy = dyn_cast<PointerType>(ARExpandTy)) {
2296  const SCEV *MulS = SE.getSCEV(MulV);
2297  const SCEV *NegMulS = SE.getNegativeSCEV(MulS);
2298  Add = Builder.CreateBitCast(expandAddToGEP(MulS, ARPtrTy, Ty, StartValue),
2299  ARPtrTy);
2300  Sub = Builder.CreateBitCast(
2301  expandAddToGEP(NegMulS, ARPtrTy, Ty, StartValue), ARPtrTy);
2302  } else {
2303  Add = Builder.CreateAdd(StartValue, MulV);
2304  Sub = Builder.CreateSub(StartValue, MulV);
2305  }
2306 
2307  Value *EndCompareGT = Builder.CreateICmp(
2308  Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT, Sub, StartValue);
2309 
2310  Value *EndCompareLT = Builder.CreateICmp(
2311  Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, Add, StartValue);
2312 
2313  // Select the answer based on the sign of Step.
2314  Value *EndCheck =
2315  Builder.CreateSelect(StepCompare, EndCompareGT, EndCompareLT);
2316 
2317  // If the backedge taken count type is larger than the AR type,
2318  // check that we don't drop any bits by truncating it. If we are
2319  // dropping bits, then we have overflow (unless the step is zero).
2320  if (SE.getTypeSizeInBits(CountTy) > SE.getTypeSizeInBits(Ty)) {
2321  auto MaxVal = APInt::getMaxValue(DstBits).zext(SrcBits);
2322  auto *BackedgeCheck =
2323  Builder.CreateICmp(ICmpInst::ICMP_UGT, TripCountVal,
2324  ConstantInt::get(Loc->getContext(), MaxVal));
2325  BackedgeCheck = Builder.CreateAnd(
2326  BackedgeCheck, Builder.CreateICmp(ICmpInst::ICMP_NE, StepValue, Zero));
2327 
2328  EndCheck = Builder.CreateOr(EndCheck, BackedgeCheck);
2329  }
2330 
2331  EndCheck = Builder.CreateOr(EndCheck, OfMul);
2332  return EndCheck;
2333 }
2334 
2335 Value *SCEVExpander::expandWrapPredicate(const SCEVWrapPredicate *Pred,
2336  Instruction *IP) {
2337  const auto *A = cast<SCEVAddRecExpr>(Pred->getExpr());
2338  Value *NSSWCheck = nullptr, *NUSWCheck = nullptr;
2339 
2340  // Add a check for NUSW
2341  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNUSW)
2342  NUSWCheck = generateOverflowCheck(A, IP, false);
2343 
2344  // Add a check for NSSW
2345  if (Pred->getFlags() & SCEVWrapPredicate::IncrementNSSW)
2346  NSSWCheck = generateOverflowCheck(A, IP, true);
2347 
2348  if (NUSWCheck && NSSWCheck)
2349  return Builder.CreateOr(NUSWCheck, NSSWCheck);
2350 
2351  if (NUSWCheck)
2352  return NUSWCheck;
2353 
2354  if (NSSWCheck)
2355  return NSSWCheck;
2356 
2357  return ConstantInt::getFalse(IP->getContext());
2358 }
2359 
2360 Value *SCEVExpander::expandUnionPredicate(const SCEVUnionPredicate *Union,
2361  Instruction *IP) {
2362  auto *BoolType = IntegerType::get(IP->getContext(), 1);
2363  Value *Check = ConstantInt::getNullValue(BoolType);
2364 
2365  // Loop over all checks in this set.
2366  for (auto Pred : Union->getPredicates()) {
2367  auto *NextCheck = expandCodeForPredicate(Pred, IP);
2368  Builder.SetInsertPoint(IP);
2369  Check = Builder.CreateOr(Check, NextCheck);
2370  }
2371 
2372  return Check;
2373 }
2374 
2375 namespace {
2376 // Search for a SCEV subexpression that is not safe to expand. Any expression
2377 // that may expand to a !isSafeToSpeculativelyExecute value is unsafe, namely
2378 // UDiv expressions. We don't know if the UDiv is derived from an IR divide
2379 // instruction, but the important thing is that we prove the denominator is
2380 // nonzero before expansion.
2381 //
2382 // IVUsers already checks that IV-derived expressions are safe. So this check is
2383 // only needed when the expression includes some subexpression that is not IV
2384 // derived.
2385 //
2386 // Currently, we only allow division by a nonzero constant here. If this is
2387 // inadequate, we could easily allow division by SCEVUnknown by using
2388 // ValueTracking to check isKnownNonZero().
2389 //
2390 // We cannot generally expand recurrences unless the step dominates the loop
2391 // header. The expander handles the special case of affine recurrences by
2392 // scaling the recurrence outside the loop, but this technique isn't generally
2393 // applicable. Expanding a nested recurrence outside a loop requires computing
2394 // binomial coefficients. This could be done, but the recurrence has to be in a
2395 // perfectly reduced form, which can't be guaranteed.
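// E.g. (illustrative) a non-affine chrec {0,+,%a,+,%b}<L> whose step %b
// is defined inside L cannot be expanded at any point that precedes the
// loop, so follow() below flags it as unsafe.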
2396 struct SCEVFindUnsafe {
2397  ScalarEvolution &SE;
2398  bool IsUnsafe;
2399 
2400  SCEVFindUnsafe(ScalarEvolution &se): SE(se), IsUnsafe(false) {}
2401 
2402  bool follow(const SCEV *S) {
2403  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
2404  const SCEVConstant *SC = dyn_cast<SCEVConstant>(D->getRHS());
2405  if (!SC || SC->getValue()->isZero()) {
2406  IsUnsafe = true;
2407  return false;
2408  }
2409  }
2410  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
2411  const SCEV *Step = AR->getStepRecurrence(SE);
2412  if (!AR->isAffine() && !SE.dominates(Step, AR->getLoop()->getHeader())) {
2413  IsUnsafe = true;
2414  return false;
2415  }
2416  }
2417  return true;
2418  }
2419  bool isDone() const { return IsUnsafe; }
2420 };
2421 }
2422 
2423 namespace llvm {
2424 bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE) {
2425  SCEVFindUnsafe Search(SE);
2426  visitAll(S, Search);
2427  return !Search.IsUnsafe;
2428 }
2429 
2430 bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint,
2431  ScalarEvolution &SE) {
2432  if (!isSafeToExpand(S, SE))
2433  return false;
2434  // We have to prove that the expanded site of S dominates InsertionPoint.
2435  // This is easy when not in the same block, but hard when S is an instruction
2436  // to be expanded somewhere inside the same block as our insertion point.
2437  // What we really need here is something analogous to an OrderedBasicBlock,
2438  // but for the moment, we paper over the problem by handling two common and
2439  // cheap to check cases.
2440  if (SE.properlyDominates(S, InsertionPoint->getParent()))
2441  return true;
2442  if (SE.dominates(S, InsertionPoint->getParent())) {
2443  if (InsertionPoint->getParent()->getTerminator() == InsertionPoint)
2444  return true;
2445  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S))
2446  for (const Value *V : InsertionPoint->operand_values())
2447  if (V == U->getValue())
2448  return true;
2449  }
2450  return false;
2451 }
2452 }
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type.
const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
static bool Check(DecodeStatus &Out, DecodeStatus In)
const NoneType None
Definition: None.h:23
uint64_t CallInst * C
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:111
bool hoistIVInc(Instruction *IncV, Instruction *InsertPos)
Utility for hoisting an IV increment.
static ConstantInt * getFalse(LLVMContext &Context)
Definition: Constants.cpp:616
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:641
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static IntegerType * getInt1Ty(LLVMContext &C)
Definition: Type.cpp:177
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
Definition: LoopInfoImpl.h:209
Value * getExactExistingExpansion(const SCEV *S, const Instruction *At, Loop *L)
Try to find existing LLVM IR value for S available at the point At.
This class represents an incoming formal argument to a Function.
Definition: Argument.h:29
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:288
const SCEV * getConstant(ConstantInt *V)
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
This class represents lattice values for constants.
Definition: AllocatorList.h:23
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant *> IdxList, bool InBounds=false, Optional< unsigned > InRangeIndex=None, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition: Constants.h:1153
bool isSized(SmallPtrSetImpl< Type *> *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
Definition: Type.h:265
bool isSafeToExpandAt(const SCEV *S, const Instruction *InsertionPoint, ScalarEvolution &SE)
Return true if the given expression is safe to expand in the sense that all materialized values are d...
const SCEV * normalizeForPostIncUse(const SCEV *S, const PostIncLoopSet &Loops, ScalarEvolution &SE)
Normalize S to be post-increment for all loops present in Loops.
The main scalar evolution driver.
APInt zext(unsigned width) const
Zero extend to a new width.
Definition: APInt.cpp:912
bool isZero() const
Return true if the expression is a constant zero.
This class represents a function call, abstracting a target machine&#39;s calling convention.
BlockT * getLoopPreheader() const
If there is a preheader for this loop, return it.
Definition: LoopInfoImpl.h:160
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space...
Definition: Type.cpp:637
unsigned less than
Definition: InstrTypes.h:757
bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV properly dominate the specified basic block...
Optional< ScalarEvolution::ValueOffsetPair > getRelatedExistingExpansion(const SCEV *S, const Instruction *At, Loop *L)
Try to find the ValueOffsetPair for S.
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:743
This class represents a truncation of an integer value to a smaller integer value.
Value * expandWrapPredicate(const SCEVWrapPredicate *P, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a S...
A debug info location.
Definition: DebugLoc.h:33
const SCEV * getOperand() const
Hexagon Common GEP
static void SimplifyAddOperands(SmallVectorImpl< const SCEV *> &Ops, Type *Ty, ScalarEvolution &SE)
SimplifyAddOperands - Sort and simplify a list of add operands.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:144
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:230
op_iterator op_begin()
Definition: User.h:229
unsigned getElementContainingOffset(uint64_t Offset) const
Given a valid byte offset into the structure, returns the structure index that contains it...
Definition: DataLayout.cpp:80
This is the base class for unary cast operator classes.
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1517
return AArch64::GPR64RegClass contains(Reg)
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:130
static Constant * getNullValue(Type *Ty)
Constructor to create a &#39;0&#39; constant of arbitrary type.
Definition: Constants.cpp:289
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:343
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:273
#define DEBUG_WITH_TYPE(TYPE, X)
DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug information.
Definition: Debug.h:64
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:47
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
Definition: DataLayout.h:585
Hexagon Hardware Loops
Value * expandCodeForPredicate(const SCEVPredicate *Pred, Instruction *Loc)
Generates a code sequence that evaluates this predicate.
Type * getPointerElementType() const
Definition: Type.h:381
const DataLayout & getDataLayout() const
Get the data layout for the module&#39;s target platform.
Definition: Module.cpp:369
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:80
static const Loop * PickMostRelevantLoop(const Loop *A, const Loop *B, DominatorTree &DT)
PickMostRelevantLoop - Given two loops pick the one that&#39;s most relevant for SCEV expansion...
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:439
Class to represent struct types.
Definition: DerivedTypes.h:238
A Use represents the edge between a Value definition and its users.
Definition: Use.h:55
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:41
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:197
LLVMContext & getContext() const
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition: IRBuilder.h:779
This node represents multiplication of some number of SCEVs.
Value * generateOverflowCheck(const SCEVAddRecExpr *AR, Instruction *Loc, bool Signed)
Generates code that evaluates if the AR expression will overflow.
const APInt & getAPInt() const
BlockT * getHeader() const
Definition: LoopInfo.h:105
ConstantInt * getValue() const
A constant value that is initialized with an expression using other constant values.
Definition: Constants.h:888
#define UINT64_MAX
Definition: DataTypes.h:83
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:246
bool isTruncateFree(Type *Ty1, Type *Ty2) const
Return true if it&#39;s free to truncate a value of type Ty1 to type Ty2.
This node represents a polynomial recurrence on the trip count of the specified loop.
const T & getValue() const LLVM_LVALUE_FUNCTION
Definition: Optional.h:255
Class to represent array types.
Definition: DerivedTypes.h:408
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:125
op_iterator op_begin() const
void SetCurrentDebugLocation(DebugLoc L)
Set location information used by debugging information.
Definition: IRBuilder.h:156
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:429
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
Definition: PatternMatch.h:412
This class represents a signed minimum selection.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
void takeName(Value *V)
Transfer the name from V to this value.
Definition: Value.cpp:291
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree...
Definition: Dominators.h:144
Function * getDeclaration(Module *M, ID id, ArrayRef< Type *> Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Definition: Function.cpp:1093
const SCEV * getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
static BinaryOperator * CreateAdd(Value *S1, Value *S2, const Twine &Name, Instruction *InsertBefore, Value *FlagsOp)
Value * getOperand(unsigned i) const
Definition: User.h:169
Class to represent pointers.
Definition: DerivedTypes.h:579
#define P(N)
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
const SCEV * getOperand(unsigned i) const
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
Definition: BasicBlock.cpp:223
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:328
SCEVPredicateKind getKind() const
LLVM Basic Block Representation.
Definition: BasicBlock.h:57
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition: Type.cpp:115
This class represents a binary unsigned division operation.
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:41
This class represents an unsigned minimum selection.
Value * getIncomingValueForBlock(const BasicBlock *BB) const
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:134
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:224
const Instruction & front() const
Definition: BasicBlock.h:285
#define H(x, y, z)
Definition: MD5.cpp:57
const SCEV * getExpr() const override
Implementation of the SCEVPredicate interface.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:370
bool mayHaveSideEffects() const
Return true if the instruction may have side effects.
Definition: Instruction.h:582
const SCEV * getAddExpr(SmallVectorImpl< const SCEV *> &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
const SCEV * getLHS() const
Interval::pred_iterator pred_begin(Interval *I)
pred_begin/pred_end - define methods so that Intervals may be used just like BasicBlocks can with the...
Definition: Interval.h:112
op_iterator op_end()
Definition: User.h:231
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:1172
const Instruction & back() const
Definition: BasicBlock.h:287
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:732
static bool FactorOutConstant(const SCEV *&S, const SCEV *&Remainder, const SCEV *Factor, ScalarEvolution &SE, const DataLayout &DL)
FactorOutConstant - Test if S is divisible by Factor, using signed division.
constexpr double e
Definition: MathExtras.h:57
Value * expandCodeFor(const SCEV *SH, Type *Ty, Instruction *I)
Insert code to directly compute the specified SCEV expression into the program.
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
Interval::pred_iterator pred_end(Interval *I)
Definition: Interval.h:115
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values...
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:607
self_iterator getIterator()
Definition: ilist_node.h:81
Class to represent integer types.
Definition: DerivedTypes.h:40
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn&#39;t already there.
Definition: SmallSet.h:180
static LLVM_NODISCARD SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
static Expected< BitVector > expand(StringRef S, StringRef Original)
Definition: GlobPattern.cpp:27
const Function * getFunction() const
Return the function this instruction belongs to.
Definition: Instruction.cpp:59
const SCEV * getLHS() const
Returns the left hand side of the equality.
void getExitingBlocks(SmallVectorImpl< BlockT *> &ExitingBlocks) const
Return all blocks inside the loop that have successors outside of the loop.
Definition: LoopInfoImpl.h:34
const SCEV * getRHS() const
Returns the right hand side of the equality.
size_t size() const
Definition: SmallVector.h:52
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
Definition: Type.cpp:224
const SCEV * getMulExpr(SmallVectorImpl< const SCEV *> &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static void SplitAddRecs(SmallVectorImpl< const SCEV *> &Ops, Type *Ty, ScalarEvolution &SE)
SplitAddRecs - Flatten a list of add operands, moving addrec start values out to the top level...
signed greater than
Definition: InstrTypes.h:759
This class represents an assumption made using SCEV expressions which can be checked at run-time...
static bool IsIncrementNSW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
void sort(IteratorTy Start, IteratorTy End)
Definition: STLExtras.h:1095
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest, ScalarEvolution &SE)
Move parts of Base into Rest to leave Base with the minimal expression that provides a pointer operan...
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:244
void setHasNoSignedWrap(bool b=true)
Set or clear the nsw flag on this instruction, which must be an operator which supports this flag...
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
Definition: LoopInfo.h:115
unsigned getSCEVType() const
bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
unsigned getNumOperands() const
Definition: User.h:191
static PointerType * getInt1PtrTy(LLVMContext &C, unsigned AS=0)
Definition: Type.cpp:220
This is the shared class of boolean and integer constants.
Definition: Constants.h:83
Type * getType() const
Return the LLVM type of this SCEV expression.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
PHINode * getOrInsertCanonicalInductionVariable(const Loop *L, Type *Ty)
This method returns the canonical induction variable of the specified type for the specified loop (in...
This is a &#39;vector&#39; (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:837
bool dominates(const Instruction *Def, const Use &U) const
Return true if Def dominates a use in User.
Definition: Dominators.cpp:248
Module.h This file contains the declarations for the Module class.
signed less than
Definition: InstrTypes.h:761
uint64_t getSizeInBytes() const
Definition: DataLayout.h:593
CHAIN = SC CHAIN, Imm128 - System call.
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:653
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
Definition: Constants.cpp:667
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
unsigned logBase2() const
Definition: APInt.h:1756
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:940
PHINode * getCanonicalInductionVariable() const
Check to see if the loop has a canonical induction variable: an integer recurrence that starts at 0 a...
Definition: LoopInfo.cpp:146
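
Unlike SCEVExpander::getOrInsertCanonicalInductionVariable above, this Loop query never mutates the IR; a sketch:

  #include "llvm/Analysis/LoopInfo.h"
  #include "llvm/Support/raw_ostream.h"
  using namespace llvm;

  void reportCanonicalIV(const Loop *L) {
    // Query only: nullptr if L has no header phi starting at 0, stepping by 1.
    if (PHINode *IV = L->getCanonicalInductionVariable())
      errs() << "canonical IV: " << *IV << "\n";
  }
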
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a module.
Definition: Instruction.cpp:55
Class for arbitrary precision integers.
Definition: APInt.h:69
This node represents an addition of some number of SCEVs.
static BasicBlock::iterator findInsertPointAfter(Instruction *I, BasicBlock *MustDominate)
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition: APInt.h:463
This class represents a signed maximum selection.
iterator_range< user_iterator > users()
Definition: Value.h:420
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:89
Value * expandUnionPredicate(const SCEVUnionPredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a SCEVUnionPredicate.
static Constant * getCast(unsigned ops, Constant *C, Type *Ty, bool OnlyIfReduced=false)
Convenience function for getting a Cast operation.
Definition: Constants.cpp:1561
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
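
A visitor for visitAll only needs follow() and isDone() members. As an example, this hypothetical counter (AddRecCounter is not an LLVM type) tallies addrec nodes in an expression tree:

  #include "llvm/Analysis/ScalarEvolutionExpressions.h"
  using namespace llvm;

  struct AddRecCounter {
    unsigned Count = 0;
    bool follow(const SCEV *S) {
      if (isa<SCEVAddRecExpr>(S))
        ++Count;
      return true;                        // keep descending into operands
    }
    bool isDone() const { return false; } // never stop early
  };

  unsigned countAddRecs(const SCEV *Root) {
    AddRecCounter Counter;
    visitAll(Root, Counter);
    return Counter.Count;
  }
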
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
Definition: APInt.h:529
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:387
This class represents a zero extension of a small integer value to a larger integer value...
Value * CreateTruncOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:1991
static bool IsIncrementNUW(ScalarEvolution &SE, const SCEVAddRecExpr *AR)
LoopT * getParentLoop() const
Definition: LoopInfo.h:106
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's constructor.
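
When the opcode is known statically this is a one-liner (a sketch; zextTo64 and its parameters are illustrative):

  #include "llvm/IR/InstrTypes.h"
  #include "llvm/IR/Type.h"
  using namespace llvm;

  Instruction *zextTo64(Value *V, Instruction *InsertBefore) {
    // Materialize an explicit zext without naming the ZExtInst subclass.
    return CastInst::Create(Instruction::ZExt, V,
                            Type::getInt64Ty(V->getContext()), "zext",
                            InsertBefore);
  }
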
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:331
uint64_t getElementOffset(unsigned Idx) const
Definition: DataLayout.h:607
This class represents an analyzed expression in the program.
static IntegerType * getInt32Ty(LLVMContext &C)
Definition: Type.cpp:180
unsigned getIntegerBitWidth() const
Definition: DerivedTypes.h:102
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:55
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:509
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:214
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:106
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
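
Paired with CastInst::Create above, this lets a caller derive trunc/zext/sext/bitcast from the types alone (a sketch; the signedness flags are assumptions of the example):

  #include "llvm/IR/InstrTypes.h"
  using namespace llvm;

  Value *castTo(Value *V, Type *DestTy, Instruction *InsertBefore) {
    if (V->getType() == DestTy)
      return V;                           // nothing to do
    Instruction::CastOps Op = CastInst::getCastOpcode(
        V, /*SrcIsSigned=*/false, DestTy, /*DstIsSigned=*/false);
    return CastInst::Create(Op, V, DestTy, V->getName(), InsertBefore);
  }
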
bool isZero() const
This is just a convenience method to make client code smaller for a common case.
Definition: Constants.h:192
iterator_range< value_op_iterator > operand_values()
Definition: User.h:261
This class represents a sign extension of a small integer value to a larger integer value...
This class represents an unsigned maximum selection.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:332
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
Definition: PatternMatch.h:92
Instruction * getIVIncOperand(Instruction *IncV, Instruction *InsertPos, bool allowScale)
Return the induction variable increment's IV operand.
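
One way a client can use this is to walk from an increment back toward its phi, one recognized step per call (a hedged sketch modeled on the expander's own chain walks; reachesPhi is not an LLVM function):

  #include "llvm/Analysis/ScalarEvolutionExpander.h"
  using namespace llvm;

  bool reachesPhi(SCEVExpander &Expander, Instruction *IncV, PHINode *PN,
                  Instruction *InsertPos) {
    while (IncV != PN) {
      IncV = Expander.getIVIncOperand(IncV, InsertPos, /*allowScale=*/false);
      if (!IncV)
        return false;                     // not a simple increment chain
    }
    return true;
  }
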
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
Definition: BasicBlock.h:329
const SCEV * getRHS() const
unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT, SmallVectorImpl< WeakTrackingVH > &DeadInsts, const TargetTransformInfo *TTI=nullptr)
Replace congruent phis with their most canonical representative. Returns the number of phis eliminated.
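
A typical post-expansion cleanup, in the spirit of what IndVarSimplify does (a sketch; pass plumbing and error handling elided):

  #include "llvm/Analysis/ScalarEvolutionExpander.h"
  #include "llvm/Analysis/TargetTransformInfo.h"
  #include "llvm/IR/ValueHandle.h"
  #include "llvm/Transforms/Utils/Local.h"
  using namespace llvm;

  unsigned cleanupIVs(SCEVExpander &Expander, Loop *L, DominatorTree &DT,
                      const TargetTransformInfo &TTI) {
    SmallVector<WeakTrackingVH, 16> DeadInsts;
    unsigned NumElim = Expander.replaceCongruentIVs(L, &DT, DeadInsts, &TTI);
    // Congruent phis were RAUW'd; delete whatever became trivially dead.
    while (!DeadInsts.empty())
      if (auto *I = dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
        RecursivelyDeleteTriviallyDeadInstructions(I);
    return NumElim;
  }
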
const SmallVectorImpl< const SCEVPredicate * > & getPredicates() const
void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw flag on this instruction, which must be an operator which supports this flag...
This class represents a composition of other SCEV predicates, and is the class that most clients will interact with.
bool isOne() const
Return true if the expression is a constant one.
void stable_sort(R &&Range)
Definition: STLExtras.h:1289
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
Module * getParent()
Get the module that contains this global value.
Definition: GlobalValue.h:575
Value * expandEqualPredicate(const SCEVEqualPredicate *Pred, Instruction *Loc)
A specialized variant of expandCodeForPredicate, handling the case when we are expanding code for a SCEVEqualPredicate.
LLVM Value Representation.
Definition: Value.h:74
A vector that has set insertion semantics.
Definition: SetVector.h:40
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos lives in, right before MovePos.
Definition: Instruction.cpp:86
static Value * SimplifyPHINode(PHINode *PN, const SimplifyQuery &Q)
See if we can fold the given phi. If not, returns null.
bool dominates(const SCEV *S, const BasicBlock *BB)
Return true if the elements that make up the given SCEV dominate the specified basic block.
unsigned greater than
Definition: InstrTypes.h:755
This pass exposes codegen information to IR-level passes.
bool isIllegalInteger(uint64_t Width) const
Definition: DataLayout.h:261
static APInt getNullValue(unsigned numBits)
Get the '0' value.
Definition: APInt.h:568
This node is a base class providing common functionality for n'ary operators.
This class represents an assumption made on an AddRec expression.
NoWrapFlags
NoWrapFlags are bitfield indices into SubclassData.
bool isSafeToExpand(const SCEV *S, ScalarEvolution &SE)
Return true if the given expression is safe to expand in the sense that all materialized values are safe to speculate anywhere their operands are defined.
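
Clients are expected to ask this before materializing code, roughly as follows (a sketch; expandIfSafe and InsertPt are illustrative):

  #include "llvm/Analysis/ScalarEvolutionExpander.h"
  using namespace llvm;

  Value *expandIfSafe(SCEVExpander &Expander, ScalarEvolution &SE,
                      const SCEV *S, Instruction *InsertPt) {
    // Refuse expressions (e.g. containing division) whose operands may not
    // be safe to speculate at an arbitrary insertion point.
    if (!isSafeToExpand(S, SE))
      return nullptr;
    return Expander.expandCodeFor(S, S->getType(), InsertPt);
  }
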
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
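
The common calling pattern is replace-and-erase on success; a sketch assuming only a DataLayout is at hand (richer SimplifyQuery constructors also take TLI/DT/AC):

  #include "llvm/Analysis/InstructionSimplify.h"
  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Instruction.h"
  using namespace llvm;

  void foldIfPossible(Instruction *I, const DataLayout &DL) {
    if (Value *V = SimplifyInstruction(I, {DL})) {
      I->replaceAllUsesWith(V); // users now see the simplified value
      I->eraseFromParent();     // the original instruction is dead
    }
  }
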
BlockT * getExitingBlock() const
If getExitingBlocks would return exactly one block, return that block.
Definition: LoopInfoImpl.h:49
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
This class represents an assumption that two SCEV expressions are equal, and this can be checked at run-time.
static IntegerType * getInt8Ty(LLVMContext &C)
Definition: Type.cpp:178
IncrementWrapFlags getFlags() const
Returns the set assumed no overflow flags.
Type * getElementType() const
Definition: DerivedTypes.h:598
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
Definition: PatternMatch.h:558
const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
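
Together with getZeroExtendExpr above, widening an expression both ways looks like this (a sketch; widenBothWays is illustrative, and Depth can be left defaulted since it only bounds recursion):

  #include "llvm/Analysis/ScalarEvolution.h"
  #include "llvm/IR/Type.h"
  using namespace llvm;

  void widenBothWays(ScalarEvolution &SE, const SCEV *S, LLVMContext &Ctx) {
    Type *I64 = Type::getInt64Ty(Ctx);
    const SCEV *Z = SE.getZeroExtendExpr(S, I64); // unsigned interpretation
    const SCEV *X = SE.getSignExtendExpr(S, I64); // signed interpretation
    (void)Z; (void)X;
  }
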
const BasicBlock * getParent() const
Definition: Instruction.h:66
static bool canBeCheaplyTransformed(ScalarEvolution &SE, const SCEVAddRecExpr *Phi, const SCEVAddRecExpr *Requested, bool &InvertStep)
Check whether we can cheaply express the requested SCEV in terms of the available PHI SCEV by truncation and/or inversion of the step.
This class represents a constant integer value.
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
static Constant * get(unsigned Opcode, Constant *C1, unsigned Flags=0, Type *OnlyIfReducedTy=nullptr)
get - Return a unary operator constant expression, folding if possible.
Definition: Constants.cpp:1837