LLVM  6.0.0svn
InstCombinePHI.cpp
Go to the documentation of this file.
1 //===- InstCombinePHI.cpp -------------------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the visitPHINode function.
11 //
12 //===----------------------------------------------------------------------===//
13 
#include "InstCombineInternal.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/Local.h"
22 using namespace llvm;
23 using namespace llvm::PatternMatch;
24 
25 #define DEBUG_TYPE "instcombine"
26 
27 /// The PHI arguments will be folded into a single operation with a PHI node
28 /// as input. The debug location of the single operation will be the merged
29 /// locations of the original PHI node arguments.
30 DebugLoc InstCombiner::PHIArgMergedDebugLoc(PHINode &PN) {
31  auto *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
32  const DILocation *Loc = FirstInst->getDebugLoc();
33 
34  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
35  auto *I = cast<Instruction>(PN.getIncomingValue(i));
36  Loc = DILocation::getMergedLocation(Loc, I->getDebugLoc());
37  }
38 
39  return Loc;
40 }
41 
42 /// If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the
43 /// adds all have a single use, turn this into a phi and a single binop.
44 Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
45  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
46  assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
47  unsigned Opc = FirstInst->getOpcode();
48  Value *LHSVal = FirstInst->getOperand(0);
49  Value *RHSVal = FirstInst->getOperand(1);
50 
51  Type *LHSType = LHSVal->getType();
52  Type *RHSType = RHSVal->getType();
53 
54  // Scan to see if all operands are the same opcode, and all have one use.
55  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
57  if (!I || I->getOpcode() != Opc || !I->hasOneUse() ||
58  // Verify type of the LHS matches so we don't fold cmp's of different
59  // types.
60  I->getOperand(0)->getType() != LHSType ||
61  I->getOperand(1)->getType() != RHSType)
62  return nullptr;
63 
64  // If they are CmpInst instructions, check their predicates
65  if (CmpInst *CI = dyn_cast<CmpInst>(I))
66  if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
67  return nullptr;
68 
69  // Keep track of which operand needs a phi node.
70  if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
71  if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
72  }
73 
74  // If both LHS and RHS would need a PHI, don't do this transformation,
75  // because it would increase the number of PHIs entering the block,
76  // which leads to higher register pressure. This is especially
77  // bad when the PHIs are in the header of a loop.
78  if (!LHSVal && !RHSVal)
79  return nullptr;
80 
81  // Otherwise, this is safe to transform!
82 
83  Value *InLHS = FirstInst->getOperand(0);
84  Value *InRHS = FirstInst->getOperand(1);
85  PHINode *NewLHS = nullptr, *NewRHS = nullptr;
86  if (!LHSVal) {
87  NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
88  FirstInst->getOperand(0)->getName() + ".pn");
89  NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
90  InsertNewInstBefore(NewLHS, PN);
91  LHSVal = NewLHS;
92  }
93 
94  if (!RHSVal) {
95  NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
96  FirstInst->getOperand(1)->getName() + ".pn");
97  NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
98  InsertNewInstBefore(NewRHS, PN);
99  RHSVal = NewRHS;
100  }
101 
102  // Add all operands to the new PHIs.
103  if (NewLHS || NewRHS) {
104  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
105  Instruction *InInst = cast<Instruction>(PN.getIncomingValue(i));
106  if (NewLHS) {
107  Value *NewInLHS = InInst->getOperand(0);
108  NewLHS->addIncoming(NewInLHS, PN.getIncomingBlock(i));
109  }
110  if (NewRHS) {
111  Value *NewInRHS = InInst->getOperand(1);
112  NewRHS->addIncoming(NewInRHS, PN.getIncomingBlock(i));
113  }
114  }
115  }
116 
117  if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) {
118  CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
119  LHSVal, RHSVal);
120  NewCI->setDebugLoc(PHIArgMergedDebugLoc(PN));
121  return NewCI;
122  }
123 
124  BinaryOperator *BinOp = cast<BinaryOperator>(FirstInst);
125  BinaryOperator *NewBinOp =
126  BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
127 
128  NewBinOp->copyIRFlags(PN.getIncomingValue(0));
129 
130  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i)
131  NewBinOp->andIRFlags(PN.getIncomingValue(i));
132 
133  NewBinOp->setDebugLoc(PHIArgMergedDebugLoc(PN));
134  return NewBinOp;
135 }
136 
137 Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
138  GetElementPtrInst *FirstInst =cast<GetElementPtrInst>(PN.getIncomingValue(0));
139 
140  SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
141  FirstInst->op_end());
142  // This is true if all GEP bases are allocas and if all indices into them are
143  // constants.
144  bool AllBasePointersAreAllocas = true;
145 
146  // We don't want to replace this phi if the replacement would require
147  // more than one phi, which leads to higher register pressure. This is
148  // especially bad when the PHIs are in the header of a loop.
149  bool NeededPhi = false;
150 
151  bool AllInBounds = true;
152 
153  // Scan to see if all operands are the same opcode, and all have one use.
154  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
156  if (!GEP || !GEP->hasOneUse() || GEP->getType() != FirstInst->getType() ||
157  GEP->getNumOperands() != FirstInst->getNumOperands())
158  return nullptr;
159 
160  AllInBounds &= GEP->isInBounds();
161 
162  // Keep track of whether or not all GEPs are of alloca pointers.
163  if (AllBasePointersAreAllocas &&
164  (!isa<AllocaInst>(GEP->getOperand(0)) ||
165  !GEP->hasAllConstantIndices()))
166  AllBasePointersAreAllocas = false;
167 
168  // Compare the operand lists.
169  for (unsigned op = 0, e = FirstInst->getNumOperands(); op != e; ++op) {
170  if (FirstInst->getOperand(op) == GEP->getOperand(op))
171  continue;
172 
173  // Don't merge two GEPs when two operands differ (introducing phi nodes)
174  // if one of the PHIs has a constant for the index. The index may be
175  // substantially cheaper to compute for the constants, so making it a
176  // variable index could pessimize the path. This also handles the case
177  // for struct indices, which must always be constant.
178  if (isa<ConstantInt>(FirstInst->getOperand(op)) ||
179  isa<ConstantInt>(GEP->getOperand(op)))
180  return nullptr;
181 
182  if (FirstInst->getOperand(op)->getType() !=GEP->getOperand(op)->getType())
183  return nullptr;
184 
185  // If we already needed a PHI for an earlier operand, and another operand
186  // also requires a PHI, we'd be introducing more PHIs than we're
187  // eliminating, which increases register pressure on entry to the PHI's
188  // block.
189  if (NeededPhi)
190  return nullptr;
191 
192  FixedOperands[op] = nullptr; // Needs a PHI.
193  NeededPhi = true;
194  }
195  }
196 
197  // If all of the base pointers of the PHI'd GEPs are from allocas, don't
198  // bother doing this transformation. At best, this will just save a bit of
199  // offset calculation, but all the predecessors will have to materialize the
200  // stack address into a register anyway. We'd actually rather *clone* the
201  // load up into the predecessors so that we have a load of a gep of an alloca,
202  // which can usually all be folded into the load.
203  if (AllBasePointersAreAllocas)
204  return nullptr;
205 
206  // Otherwise, this is safe to transform. Insert PHI nodes for each operand
207  // that is variable.
208  SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());
209 
210  bool HasAnyPHIs = false;
211  for (unsigned i = 0, e = FixedOperands.size(); i != e; ++i) {
212  if (FixedOperands[i]) continue; // operand doesn't need a phi.
213  Value *FirstOp = FirstInst->getOperand(i);
214  PHINode *NewPN = PHINode::Create(FirstOp->getType(), e,
215  FirstOp->getName()+".pn");
216  InsertNewInstBefore(NewPN, PN);
217 
218  NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
219  OperandPhis[i] = NewPN;
220  FixedOperands[i] = NewPN;
221  HasAnyPHIs = true;
222  }
223 
224 
225  // Add all operands to the new PHIs.
226  if (HasAnyPHIs) {
227  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
228  GetElementPtrInst *InGEP =cast<GetElementPtrInst>(PN.getIncomingValue(i));
229  BasicBlock *InBB = PN.getIncomingBlock(i);
230 
231  for (unsigned op = 0, e = OperandPhis.size(); op != e; ++op)
232  if (PHINode *OpPhi = OperandPhis[op])
233  OpPhi->addIncoming(InGEP->getOperand(op), InBB);
234  }
235  }
236 
237  Value *Base = FixedOperands[0];
238  GetElementPtrInst *NewGEP =
240  makeArrayRef(FixedOperands).slice(1));
241  if (AllInBounds) NewGEP->setIsInBounds();
242  NewGEP->setDebugLoc(PHIArgMergedDebugLoc(PN));
243  return NewGEP;
244 }
245 
246 
247 /// Return true if we know that it is safe to sink the load out of the block
248 /// that defines it. This means that it must be obvious the value of the load is
249 /// not changed from the point of the load to the end of the block it is in.
250 ///
251 /// Finally, it is safe, but not profitable, to sink a load targeting a
252 /// non-address-taken alloca. Doing so will cause us to not promote the alloca
253 /// to a register.
255  BasicBlock::iterator BBI = L->getIterator(), E = L->getParent()->end();
256 
257  for (++BBI; BBI != E; ++BBI)
258  if (BBI->mayWriteToMemory())
259  return false;
260 
261  // Check for non-address taken alloca. If not address-taken already, it isn't
262  // profitable to do this xform.
263  if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
264  bool isAddressTaken = false;
265  for (User *U : AI->users()) {
266  if (isa<LoadInst>(U)) continue;
267  if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
268  // If storing TO the alloca, then the address isn't taken.
269  if (SI->getOperand(1) == AI) continue;
270  }
271  isAddressTaken = true;
272  break;
273  }
274 
275  if (!isAddressTaken && AI->isStaticAlloca())
276  return false;
277  }
278 
279  // If this load is a load from a GEP with a constant offset from an alloca,
280  // then we don't want to sink it. In its present form, it will be
281  // load [constant stack offset]. Sinking it will cause us to have to
282  // materialize the stack addresses in each predecessor in a register only to
283  // do a shared load from register in the successor.
284  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
285  if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
286  if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
287  return false;
288 
289  return true;
290 }
291 
292 Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
293  LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));
294 
295  // FIXME: This is overconservative; this transform is allowed in some cases
296  // for atomic operations.
297  if (FirstLI->isAtomic())
298  return nullptr;
299 
300  // When processing loads, we need to propagate two bits of information to the
301  // sunk load: whether it is volatile, and what its alignment is. We currently
302  // don't sink loads when some have their alignment specified and some don't.
303  // visitLoadInst will propagate an alignment onto the load when TD is around,
304  // and if TD isn't around, we can't handle the mixed case.
305  bool isVolatile = FirstLI->isVolatile();
306  unsigned LoadAlignment = FirstLI->getAlignment();
307  unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();
308 
309  // We can't sink the load if the loaded value could be modified between the
310  // load and the PHI.
311  if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
313  return nullptr;
314 
315  // If the PHI is of volatile loads and the load block has multiple
316  // successors, sinking it would remove a load of the volatile value from
317  // the path through the other successor.
318  if (isVolatile &&
319  FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
320  return nullptr;
321 
322  // Check to see if all arguments are the same operation.
323  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
325  if (!LI || !LI->hasOneUse())
326  return nullptr;
327 
328  // We can't sink the load if the loaded value could be modified between
329  // the load and the PHI.
330  if (LI->isVolatile() != isVolatile ||
331  LI->getParent() != PN.getIncomingBlock(i) ||
332  LI->getPointerAddressSpace() != LoadAddrSpace ||
334  return nullptr;
335 
336  // If some of the loads have an alignment specified but not all of them,
337  // we can't do the transformation.
338  if ((LoadAlignment != 0) != (LI->getAlignment() != 0))
339  return nullptr;
340 
341  LoadAlignment = std::min(LoadAlignment, LI->getAlignment());
342 
343  // If the PHI is of volatile loads and the load block has multiple
344  // successors, sinking it would remove a load of the volatile value from
345  // the path through the other successor.
346  if (isVolatile &&
347  LI->getParent()->getTerminator()->getNumSuccessors() != 1)
348  return nullptr;
349  }
350 
351  // Okay, they are all the same operation. Create a new PHI node of the
352  // correct type, and PHI together all of the LHS's of the instructions.
353  PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
355  PN.getName()+".in");
356 
357  Value *InVal = FirstLI->getOperand(0);
358  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
359  LoadInst *NewLI = new LoadInst(NewPN, "", isVolatile, LoadAlignment);
360 
361  unsigned KnownIDs[] = {
371  };
372 
373  for (unsigned ID : KnownIDs)
374  NewLI->setMetadata(ID, FirstLI->getMetadata(ID));
375 
376  // Add all operands to the new PHI and combine TBAA metadata.
377  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
378  LoadInst *LI = cast<LoadInst>(PN.getIncomingValue(i));
379  combineMetadata(NewLI, LI, KnownIDs);
380  Value *NewInVal = LI->getOperand(0);
381  if (NewInVal != InVal)
382  InVal = nullptr;
383  NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
384  }
385 
386  if (InVal) {
387  // The new PHI unions all of the same values together. This is really
388  // common, so we handle it intelligently here for compile-time speed.
389  NewLI->setOperand(0, InVal);
390  delete NewPN;
391  } else {
392  InsertNewInstBefore(NewPN, PN);
393  }
394 
395  // If this was a volatile load that we are merging, make sure to loop through
396  // and mark all the input loads as non-volatile. If we don't do this, we will
397  // insert a new volatile load and the old ones will not be deletable.
398  if (isVolatile)
399  for (Value *IncValue : PN.incoming_values())
400  cast<LoadInst>(IncValue)->setVolatile(false);
401 
402  NewLI->setDebugLoc(PHIArgMergedDebugLoc(PN));
403  return NewLI;
404 }
405 
406 /// TODO: This function could handle other cast types, but then it might
407 /// require special-casing a cast from the 'i1' type. See the comment in
408 /// FoldPHIArgOpIntoPHI() about pessimizing illegal integer types.
409 Instruction *InstCombiner::FoldPHIArgZextsIntoPHI(PHINode &Phi) {
410  // We cannot create a new instruction after the PHI if the terminator is an
411  // EHPad because there is no valid insertion point.
412  if (TerminatorInst *TI = Phi.getParent()->getTerminator())
413  if (TI->isEHPad())
414  return nullptr;
415 
416  // Early exit for the common case of a phi with two operands. These are
417  // handled elsewhere. See the comment below where we check the count of zexts
418  // and constants for more details.
419  unsigned NumIncomingValues = Phi.getNumIncomingValues();
420  if (NumIncomingValues < 3)
421  return nullptr;
422 
423  // Find the narrower type specified by the first zext.
424  Type *NarrowType = nullptr;
425  for (Value *V : Phi.incoming_values()) {
426  if (auto *Zext = dyn_cast<ZExtInst>(V)) {
427  NarrowType = Zext->getSrcTy();
428  break;
429  }
430  }
431  if (!NarrowType)
432  return nullptr;
433 
434  // Walk the phi operands checking that we only have zexts or constants that
435  // we can shrink for free. Store the new operands for the new phi.
436  SmallVector<Value *, 4> NewIncoming;
437  unsigned NumZexts = 0;
438  unsigned NumConsts = 0;
439  for (Value *V : Phi.incoming_values()) {
440  if (auto *Zext = dyn_cast<ZExtInst>(V)) {
441  // All zexts must be identical and have one use.
442  if (Zext->getSrcTy() != NarrowType || !Zext->hasOneUse())
443  return nullptr;
444  NewIncoming.push_back(Zext->getOperand(0));
445  NumZexts++;
446  } else if (auto *C = dyn_cast<Constant>(V)) {
447  // Make sure that constants can fit in the new type.
448  Constant *Trunc = ConstantExpr::getTrunc(C, NarrowType);
449  if (ConstantExpr::getZExt(Trunc, C->getType()) != C)
450  return nullptr;
451  NewIncoming.push_back(Trunc);
452  NumConsts++;
453  } else {
454  // If it's not a cast or a constant, bail out.
455  return nullptr;
456  }
457  }
458 
459  // The more common cases of a phi with no constant operands or just one
460  // variable operand are handled by FoldPHIArgOpIntoPHI() and foldOpIntoPhi()
461  // respectively. foldOpIntoPhi() wants to do the opposite transform that is
462  // performed here. It tries to replicate a cast in the phi operand's basic
463  // block to expose other folding opportunities. Thus, InstCombine will
464  // infinite loop without this check.
465  if (NumConsts == 0 || NumZexts < 2)
466  return nullptr;
467 
468  // All incoming values are zexts or constants that are safe to truncate.
469  // Create a new phi node of the narrow type, phi together all of the new
470  // operands, and zext the result back to the original type.
471  PHINode *NewPhi = PHINode::Create(NarrowType, NumIncomingValues,
472  Phi.getName() + ".shrunk");
473  for (unsigned i = 0; i != NumIncomingValues; ++i)
474  NewPhi->addIncoming(NewIncoming[i], Phi.getIncomingBlock(i));
475 
476  InsertNewInstBefore(NewPhi, Phi);
477  return CastInst::CreateZExtOrBitCast(NewPhi, Phi.getType());
478 }
479 
480 /// If all operands to a PHI node are the same "unary" operator and they all are
481 /// only used by the PHI, PHI together their inputs, and do the operation once,
482 /// to the result of the PHI.
483 Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
484  // We cannot create a new instruction after the PHI if the terminator is an
485  // EHPad because there is no valid insertion point.
486  if (TerminatorInst *TI = PN.getParent()->getTerminator())
487  if (TI->isEHPad())
488  return nullptr;
489 
490  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
491 
492  if (isa<GetElementPtrInst>(FirstInst))
493  return FoldPHIArgGEPIntoPHI(PN);
494  if (isa<LoadInst>(FirstInst))
495  return FoldPHIArgLoadIntoPHI(PN);
496 
497  // Scan the instruction, looking for input operations that can be folded away.
498  // If all input operands to the phi are the same instruction (e.g. a cast from
499  // the same type or "+42") we can pull the operation through the PHI, reducing
500  // code size and simplifying code.
501  Constant *ConstantOp = nullptr;
502  Type *CastSrcTy = nullptr;
503 
504  if (isa<CastInst>(FirstInst)) {
505  CastSrcTy = FirstInst->getOperand(0)->getType();
506 
507  // Be careful about transforming integer PHIs. We don't want to pessimize
508  // the code by turning an i32 into an i1293.
509  if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
510  if (!shouldChangeType(PN.getType(), CastSrcTy))
511  return nullptr;
512  }
513  } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
514  // Can fold binop, compare or shift here if the RHS is a constant,
515  // otherwise call FoldPHIArgBinOpIntoPHI.
516  ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
517  if (!ConstantOp)
518  return FoldPHIArgBinOpIntoPHI(PN);
519  } else {
520  return nullptr; // Cannot fold this operation.
521  }
522 
523  // Check to see if all arguments are the same operation.
524  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
526  if (!I || !I->hasOneUse() || !I->isSameOperationAs(FirstInst))
527  return nullptr;
528  if (CastSrcTy) {
529  if (I->getOperand(0)->getType() != CastSrcTy)
530  return nullptr; // Cast operation must match.
531  } else if (I->getOperand(1) != ConstantOp) {
532  return nullptr;
533  }
534  }
535 
536  // Okay, they are all the same operation. Create a new PHI node of the
537  // correct type, and PHI together all of the LHS's of the instructions.
538  PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
540  PN.getName()+".in");
541 
542  Value *InVal = FirstInst->getOperand(0);
543  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
544 
545  // Add all operands to the new PHI.
546  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
547  Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
548  if (NewInVal != InVal)
549  InVal = nullptr;
550  NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
551  }
552 
553  Value *PhiVal;
554  if (InVal) {
555  // The new PHI unions all of the same values together. This is really
556  // common, so we handle it intelligently here for compile-time speed.
557  PhiVal = InVal;
558  delete NewPN;
559  } else {
560  InsertNewInstBefore(NewPN, PN);
561  PhiVal = NewPN;
562  }
563 
564  // Insert and return the new operation.
565  if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst)) {
566  CastInst *NewCI = CastInst::Create(FirstCI->getOpcode(), PhiVal,
567  PN.getType());
568  NewCI->setDebugLoc(PHIArgMergedDebugLoc(PN));
569  return NewCI;
570  }
571 
572  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
573  BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
574  BinOp->copyIRFlags(PN.getIncomingValue(0));
575 
576  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i)
577  BinOp->andIRFlags(PN.getIncomingValue(i));
578 
579  BinOp->setDebugLoc(PHIArgMergedDebugLoc(PN));
580  return BinOp;
581  }
582 
583  CmpInst *CIOp = cast<CmpInst>(FirstInst);
584  CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
585  PhiVal, ConstantOp);
586  NewCI->setDebugLoc(PHIArgMergedDebugLoc(PN));
587  return NewCI;
588 }
589 
590 /// Return true if this PHI node is only used by a PHI node cycle that is dead.
591 static bool DeadPHICycle(PHINode *PN,
592  SmallPtrSetImpl<PHINode*> &PotentiallyDeadPHIs) {
593  if (PN->use_empty()) return true;
594  if (!PN->hasOneUse()) return false;
595 
596  // Remember this node, and if we find the cycle, return.
597  if (!PotentiallyDeadPHIs.insert(PN).second)
598  return true;
599 
600  // Don't scan crazily complex things.
601  if (PotentiallyDeadPHIs.size() == 16)
602  return false;
603 
604  if (PHINode *PU = dyn_cast<PHINode>(PN->user_back()))
605  return DeadPHICycle(PU, PotentiallyDeadPHIs);
606 
607  return false;
608 }
609 
610 /// Return true if this phi node is always equal to NonPhiInVal.
611 /// This happens with mutually cyclic phi nodes like:
612 /// z = some value; x = phi (y, z); y = phi (x, z)
613 static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
614  SmallPtrSetImpl<PHINode*> &ValueEqualPHIs) {
615  // See if we already saw this PHI node.
616  if (!ValueEqualPHIs.insert(PN).second)
617  return true;
618 
619  // Don't scan crazily complex things.
620  if (ValueEqualPHIs.size() == 16)
621  return false;
622 
623  // Scan the operands to see if they are either phi nodes or are equal to
624  // the value.
625  for (Value *Op : PN->incoming_values()) {
626  if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
627  if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
628  return false;
629  } else if (Op != NonPhiInVal)
630  return false;
631  }
632 
633  return true;
634 }
635 
636 /// Return an existing non-zero constant if this phi node has one, otherwise
637 /// return constant 1.
639  assert(isa<IntegerType>(PN.getType()) && "Expect only integer type phi");
640  for (Value *V : PN.operands())
641  if (auto *ConstVA = dyn_cast<ConstantInt>(V))
642  if (!ConstVA->isZero())
643  return ConstVA;
644  return ConstantInt::get(cast<IntegerType>(PN.getType()), 1);
645 }
646 
647 namespace {
648 struct PHIUsageRecord {
649  unsigned PHIId; // The ID # of the PHI (something determinstic to sort on)
650  unsigned Shift; // The amount shifted.
651  Instruction *Inst; // The trunc instruction.
652 
653  PHIUsageRecord(unsigned pn, unsigned Sh, Instruction *User)
654  : PHIId(pn), Shift(Sh), Inst(User) {}
655 
656  bool operator<(const PHIUsageRecord &RHS) const {
657  if (PHIId < RHS.PHIId) return true;
658  if (PHIId > RHS.PHIId) return false;
659  if (Shift < RHS.Shift) return true;
660  if (Shift > RHS.Shift) return false;
661  return Inst->getType()->getPrimitiveSizeInBits() <
662  RHS.Inst->getType()->getPrimitiveSizeInBits();
663  }
664 };
665 
666 struct LoweredPHIRecord {
667  PHINode *PN; // The PHI that was lowered.
668  unsigned Shift; // The amount shifted.
669  unsigned Width; // The width extracted.
670 
671  LoweredPHIRecord(PHINode *pn, unsigned Sh, Type *Ty)
672  : PN(pn), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}
673 
674  // Ctor form used by DenseMap.
675  LoweredPHIRecord(PHINode *pn, unsigned Sh)
676  : PN(pn), Shift(Sh), Width(0) {}
677 };
678 }
679 
680 namespace llvm {
681  template<>
682  struct DenseMapInfo<LoweredPHIRecord> {
683  static inline LoweredPHIRecord getEmptyKey() {
684  return LoweredPHIRecord(nullptr, 0);
685  }
686  static inline LoweredPHIRecord getTombstoneKey() {
687  return LoweredPHIRecord(nullptr, 1);
688  }
689  static unsigned getHashValue(const LoweredPHIRecord &Val) {
690  return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
691  (Val.Width>>3);
692  }
693  static bool isEqual(const LoweredPHIRecord &LHS,
694  const LoweredPHIRecord &RHS) {
695  return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
696  LHS.Width == RHS.Width;
697  }
698  };
699 }
700 
701 
702 /// This is an integer PHI and we know that it has an illegal type: see if it is
703 /// only used by trunc or trunc(lshr) operations. If so, we split the PHI into
704 /// the various pieces being extracted. This sort of thing is introduced when
705 /// SROA promotes an aggregate to large integer values.
706 ///
707 /// TODO: The user of the trunc may be an bitcast to float/double/vector or an
708 /// inttoptr. We should produce new PHIs in the right type.
709 ///
711  // PHIUsers - Keep track of all of the truncated values extracted from a set
712  // of PHIs, along with their offset. These are the things we want to rewrite.
714 
715  // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
716  // nodes which are extracted from. PHIsToSlice is a set we use to avoid
717  // revisiting PHIs, PHIsInspected is a ordered list of PHIs that we need to
718  // check the uses of (to ensure they are all extracts).
719  SmallVector<PHINode*, 8> PHIsToSlice;
720  SmallPtrSet<PHINode*, 8> PHIsInspected;
721 
722  PHIsToSlice.push_back(&FirstPhi);
723  PHIsInspected.insert(&FirstPhi);
724 
725  for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
726  PHINode *PN = PHIsToSlice[PHIId];
727 
728  // Scan the input list of the PHI. If any input is an invoke, and if the
729  // input is defined in the predecessor, then we won't be split the critical
730  // edge which is required to insert a truncate. Because of this, we have to
731  // bail out.
732  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
734  if (!II) continue;
735  if (II->getParent() != PN->getIncomingBlock(i))
736  continue;
737 
738  // If we have a phi, and if it's directly in the predecessor, then we have
739  // a critical edge where we need to put the truncate. Since we can't
740  // split the edge in instcombine, we have to bail out.
741  return nullptr;
742  }
743 
744  for (User *U : PN->users()) {
745  Instruction *UserI = cast<Instruction>(U);
746 
747  // If the user is a PHI, inspect its uses recursively.
748  if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
749  if (PHIsInspected.insert(UserPN).second)
750  PHIsToSlice.push_back(UserPN);
751  continue;
752  }
753 
754  // Truncates are always ok.
755  if (isa<TruncInst>(UserI)) {
756  PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
757  continue;
758  }
759 
760  // Otherwise it must be a lshr which can only be used by one trunc.
761  if (UserI->getOpcode() != Instruction::LShr ||
762  !UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
763  !isa<ConstantInt>(UserI->getOperand(1)))
764  return nullptr;
765 
766  unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
767  PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
768  }
769  }
770 
771  // If we have no users, they must be all self uses, just nuke the PHI.
772  if (PHIUsers.empty())
773  return replaceInstUsesWith(FirstPhi, UndefValue::get(FirstPhi.getType()));
774 
775  // If this phi node is transformable, create new PHIs for all the pieces
776  // extracted out of it. First, sort the users by their offset and size.
777  array_pod_sort(PHIUsers.begin(), PHIUsers.end());
778 
779  DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
780  for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
781  dbgs() << "AND USER PHI #" << i << ": " << *PHIsToSlice[i] << '\n';
782  );
783 
784  // PredValues - This is a temporary used when rewriting PHI nodes. It is
785  // hoisted out here to avoid construction/destruction thrashing.
787 
788  // ExtractedVals - Each new PHI we introduce is saved here so we don't
789  // introduce redundant PHIs.
791 
792  for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
793  unsigned PHIId = PHIUsers[UserI].PHIId;
794  PHINode *PN = PHIsToSlice[PHIId];
795  unsigned Offset = PHIUsers[UserI].Shift;
796  Type *Ty = PHIUsers[UserI].Inst->getType();
797 
798  PHINode *EltPHI;
799 
800  // If we've already lowered a user like this, reuse the previously lowered
801  // value.
802  if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {
803 
804  // Otherwise, Create the new PHI node for this user.
805  EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
806  PN->getName()+".off"+Twine(Offset), PN);
807  assert(EltPHI->getType() != PN->getType() &&
808  "Truncate didn't shrink phi?");
809 
810  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
811  BasicBlock *Pred = PN->getIncomingBlock(i);
812  Value *&PredVal = PredValues[Pred];
813 
814  // If we already have a value for this predecessor, reuse it.
815  if (PredVal) {
816  EltPHI->addIncoming(PredVal, Pred);
817  continue;
818  }
819 
820  // Handle the PHI self-reuse case.
821  Value *InVal = PN->getIncomingValue(i);
822  if (InVal == PN) {
823  PredVal = EltPHI;
824  EltPHI->addIncoming(PredVal, Pred);
825  continue;
826  }
827 
828  if (PHINode *InPHI = dyn_cast<PHINode>(PN)) {
829  // If the incoming value was a PHI, and if it was one of the PHIs we
830  // already rewrote it, just use the lowered value.
831  if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
832  PredVal = Res;
833  EltPHI->addIncoming(PredVal, Pred);
834  continue;
835  }
836  }
837 
838  // Otherwise, do an extract in the predecessor.
839  Builder.SetInsertPoint(Pred->getTerminator());
840  Value *Res = InVal;
841  if (Offset)
842  Res = Builder.CreateLShr(Res, ConstantInt::get(InVal->getType(),
843  Offset), "extract");
844  Res = Builder.CreateTrunc(Res, Ty, "extract.t");
845  PredVal = Res;
846  EltPHI->addIncoming(Res, Pred);
847 
848  // If the incoming value was a PHI, and if it was one of the PHIs we are
849  // rewriting, we will ultimately delete the code we inserted. This
850  // means we need to revisit that PHI to make sure we extract out the
851  // needed piece.
852  if (PHINode *OldInVal = dyn_cast<PHINode>(PN->getIncomingValue(i)))
853  if (PHIsInspected.count(OldInVal)) {
854  unsigned RefPHIId =
855  find(PHIsToSlice, OldInVal) - PHIsToSlice.begin();
856  PHIUsers.push_back(PHIUsageRecord(RefPHIId, Offset,
857  cast<Instruction>(Res)));
858  ++UserE;
859  }
860  }
861  PredValues.clear();
862 
863  DEBUG(dbgs() << " Made element PHI for offset " << Offset << ": "
864  << *EltPHI << '\n');
865  ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
866  }
867 
868  // Replace the use of this piece with the PHI node.
869  replaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
870  }
871 
872  // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
873  // with undefs.
874  Value *Undef = UndefValue::get(FirstPhi.getType());
875  for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
876  replaceInstUsesWith(*PHIsToSlice[i], Undef);
877  return replaceInstUsesWith(FirstPhi, Undef);
878 }
879 
880 // PHINode simplification
881 //
883  if (Value *V = SimplifyInstruction(&PN, SQ.getWithInstruction(&PN)))
884  return replaceInstUsesWith(PN, V);
885 
886  if (Instruction *Result = FoldPHIArgZextsIntoPHI(PN))
887  return Result;
888 
889  // If all PHI operands are the same operation, pull them through the PHI,
890  // reducing code size.
891  if (isa<Instruction>(PN.getIncomingValue(0)) &&
892  isa<Instruction>(PN.getIncomingValue(1)) &&
893  cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
894  cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
895  // FIXME: The hasOneUse check will fail for PHIs that use the value more
896  // than themselves more than once.
897  PN.getIncomingValue(0)->hasOneUse())
898  if (Instruction *Result = FoldPHIArgOpIntoPHI(PN))
899  return Result;
900 
901  // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
902  // this PHI only has a single use (a PHI), and if that PHI only has one use (a
903  // PHI)... break the cycle.
904  if (PN.hasOneUse()) {
905  Instruction *PHIUser = cast<Instruction>(PN.user_back());
906  if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
907  SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
908  PotentiallyDeadPHIs.insert(&PN);
909  if (DeadPHICycle(PU, PotentiallyDeadPHIs))
910  return replaceInstUsesWith(PN, UndefValue::get(PN.getType()));
911  }
912 
913  // If this phi has a single use, and if that use just computes a value for
914  // the next iteration of a loop, delete the phi. This occurs with unused
915  // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
916  // common case here is good because the only other things that catch this
917  // are induction variable analysis (sometimes) and ADCE, which is only run
918  // late.
919  if (PHIUser->hasOneUse() &&
920  (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
921  PHIUser->user_back() == &PN) {
922  return replaceInstUsesWith(PN, UndefValue::get(PN.getType()));
923  }
924  // When a PHI is used only to be compared with zero, it is safe to replace
925  // an incoming value proved as known nonzero with any non-zero constant.
926  // For example, in the code below, the incoming value %v can be replaced
927  // with any non-zero constant based on the fact that the PHI is only used to
928  // be compared with zero and %v is a known non-zero value:
929  // %v = select %cond, 1, 2
930  // %p = phi [%v, BB] ...
931  // icmp eq, %p, 0
932  auto *CmpInst = dyn_cast<ICmpInst>(PHIUser);
933  // FIXME: To be simple, handle only integer type for now.
934  if (CmpInst && isa<IntegerType>(PN.getType()) && CmpInst->isEquality() &&
935  match(CmpInst->getOperand(1), m_Zero())) {
936  ConstantInt *NonZeroConst = nullptr;
937  for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
938  Instruction *CtxI = PN.getIncomingBlock(i)->getTerminator();
939  Value *VA = PN.getIncomingValue(i);
940  if (isKnownNonZero(VA, DL, 0, &AC, CtxI, &DT)) {
941  if (!NonZeroConst)
942  NonZeroConst = GetAnyNonZeroConstInt(PN);
943  PN.setIncomingValue(i, NonZeroConst);
944  }
945  }
946  }
947  }
948 
949  // We sometimes end up with phi cycles that non-obviously end up being the
950  // same value, for example:
951  // z = some value; x = phi (y, z); y = phi (x, z)
952  // where the phi nodes don't necessarily need to be in the same block. Do a
953  // quick check to see if the PHI node only contains a single non-phi value, if
954  // so, scan to see if the phi cycle is actually equal to that value.
955  {
956  unsigned InValNo = 0, NumIncomingVals = PN.getNumIncomingValues();
957  // Scan for the first non-phi operand.
958  while (InValNo != NumIncomingVals &&
959  isa<PHINode>(PN.getIncomingValue(InValNo)))
960  ++InValNo;
961 
962  if (InValNo != NumIncomingVals) {
963  Value *NonPhiInVal = PN.getIncomingValue(InValNo);
964 
965  // Scan the rest of the operands to see if there are any conflicts, if so
966  // there is no need to recursively scan other phis.
967  for (++InValNo; InValNo != NumIncomingVals; ++InValNo) {
968  Value *OpVal = PN.getIncomingValue(InValNo);
969  if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
970  break;
971  }
972 
973  // If we scanned over all operands, then we have one unique value plus
974  // phi values. Scan PHI nodes to see if they all merge in each other or
975  // the value.
976  if (InValNo == NumIncomingVals) {
977  SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
978  if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
979  return replaceInstUsesWith(PN, NonPhiInVal);
980  }
981  }
982  }
983 
984  // If there are multiple PHIs, sort their operands so that they all list
985  // the blocks in the same order. This will help identical PHIs be eliminated
986  // by other passes. Other passes shouldn't depend on this for correctness
987  // however.
988  PHINode *FirstPN = cast<PHINode>(PN.getParent()->begin());
989  if (&PN != FirstPN)
990  for (unsigned i = 0, e = FirstPN->getNumIncomingValues(); i != e; ++i) {
991  BasicBlock *BBA = PN.getIncomingBlock(i);
992  BasicBlock *BBB = FirstPN->getIncomingBlock(i);
993  if (BBA != BBB) {
994  Value *VA = PN.getIncomingValue(i);
995  unsigned j = PN.getBasicBlockIndex(BBB);
996  Value *VB = PN.getIncomingValue(j);
997  PN.setIncomingBlock(i, BBB);
998  PN.setIncomingValue(i, VB);
999  PN.setIncomingBlock(j, BBA);
1000  PN.setIncomingValue(j, VA);
1001  // NOTE: Instcombine normally would want us to "return &PN" if we
1002  // modified any of the operands of an instruction. However, since we
1003  // aren't adding or removing uses (just rearranging them) we don't do
1004  // this in this case.
1005  }
1006  }
1007 
1008  // If this is an integer PHI and we know that it has an illegal type, see if
1009  // it is only used by trunc or trunc(lshr) operations. If so, we split the
1010  // PHI into the various pieces being extracted. This sort of thing is
1011  // introduced when SROA promotes an aggregate to a single large integer type.
1012  if (PN.getType()->isIntegerTy() &&
1013  !DL.isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
1014  if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
1015  return Res;
1016 
1017  return nullptr;
1018 }
uint64_t CallInst * C
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:850
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
bool isSameOperationAs(const Instruction *I, unsigned flags=0) const
This function determines if the specified instruction executes the same operation as the current one...
Compute iterated dominance frontiers using a linear time algorithm.
Definition: AllocatorList.h:24
BinaryOps getOpcode() const
Definition: InstrTypes.h:523
bool isAtomic() const
Return true if this instruction has an AtomicOrdering of unordered or higher.
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:136
match_zero m_Zero()
Match an arbitrary zero/null constant.
Definition: PatternMatch.h:145
static LoweredPHIRecord getEmptyKey()
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value *> IdxList, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Definition: Instructions.h:863
A debug info location.
Definition: DebugLoc.h:34
An instruction for reading from memory.
Definition: Instructions.h:164
Hexagon Common GEP
#define op(i)
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
op_iterator op_begin()
Definition: User.h:214
static ConstantInt * GetAnyNonZeroConstInt(PHINode &PN)
Return an existing non-zero constant if this phi node has one, otherwise return constant 1...
static bool isSafeAndProfitableToSinkLoad(LoadInst *L)
Return true if we know that it is safe to sink the load out of the block that defines it...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:336
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:252
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:217
int getBasicBlockIndex(const BasicBlock *BB) const
Return the first index of the specified basic block in the value list for this PHI.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:560
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:451
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:197
Instruction * SliceUpIllegalIntegerPHI(PHINode &PN)
This is an integer PHI and we know that it has an illegal type: see if it is only used by trunc or tr...
static const DILocation * getMergedLocation(const DILocation *LocA, const DILocation *LocB)
When two instructions are combined into a single instruction we also need to combine the original loc...
static bool isEqual(const LoweredPHIRecord &LHS, const LoweredPHIRecord &RHS)
Type * getSourceElementType() const
Definition: Instructions.h:934
static Constant * getZExt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:1570
OtherOps getOpcode() const
Get the opcode casted to the right type.
Definition: InstrTypes.h:929
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:245
bool isInBounds() const
Determine whether the GEP has the inbounds flag.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
Definition: Instruction.h:194
static CmpInst * Create(OtherOps Op, Predicate predicate, Value *S1, Value *S2, const Twine &Name="", Instruction *InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:125
An instruction for storing to memory.
Definition: Instructions.h:306
Debug location.
Value * getOperand(unsigned i) const
Definition: User.h:154
an instruction for type-safe pointer arithmetic to access elements of arrays and structs ...
Definition: Instructions.h:837
void array_pod_sort(IteratorTy Start, IteratorTy End)
array_pod_sort - This sorts an array with the specified start and end extent.
Definition: STLExtras.h:769
Subclasses of this class are all able to terminate a basic block.
Definition: InstrTypes.h:54
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
Definition: Instruction.h:281
LLVM Basic Block Representation.
Definition: BasicBlock.h:59
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:46
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
This is an important base class in LLVM.
Definition: Constant.h:42
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
Definition: SmallVector.h:116
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:363
op_iterator op_end()
Definition: User.h:216
This instruction compares its operands according to the predicate given to the constructor.
op_range operands()
Definition: User.h:222
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:374
self_iterator getIterator()
Definition: ilist_node.h:82
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1320
static CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Create a ZExt or BitCast cast instruction.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
Instruction * visitPHINode(PHINode &PN)
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
Definition: Metadata.cpp:1214
size_type size() const
Definition: SmallPtrSet.h:92
Predicate getPredicate(unsigned Condition, unsigned Hint)
Return predicate consisting of specified condition and hint bits.
Definition: PPCPredicates.h:85
Iterator for intrusive lists based on ilist_node.
unsigned getNumOperands() const
Definition: User.h:176
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:410
This is the shared class of boolean and integer constants.
Definition: Constants.h:84
auto find(R &&Range, const T &Val) -> decltype(std::begin(Range))
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly...
Definition: STLExtras.h:837
void setIncomingBlock(unsigned i, BasicBlock *BB)
iterator end()
Definition: BasicBlock.h:254
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:864
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
Definition: Instruction.h:63
static LoweredPHIRecord getTombstoneKey()
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
Definition: Constants.cpp:1542
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:560
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
unsigned getNumIncomingValues() const
Return the number of incoming edges.
void setOperand(unsigned i, Value *Val)
Definition: User.h:159
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:132
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), Instruction *InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
iterator_range< user_iterator > users()
Definition: Value.h:395
static bool DeadPHICycle(PHINode *PN, SmallPtrSetImpl< PHINode *> &PotentiallyDeadPHIs)
Return true if this PHI node is only used by a PHI node cycle that is dead.
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:934
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
Definition: SmallVector.h:120
unsigned getAlignment() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:226
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:61
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:218
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
#define I(x, y, z)
Definition: MD5.cpp:58
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:323
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:276
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
unsigned getNumSuccessors() const
Return the number of successors that this terminator has.
bool operator<(int64_t V1, const APSInt &V2)
Definition: APSInt.h:326
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition: Type.cpp:115
LLVM Value Representation.
Definition: Value.h:73
bool isEquality() const
This is just a convenience that dispatches to the subclasses.
This file provides internal interfaces used to implement the InstCombine.
static unsigned getHashValue(const LoweredPHIRecord &Val)
Invoke instruction.
#define DEBUG(X)
Definition: Debug.h:118
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Return true if the given value is known to be non-zero when defined.
bool hasOneUse() const
Return true if there is exactly one user of this value.
Definition: Value.h:408
static bool isVolatile(Instruction *Inst)
const TerminatorInst * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:120
void setIncomingValue(unsigned i, Value *V)
op_range incoming_values()
static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal, SmallPtrSetImpl< PHINode *> &ValueEqualPHIs)
Return true if this phi node is always equal to NonPhiInVal.
Value * SimplifyInstruction(Instruction *I, const SimplifyQuery &Q, OptimizationRemarkEmitter *ORE=nullptr)
See if we can compute a simplified version of this instruction.
void combineMetadata(Instruction *K, const Instruction *J, ArrayRef< unsigned > KnownIDs)
Combine the metadata of two instructions so that K can replace J.
Definition: Local.cpp:1711
bool use_empty() const
Definition: Value.h:322
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
const BasicBlock * getParent() const
Definition: Instruction.h:66
an instruction to allocate memory on the stack
Definition: Instructions.h:60