LLVM  3.7.0
InstCombinePHI.cpp
Go to the documentation of this file.
1 //===- InstCombinePHI.cpp -------------------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the visitPHINode function.
11 //
12 //===----------------------------------------------------------------------===//
13 
#include "InstCombineInternal.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
using namespace llvm;
19 
20 #define DEBUG_TYPE "instcombine"
21 
22 /// If we have something like phi [add (a,b), add(a,c)] and if a/b/c and the
23 /// adds all have a single use, turn this into a phi and a single binop.
24 Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
25  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
26  assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
27  unsigned Opc = FirstInst->getOpcode();
28  Value *LHSVal = FirstInst->getOperand(0);
29  Value *RHSVal = FirstInst->getOperand(1);
30 
31  Type *LHSType = LHSVal->getType();
32  Type *RHSType = RHSVal->getType();
33 
34  bool isNUW = false, isNSW = false, isExact = false;
36  dyn_cast<OverflowingBinaryOperator>(FirstInst)) {
37  isNUW = BO->hasNoUnsignedWrap();
38  isNSW = BO->hasNoSignedWrap();
39  } else if (PossiblyExactOperator *PEO =
40  dyn_cast<PossiblyExactOperator>(FirstInst))
41  isExact = PEO->isExact();
42 
43  // Scan to see if all operands are the same opcode, and all have one use.
44  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
46  if (!I || I->getOpcode() != Opc || !I->hasOneUse() ||
47  // Verify type of the LHS matches so we don't fold cmp's of different
48  // types.
49  I->getOperand(0)->getType() != LHSType ||
50  I->getOperand(1)->getType() != RHSType)
51  return nullptr;
52 
53  // If they are CmpInst instructions, check their predicates
54  if (CmpInst *CI = dyn_cast<CmpInst>(I))
55  if (CI->getPredicate() != cast<CmpInst>(FirstInst)->getPredicate())
56  return nullptr;
57 
58  if (isNUW)
59  isNUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
60  if (isNSW)
61  isNSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
62  if (isExact)
63  isExact = cast<PossiblyExactOperator>(I)->isExact();
64 
65  // Keep track of which operand needs a phi node.
66  if (I->getOperand(0) != LHSVal) LHSVal = nullptr;
67  if (I->getOperand(1) != RHSVal) RHSVal = nullptr;
68  }
69 
70  // If both LHS and RHS would need a PHI, don't do this transformation,
71  // because it would increase the number of PHIs entering the block,
72  // which leads to higher register pressure. This is especially
73  // bad when the PHIs are in the header of a loop.
74  if (!LHSVal && !RHSVal)
75  return nullptr;
76 
77  // Otherwise, this is safe to transform!
78 
79  Value *InLHS = FirstInst->getOperand(0);
80  Value *InRHS = FirstInst->getOperand(1);
81  PHINode *NewLHS = nullptr, *NewRHS = nullptr;
82  if (!LHSVal) {
83  NewLHS = PHINode::Create(LHSType, PN.getNumIncomingValues(),
84  FirstInst->getOperand(0)->getName() + ".pn");
85  NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
86  InsertNewInstBefore(NewLHS, PN);
87  LHSVal = NewLHS;
88  }
89 
90  if (!RHSVal) {
91  NewRHS = PHINode::Create(RHSType, PN.getNumIncomingValues(),
92  FirstInst->getOperand(1)->getName() + ".pn");
93  NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
94  InsertNewInstBefore(NewRHS, PN);
95  RHSVal = NewRHS;
96  }
97 
98  // Add all operands to the new PHIs.
99  if (NewLHS || NewRHS) {
100  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
101  Instruction *InInst = cast<Instruction>(PN.getIncomingValue(i));
102  if (NewLHS) {
103  Value *NewInLHS = InInst->getOperand(0);
104  NewLHS->addIncoming(NewInLHS, PN.getIncomingBlock(i));
105  }
106  if (NewRHS) {
107  Value *NewInRHS = InInst->getOperand(1);
108  NewRHS->addIncoming(NewInRHS, PN.getIncomingBlock(i));
109  }
110  }
111  }
112 
113  if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst)) {
114  CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
115  LHSVal, RHSVal);
116  NewCI->setDebugLoc(FirstInst->getDebugLoc());
117  return NewCI;
118  }
119 
120  BinaryOperator *BinOp = cast<BinaryOperator>(FirstInst);
121  BinaryOperator *NewBinOp =
122  BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
123  if (isNUW) NewBinOp->setHasNoUnsignedWrap();
124  if (isNSW) NewBinOp->setHasNoSignedWrap();
125  if (isExact) NewBinOp->setIsExact();
126  NewBinOp->setDebugLoc(FirstInst->getDebugLoc());
127  return NewBinOp;
128 }
129 
130 Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
131  GetElementPtrInst *FirstInst =cast<GetElementPtrInst>(PN.getIncomingValue(0));
132 
133  SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
134  FirstInst->op_end());
135  // This is true if all GEP bases are allocas and if all indices into them are
136  // constants.
137  bool AllBasePointersAreAllocas = true;
138 
139  // We don't want to replace this phi if the replacement would require
140  // more than one phi, which leads to higher register pressure. This is
141  // especially bad when the PHIs are in the header of a loop.
142  bool NeededPhi = false;
143 
144  bool AllInBounds = true;
145 
146  // Scan to see if all operands are the same opcode, and all have one use.
147  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
149  if (!GEP || !GEP->hasOneUse() || GEP->getType() != FirstInst->getType() ||
150  GEP->getNumOperands() != FirstInst->getNumOperands())
151  return nullptr;
152 
153  AllInBounds &= GEP->isInBounds();
154 
155  // Keep track of whether or not all GEPs are of alloca pointers.
156  if (AllBasePointersAreAllocas &&
157  (!isa<AllocaInst>(GEP->getOperand(0)) ||
158  !GEP->hasAllConstantIndices()))
159  AllBasePointersAreAllocas = false;
160 
161  // Compare the operand lists.
162  for (unsigned op = 0, e = FirstInst->getNumOperands(); op != e; ++op) {
163  if (FirstInst->getOperand(op) == GEP->getOperand(op))
164  continue;
165 
166  // Don't merge two GEPs when two operands differ (introducing phi nodes)
167  // if one of the PHIs has a constant for the index. The index may be
168  // substantially cheaper to compute for the constants, so making it a
169  // variable index could pessimize the path. This also handles the case
170  // for struct indices, which must always be constant.
171  if (isa<ConstantInt>(FirstInst->getOperand(op)) ||
172  isa<ConstantInt>(GEP->getOperand(op)))
173  return nullptr;
174 
175  if (FirstInst->getOperand(op)->getType() !=GEP->getOperand(op)->getType())
176  return nullptr;
177 
178  // If we already needed a PHI for an earlier operand, and another operand
179  // also requires a PHI, we'd be introducing more PHIs than we're
180  // eliminating, which increases register pressure on entry to the PHI's
181  // block.
182  if (NeededPhi)
183  return nullptr;
184 
185  FixedOperands[op] = nullptr; // Needs a PHI.
186  NeededPhi = true;
187  }
188  }
189 
190  // If all of the base pointers of the PHI'd GEPs are from allocas, don't
191  // bother doing this transformation. At best, this will just save a bit of
192  // offset calculation, but all the predecessors will have to materialize the
193  // stack address into a register anyway. We'd actually rather *clone* the
194  // load up into the predecessors so that we have a load of a gep of an alloca,
195  // which can usually all be folded into the load.
196  if (AllBasePointersAreAllocas)
197  return nullptr;
198 
199  // Otherwise, this is safe to transform. Insert PHI nodes for each operand
200  // that is variable.
201  SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());
202 
203  bool HasAnyPHIs = false;
204  for (unsigned i = 0, e = FixedOperands.size(); i != e; ++i) {
205  if (FixedOperands[i]) continue; // operand doesn't need a phi.
206  Value *FirstOp = FirstInst->getOperand(i);
207  PHINode *NewPN = PHINode::Create(FirstOp->getType(), e,
208  FirstOp->getName()+".pn");
209  InsertNewInstBefore(NewPN, PN);
210 
211  NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
212  OperandPhis[i] = NewPN;
213  FixedOperands[i] = NewPN;
214  HasAnyPHIs = true;
215  }
216 
217 
218  // Add all operands to the new PHIs.
219  if (HasAnyPHIs) {
220  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
221  GetElementPtrInst *InGEP =cast<GetElementPtrInst>(PN.getIncomingValue(i));
222  BasicBlock *InBB = PN.getIncomingBlock(i);
223 
224  for (unsigned op = 0, e = OperandPhis.size(); op != e; ++op)
225  if (PHINode *OpPhi = OperandPhis[op])
226  OpPhi->addIncoming(InGEP->getOperand(op), InBB);
227  }
228  }
229 
230  Value *Base = FixedOperands[0];
231  GetElementPtrInst *NewGEP =
233  makeArrayRef(FixedOperands).slice(1));
234  if (AllInBounds) NewGEP->setIsInBounds();
235  NewGEP->setDebugLoc(FirstInst->getDebugLoc());
236  return NewGEP;
237 }
238 
239 
240 /// Return true if we know that it is safe to sink the load out of the block
241 /// that defines it. This means that it must be obvious the value of the load is
242 /// not changed from the point of the load to the end of the block it is in.
243 ///
244 /// Finally, it is safe, but not profitable, to sink a load targeting a
245 /// non-address-taken alloca. Doing so will cause us to not promote the alloca
246 /// to a register.
248  BasicBlock::iterator BBI = L, E = L->getParent()->end();
249 
250  for (++BBI; BBI != E; ++BBI)
251  if (BBI->mayWriteToMemory())
252  return false;
253 
254  // Check for non-address taken alloca. If not address-taken already, it isn't
255  // profitable to do this xform.
256  if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
257  bool isAddressTaken = false;
258  for (User *U : AI->users()) {
259  if (isa<LoadInst>(U)) continue;
260  if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
261  // If storing TO the alloca, then the address isn't taken.
262  if (SI->getOperand(1) == AI) continue;
263  }
264  isAddressTaken = true;
265  break;
266  }
267 
268  if (!isAddressTaken && AI->isStaticAlloca())
269  return false;
270  }
271 
272  // If this load is a load from a GEP with a constant offset from an alloca,
273  // then we don't want to sink it. In its present form, it will be
274  // load [constant stack offset]. Sinking it will cause us to have to
275  // materialize the stack addresses in each predecessor in a register only to
276  // do a shared load from register in the successor.
277  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
278  if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
279  if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
280  return false;
281 
282  return true;
283 }
284 
285 Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
286  LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));
287 
288  // FIXME: This is overconservative; this transform is allowed in some cases
289  // for atomic operations.
290  if (FirstLI->isAtomic())
291  return nullptr;
292 
293  // When processing loads, we need to propagate two bits of information to the
294  // sunk load: whether it is volatile, and what its alignment is. We currently
295  // don't sink loads when some have their alignment specified and some don't.
296  // visitLoadInst will propagate an alignment onto the load when TD is around,
297  // and if TD isn't around, we can't handle the mixed case.
298  bool isVolatile = FirstLI->isVolatile();
299  unsigned LoadAlignment = FirstLI->getAlignment();
300  unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();
301 
302  // We can't sink the load if the loaded value could be modified between the
303  // load and the PHI.
304  if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
306  return nullptr;
307 
308  // If the PHI is of volatile loads and the load block has multiple
309  // successors, sinking it would remove a load of the volatile value from
310  // the path through the other successor.
311  if (isVolatile &&
312  FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
313  return nullptr;
314 
315  // Check to see if all arguments are the same operation.
316  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
318  if (!LI || !LI->hasOneUse())
319  return nullptr;
320 
321  // We can't sink the load if the loaded value could be modified between
322  // the load and the PHI.
323  if (LI->isVolatile() != isVolatile ||
324  LI->getParent() != PN.getIncomingBlock(i) ||
325  LI->getPointerAddressSpace() != LoadAddrSpace ||
327  return nullptr;
328 
329  // If some of the loads have an alignment specified but not all of them,
330  // we can't do the transformation.
331  if ((LoadAlignment != 0) != (LI->getAlignment() != 0))
332  return nullptr;
333 
334  LoadAlignment = std::min(LoadAlignment, LI->getAlignment());
335 
336  // If the PHI is of volatile loads and the load block has multiple
337  // successors, sinking it would remove a load of the volatile value from
338  // the path through the other successor.
339  if (isVolatile &&
340  LI->getParent()->getTerminator()->getNumSuccessors() != 1)
341  return nullptr;
342  }
343 
344  // Okay, they are all the same operation. Create a new PHI node of the
345  // correct type, and PHI together all of the LHS's of the instructions.
346  PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
348  PN.getName()+".in");
349 
350  Value *InVal = FirstLI->getOperand(0);
351  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
352 
353  // Add all operands to the new PHI.
354  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
355  Value *NewInVal = cast<LoadInst>(PN.getIncomingValue(i))->getOperand(0);
356  if (NewInVal != InVal)
357  InVal = nullptr;
358  NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
359  }
360 
361  Value *PhiVal;
362  if (InVal) {
363  // The new PHI unions all of the same values together. This is really
364  // common, so we handle it intelligently here for compile-time speed.
365  PhiVal = InVal;
366  delete NewPN;
367  } else {
368  InsertNewInstBefore(NewPN, PN);
369  PhiVal = NewPN;
370  }
371 
372  // If this was a volatile load that we are merging, make sure to loop through
373  // and mark all the input loads as non-volatile. If we don't do this, we will
374  // insert a new volatile load and the old ones will not be deletable.
375  if (isVolatile)
376  for (Value *IncValue : PN.incoming_values())
377  cast<LoadInst>(IncValue)->setVolatile(false);
378 
379  LoadInst *NewLI = new LoadInst(PhiVal, "", isVolatile, LoadAlignment);
380  NewLI->setDebugLoc(FirstLI->getDebugLoc());
381  return NewLI;
382 }
383 
384 
385 
386 /// If all operands to a PHI node are the same "unary" operator and they all are
387 /// only used by the PHI, PHI together their inputs, and do the operation once,
388 /// to the result of the PHI.
389 Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
390  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
391 
392  if (isa<GetElementPtrInst>(FirstInst))
393  return FoldPHIArgGEPIntoPHI(PN);
394  if (isa<LoadInst>(FirstInst))
395  return FoldPHIArgLoadIntoPHI(PN);
396 
397  // Scan the instruction, looking for input operations that can be folded away.
398  // If all input operands to the phi are the same instruction (e.g. a cast from
399  // the same type or "+42") we can pull the operation through the PHI, reducing
400  // code size and simplifying code.
401  Constant *ConstantOp = nullptr;
402  Type *CastSrcTy = nullptr;
403  bool isNUW = false, isNSW = false, isExact = false;
404 
405  if (isa<CastInst>(FirstInst)) {
406  CastSrcTy = FirstInst->getOperand(0)->getType();
407 
408  // Be careful about transforming integer PHIs. We don't want to pessimize
409  // the code by turning an i32 into an i1293.
410  if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
411  if (!ShouldChangeType(PN.getType(), CastSrcTy))
412  return nullptr;
413  }
414  } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
415  // Can fold binop, compare or shift here if the RHS is a constant,
416  // otherwise call FoldPHIArgBinOpIntoPHI.
417  ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
418  if (!ConstantOp)
419  return FoldPHIArgBinOpIntoPHI(PN);
420 
421  if (OverflowingBinaryOperator *BO =
422  dyn_cast<OverflowingBinaryOperator>(FirstInst)) {
423  isNUW = BO->hasNoUnsignedWrap();
424  isNSW = BO->hasNoSignedWrap();
425  } else if (PossiblyExactOperator *PEO =
426  dyn_cast<PossiblyExactOperator>(FirstInst))
427  isExact = PEO->isExact();
428  } else {
429  return nullptr; // Cannot fold this operation.
430  }
431 
432  // Check to see if all arguments are the same operation.
433  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
435  if (!I || !I->hasOneUse() || !I->isSameOperationAs(FirstInst))
436  return nullptr;
437  if (CastSrcTy) {
438  if (I->getOperand(0)->getType() != CastSrcTy)
439  return nullptr; // Cast operation must match.
440  } else if (I->getOperand(1) != ConstantOp) {
441  return nullptr;
442  }
443 
444  if (isNUW)
445  isNUW = cast<OverflowingBinaryOperator>(I)->hasNoUnsignedWrap();
446  if (isNSW)
447  isNSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
448  if (isExact)
449  isExact = cast<PossiblyExactOperator>(I)->isExact();
450  }
451 
452  // Okay, they are all the same operation. Create a new PHI node of the
453  // correct type, and PHI together all of the LHS's of the instructions.
454  PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
456  PN.getName()+".in");
457 
458  Value *InVal = FirstInst->getOperand(0);
459  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
460 
461  // Add all operands to the new PHI.
462  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
463  Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
464  if (NewInVal != InVal)
465  InVal = nullptr;
466  NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
467  }
468 
469  Value *PhiVal;
470  if (InVal) {
471  // The new PHI unions all of the same values together. This is really
472  // common, so we handle it intelligently here for compile-time speed.
473  PhiVal = InVal;
474  delete NewPN;
475  } else {
476  InsertNewInstBefore(NewPN, PN);
477  PhiVal = NewPN;
478  }
479 
480  // Insert and return the new operation.
481  if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst)) {
482  CastInst *NewCI = CastInst::Create(FirstCI->getOpcode(), PhiVal,
483  PN.getType());
484  NewCI->setDebugLoc(FirstInst->getDebugLoc());
485  return NewCI;
486  }
487 
488  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst)) {
489  BinOp = BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
490  if (isNUW) BinOp->setHasNoUnsignedWrap();
491  if (isNSW) BinOp->setHasNoSignedWrap();
492  if (isExact) BinOp->setIsExact();
493  BinOp->setDebugLoc(FirstInst->getDebugLoc());
494  return BinOp;
495  }
496 
497  CmpInst *CIOp = cast<CmpInst>(FirstInst);
498  CmpInst *NewCI = CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
499  PhiVal, ConstantOp);
500  NewCI->setDebugLoc(FirstInst->getDebugLoc());
501  return NewCI;
502 }
503 
504 /// Return true if this PHI node is only used by a PHI node cycle that is dead.
505 static bool DeadPHICycle(PHINode *PN,
506  SmallPtrSetImpl<PHINode*> &PotentiallyDeadPHIs) {
507  if (PN->use_empty()) return true;
508  if (!PN->hasOneUse()) return false;
509 
510  // Remember this node, and if we find the cycle, return.
511  if (!PotentiallyDeadPHIs.insert(PN).second)
512  return true;
513 
514  // Don't scan crazily complex things.
515  if (PotentiallyDeadPHIs.size() == 16)
516  return false;
517 
518  if (PHINode *PU = dyn_cast<PHINode>(PN->user_back()))
519  return DeadPHICycle(PU, PotentiallyDeadPHIs);
520 
521  return false;
522 }
523 
524 /// Return true if this phi node is always equal to NonPhiInVal.
525 /// This happens with mutually cyclic phi nodes like:
526 /// z = some value; x = phi (y, z); y = phi (x, z)
527 static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
528  SmallPtrSetImpl<PHINode*> &ValueEqualPHIs) {
529  // See if we already saw this PHI node.
530  if (!ValueEqualPHIs.insert(PN).second)
531  return true;
532 
533  // Don't scan crazily complex things.
534  if (ValueEqualPHIs.size() == 16)
535  return false;
536 
537  // Scan the operands to see if they are either phi nodes or are equal to
538  // the value.
539  for (Value *Op : PN->incoming_values()) {
540  if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
541  if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
542  return false;
543  } else if (Op != NonPhiInVal)
544  return false;
545  }
546 
547  return true;
548 }
549 
550 
551 namespace {
552 struct PHIUsageRecord {
553  unsigned PHIId; // The ID # of the PHI (something determinstic to sort on)
554  unsigned Shift; // The amount shifted.
555  Instruction *Inst; // The trunc instruction.
556 
557  PHIUsageRecord(unsigned pn, unsigned Sh, Instruction *User)
558  : PHIId(pn), Shift(Sh), Inst(User) {}
559 
560  bool operator<(const PHIUsageRecord &RHS) const {
561  if (PHIId < RHS.PHIId) return true;
562  if (PHIId > RHS.PHIId) return false;
563  if (Shift < RHS.Shift) return true;
564  if (Shift > RHS.Shift) return false;
565  return Inst->getType()->getPrimitiveSizeInBits() <
566  RHS.Inst->getType()->getPrimitiveSizeInBits();
567  }
568 };
569 
570 struct LoweredPHIRecord {
571  PHINode *PN; // The PHI that was lowered.
572  unsigned Shift; // The amount shifted.
573  unsigned Width; // The width extracted.
574 
575  LoweredPHIRecord(PHINode *pn, unsigned Sh, Type *Ty)
576  : PN(pn), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}
577 
578  // Ctor form used by DenseMap.
579  LoweredPHIRecord(PHINode *pn, unsigned Sh)
580  : PN(pn), Shift(Sh), Width(0) {}
581 };
582 }
583 
584 namespace llvm {
585  template<>
586  struct DenseMapInfo<LoweredPHIRecord> {
587  static inline LoweredPHIRecord getEmptyKey() {
588  return LoweredPHIRecord(nullptr, 0);
589  }
590  static inline LoweredPHIRecord getTombstoneKey() {
591  return LoweredPHIRecord(nullptr, 1);
592  }
593  static unsigned getHashValue(const LoweredPHIRecord &Val) {
594  return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
595  (Val.Width>>3);
596  }
597  static bool isEqual(const LoweredPHIRecord &LHS,
598  const LoweredPHIRecord &RHS) {
599  return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
600  LHS.Width == RHS.Width;
601  }
602  };
603 }
604 
605 
606 /// This is an integer PHI and we know that it has an illegal type: see if it is
607 /// only used by trunc or trunc(lshr) operations. If so, we split the PHI into
608 /// the various pieces being extracted. This sort of thing is introduced when
609 /// SROA promotes an aggregate to large integer values.
610 ///
611 /// TODO: The user of the trunc may be an bitcast to float/double/vector or an
612 /// inttoptr. We should produce new PHIs in the right type.
613 ///
615  // PHIUsers - Keep track of all of the truncated values extracted from a set
616  // of PHIs, along with their offset. These are the things we want to rewrite.
618 
619  // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
620  // nodes which are extracted from. PHIsToSlice is a set we use to avoid
621  // revisiting PHIs, PHIsInspected is a ordered list of PHIs that we need to
622  // check the uses of (to ensure they are all extracts).
623  SmallVector<PHINode*, 8> PHIsToSlice;
624  SmallPtrSet<PHINode*, 8> PHIsInspected;
625 
626  PHIsToSlice.push_back(&FirstPhi);
627  PHIsInspected.insert(&FirstPhi);
628 
629  for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
630  PHINode *PN = PHIsToSlice[PHIId];
631 
632  // Scan the input list of the PHI. If any input is an invoke, and if the
633  // input is defined in the predecessor, then we won't be split the critical
634  // edge which is required to insert a truncate. Because of this, we have to
635  // bail out.
636  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
638  if (!II) continue;
639  if (II->getParent() != PN->getIncomingBlock(i))
640  continue;
641 
642  // If we have a phi, and if it's directly in the predecessor, then we have
643  // a critical edge where we need to put the truncate. Since we can't
644  // split the edge in instcombine, we have to bail out.
645  return nullptr;
646  }
647 
648  for (User *U : PN->users()) {
649  Instruction *UserI = cast<Instruction>(U);
650 
651  // If the user is a PHI, inspect its uses recursively.
652  if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
653  if (PHIsInspected.insert(UserPN).second)
654  PHIsToSlice.push_back(UserPN);
655  continue;
656  }
657 
658  // Truncates are always ok.
659  if (isa<TruncInst>(UserI)) {
660  PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
661  continue;
662  }
663 
664  // Otherwise it must be a lshr which can only be used by one trunc.
665  if (UserI->getOpcode() != Instruction::LShr ||
666  !UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
667  !isa<ConstantInt>(UserI->getOperand(1)))
668  return nullptr;
669 
670  unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
671  PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
672  }
673  }
674 
675  // If we have no users, they must be all self uses, just nuke the PHI.
676  if (PHIUsers.empty())
677  return ReplaceInstUsesWith(FirstPhi, UndefValue::get(FirstPhi.getType()));
678 
679  // If this phi node is transformable, create new PHIs for all the pieces
680  // extracted out of it. First, sort the users by their offset and size.
681  array_pod_sort(PHIUsers.begin(), PHIUsers.end());
682 
683  DEBUG(dbgs() << "SLICING UP PHI: " << FirstPhi << '\n';
684  for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
685  dbgs() << "AND USER PHI #" << i << ": " << *PHIsToSlice[i] << '\n';
686  );
687 
688  // PredValues - This is a temporary used when rewriting PHI nodes. It is
689  // hoisted out here to avoid construction/destruction thrashing.
691 
692  // ExtractedVals - Each new PHI we introduce is saved here so we don't
693  // introduce redundant PHIs.
695 
696  for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
697  unsigned PHIId = PHIUsers[UserI].PHIId;
698  PHINode *PN = PHIsToSlice[PHIId];
699  unsigned Offset = PHIUsers[UserI].Shift;
700  Type *Ty = PHIUsers[UserI].Inst->getType();
701 
702  PHINode *EltPHI;
703 
704  // If we've already lowered a user like this, reuse the previously lowered
705  // value.
706  if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == nullptr) {
707 
708  // Otherwise, Create the new PHI node for this user.
709  EltPHI = PHINode::Create(Ty, PN->getNumIncomingValues(),
710  PN->getName()+".off"+Twine(Offset), PN);
711  assert(EltPHI->getType() != PN->getType() &&
712  "Truncate didn't shrink phi?");
713 
714  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
715  BasicBlock *Pred = PN->getIncomingBlock(i);
716  Value *&PredVal = PredValues[Pred];
717 
718  // If we already have a value for this predecessor, reuse it.
719  if (PredVal) {
720  EltPHI->addIncoming(PredVal, Pred);
721  continue;
722  }
723 
724  // Handle the PHI self-reuse case.
725  Value *InVal = PN->getIncomingValue(i);
726  if (InVal == PN) {
727  PredVal = EltPHI;
728  EltPHI->addIncoming(PredVal, Pred);
729  continue;
730  }
731 
732  if (PHINode *InPHI = dyn_cast<PHINode>(PN)) {
733  // If the incoming value was a PHI, and if it was one of the PHIs we
734  // already rewrote it, just use the lowered value.
735  if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
736  PredVal = Res;
737  EltPHI->addIncoming(PredVal, Pred);
738  continue;
739  }
740  }
741 
742  // Otherwise, do an extract in the predecessor.
743  Builder->SetInsertPoint(Pred, Pred->getTerminator());
744  Value *Res = InVal;
745  if (Offset)
746  Res = Builder->CreateLShr(Res, ConstantInt::get(InVal->getType(),
747  Offset), "extract");
748  Res = Builder->CreateTrunc(Res, Ty, "extract.t");
749  PredVal = Res;
750  EltPHI->addIncoming(Res, Pred);
751 
752  // If the incoming value was a PHI, and if it was one of the PHIs we are
753  // rewriting, we will ultimately delete the code we inserted. This
754  // means we need to revisit that PHI to make sure we extract out the
755  // needed piece.
756  if (PHINode *OldInVal = dyn_cast<PHINode>(PN->getIncomingValue(i)))
757  if (PHIsInspected.count(OldInVal)) {
758  unsigned RefPHIId = std::find(PHIsToSlice.begin(),PHIsToSlice.end(),
759  OldInVal)-PHIsToSlice.begin();
760  PHIUsers.push_back(PHIUsageRecord(RefPHIId, Offset,
761  cast<Instruction>(Res)));
762  ++UserE;
763  }
764  }
765  PredValues.clear();
766 
767  DEBUG(dbgs() << " Made element PHI for offset " << Offset << ": "
768  << *EltPHI << '\n');
769  ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
770  }
771 
772  // Replace the use of this piece with the PHI node.
773  ReplaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
774  }
775 
776  // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
777  // with undefs.
778  Value *Undef = UndefValue::get(FirstPhi.getType());
779  for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
780  ReplaceInstUsesWith(*PHIsToSlice[i], Undef);
781  return ReplaceInstUsesWith(FirstPhi, Undef);
782 }
783 
784 // PHINode simplification
785 //
787  if (Value *V = SimplifyInstruction(&PN, DL, TLI, DT, AC))
788  return ReplaceInstUsesWith(PN, V);
789 
790  // If all PHI operands are the same operation, pull them through the PHI,
791  // reducing code size.
792  if (isa<Instruction>(PN.getIncomingValue(0)) &&
793  isa<Instruction>(PN.getIncomingValue(1)) &&
794  cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
795  cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
796  // FIXME: The hasOneUse check will fail for PHIs that use the value more
797  // than themselves more than once.
798  PN.getIncomingValue(0)->hasOneUse())
799  if (Instruction *Result = FoldPHIArgOpIntoPHI(PN))
800  return Result;
801 
802  // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
803  // this PHI only has a single use (a PHI), and if that PHI only has one use (a
804  // PHI)... break the cycle.
805  if (PN.hasOneUse()) {
806  Instruction *PHIUser = cast<Instruction>(PN.user_back());
807  if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
808  SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
809  PotentiallyDeadPHIs.insert(&PN);
810  if (DeadPHICycle(PU, PotentiallyDeadPHIs))
811  return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
812  }
813 
814  // If this phi has a single use, and if that use just computes a value for
815  // the next iteration of a loop, delete the phi. This occurs with unused
816  // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
817  // common case here is good because the only other things that catch this
818  // are induction variable analysis (sometimes) and ADCE, which is only run
819  // late.
820  if (PHIUser->hasOneUse() &&
821  (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
822  PHIUser->user_back() == &PN) {
823  return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
824  }
825  }
826 
827  // We sometimes end up with phi cycles that non-obviously end up being the
828  // same value, for example:
829  // z = some value; x = phi (y, z); y = phi (x, z)
830  // where the phi nodes don't necessarily need to be in the same block. Do a
831  // quick check to see if the PHI node only contains a single non-phi value, if
832  // so, scan to see if the phi cycle is actually equal to that value.
833  {
834  unsigned InValNo = 0, NumIncomingVals = PN.getNumIncomingValues();
835  // Scan for the first non-phi operand.
836  while (InValNo != NumIncomingVals &&
837  isa<PHINode>(PN.getIncomingValue(InValNo)))
838  ++InValNo;
839 
840  if (InValNo != NumIncomingVals) {
841  Value *NonPhiInVal = PN.getIncomingValue(InValNo);
842 
843  // Scan the rest of the operands to see if there are any conflicts, if so
844  // there is no need to recursively scan other phis.
845  for (++InValNo; InValNo != NumIncomingVals; ++InValNo) {
846  Value *OpVal = PN.getIncomingValue(InValNo);
847  if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
848  break;
849  }
850 
851  // If we scanned over all operands, then we have one unique value plus
852  // phi values. Scan PHI nodes to see if they all merge in each other or
853  // the value.
854  if (InValNo == NumIncomingVals) {
855  SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
856  if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
857  return ReplaceInstUsesWith(PN, NonPhiInVal);
858  }
859  }
860  }
861 
862  // If there are multiple PHIs, sort their operands so that they all list
863  // the blocks in the same order. This will help identical PHIs be eliminated
864  // by other passes. Other passes shouldn't depend on this for correctness
865  // however.
866  PHINode *FirstPN = cast<PHINode>(PN.getParent()->begin());
867  if (&PN != FirstPN)
868  for (unsigned i = 0, e = FirstPN->getNumIncomingValues(); i != e; ++i) {
869  BasicBlock *BBA = PN.getIncomingBlock(i);
870  BasicBlock *BBB = FirstPN->getIncomingBlock(i);
871  if (BBA != BBB) {
872  Value *VA = PN.getIncomingValue(i);
873  unsigned j = PN.getBasicBlockIndex(BBB);
874  Value *VB = PN.getIncomingValue(j);
875  PN.setIncomingBlock(i, BBB);
876  PN.setIncomingValue(i, VB);
877  PN.setIncomingBlock(j, BBA);
878  PN.setIncomingValue(j, VA);
879  // NOTE: Instcombine normally would want us to "return &PN" if we
880  // modified any of the operands of an instruction. However, since we
881  // aren't adding or removing uses (just rearranging them) we don't do
882  // this in this case.
883  }
884  }
885 
886  // If this is an integer PHI and we know that it has an illegal type, see if
887  // it is only used by trunc or trunc(lshr) operations. If so, we split the
888  // PHI into the various pieces being extracted. This sort of thing is
889  // introduced when SROA promotes an aggregate to a single large integer type.
890  if (PN.getType()->isIntegerTy() &&
892  if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
893  return Res;
894 
895  return nullptr;
896 }
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Definition: IRBuilder.h:842
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:679
void addIncoming(Value *V, BasicBlock *BB)
addIncoming - Add an incoming value to the end of the PHI list
Type * getSourceElementType() const
Definition: Instructions.h:926
unsigned getNumOperands() const
Definition: User.h:138
static LoweredPHIRecord getEmptyKey()
size_type count(PtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:276
LoadInst - an instruction for reading from memory.
Definition: Instructions.h:177
Hexagon Common GEP
#define op(i)
op_iterator op_begin()
Definition: User.h:183
static bool isSafeAndProfitableToSinkLoad(LoadInst *L)
Return true if we know that it is safe to sink the load out of the block that defines it...
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:188
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:242
iterator begin()
Instruction iterator methods.
Definition: BasicBlock.h:231
static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal, SmallPtrSetImpl< PHINode * > &ValueEqualPHIs)
Return true if this phi node is always equal to NonPhiInVal.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:79
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:389
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
Definition: ArrayRef.h:308
void setHasNoUnsignedWrap(bool b=true)
Set or clear the nuw (no unsigned wrap) flag on this instruction, which must be an operator which supports this flag...
void setIsInBounds(bool b=true)
setIsInBounds - Set or clear the inbounds flag on this GEP instruction.
Number of individual test Apply this number of consecutive mutations to each input exit after the first new interesting input is found the minimized corpus is saved into the first input directory Number of jobs to run If min(jobs, NumberOfCpuCores()/2)\" is used.") FUZZER_FLAG_INT(reload
Instruction * SliceUpIllegalIntegerPHI(PHINode &PN)
This is an integer PHI and we know that it has an illegal type: see if it is only used by trunc or tr...
static bool isEqual(const LoweredPHIRecord &LHS, const LoweredPHIRecord &RHS)
static bool DeadPHICycle(PHINode *PN, SmallPtrSetImpl< PHINode * > &PotentiallyDeadPHIs)
Return true if this PHI node is only used by a PHI node cycle that is dead.
bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const
Definition: SmallVector.h:57
StoreInst - an instruction for storing to memory.
Definition: Instructions.h:316
bool isAtomic() const
isAtomic - Return true if this instruction has an AtomicOrdering of unordered or higher.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block...
Definition: IRBuilder.h:85
bool isInBounds() const
isInBounds - Determine whether the GEP has the inbounds flag.
unsigned getNumIncomingValues() const
getNumIncomingValues - Return the number of incoming edges
unsigned getNumSuccessors() const
Return the number of successors that this terminator has.
Definition: InstrTypes.h:57
GetElementPtrInst - an instruction for type-safe pointer arithmetic to access elements of arrays and ...
Definition: Instructions.h:830
void array_pod_sort(IteratorTy Start, IteratorTy End)
array_pod_sort - This sorts an array with the specified start and end extent.
Definition: STLExtras.h:287
static CmpInst * Create(OtherOps Op, unsigned short predicate, Value *S1, Value *S2, const Twine &Name="", Instruction *InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate and the two operands.
void setDebugLoc(DebugLoc Loc)
setDebugLoc - Set the debug location information for this instruction.
Definition: Instruction.h:227
LLVM Basic Block Representation.
Definition: BasicBlock.h:65
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
size_type size() const
Definition: SmallPtrSet.h:79
This is an important base class in LLVM.
Definition: Constant.h:41
Instruction * ReplaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:264
const DebugLoc & getDebugLoc() const
getDebugLoc - Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:230
A udiv or sdiv instruction, which can be marked as "exact", indicating that no bits are destroyed...
Definition: Operator.h:126
op_iterator op_end()
Definition: User.h:185
BasicBlock * getIncomingBlock(unsigned i) const
getIncomingBlock - Return incoming basic block number i.
Utility class for integer arithmetic operators which may exhibit overflow - Add, Sub, and Mul.
Definition: Operator.h:74
Value * getOperand(unsigned i) const
Definition: User.h:118
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:760
static UndefValue * get(Type *T)
get() - Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1473
bool hasAllConstantIndices() const
hasAllConstantIndices - Return true if all of the indices of this GEP are constant integers...
Instruction * visitPHINode(PHINode &PN)
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Definition: Instructions.h:854
BinaryOps getOpcode() const
Definition: InstrTypes.h:323
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
Definition: SmallPtrSet.h:299
void setIncomingBlock(unsigned i, BasicBlock *BB)
Value * getIncomingValue(unsigned i) const
getIncomingValue - Return incoming value number i
iterator end()
Definition: BasicBlock.h:233
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:861
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:222
Instruction * user_back()
user_back - Specialize the methods defined in Value, as we know that an instruction can only be used ...
Definition: Instruction.h:69
bool isVolatile() const
isVolatile - Return true if this is a load from a volatile memory location.
Definition: Instructions.h:232
SequentialType * getType() const
Definition: Instructions.h:922
static LoweredPHIRecord getTombstoneKey()
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Definition: Constants.cpp:582
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr="", Instruction *InsertBefore=nullptr)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Definition: Instructions.h:289
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:123
bool isIntegerTy() const
isIntegerTy - True if this is an instance of IntegerType.
Definition: Type.h:193
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name=Twine(), Instruction *InsertBefore=nullptr)
Construct a binary instruction, given the opcode and the two operands.
iterator_range< user_iterator > users()
Definition: Value.h:300
LLVM_ATTRIBUTE_UNUSED_RESULT std::enable_if< !is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:285
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name="", Instruction *InsertBefore=nullptr)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
bool isLegalInteger(unsigned Width) const
Returns true if the specified type is known to be a native integer type supported by the CPU...
Definition: DataLayout.h:239
unsigned getAlignment() const
getAlignment - Return the alignment of the access that is being performed
Definition: Instructions.h:243
#define I(x, y, z)
Definition: MD5.cpp:54
TerminatorInst * getTerminator()
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:124
bool hasOneUse() const
Return true if there is exactly one user of this value.
Definition: Value.h:311
Instruction * InsertNewInstBefore(Instruction *New, Instruction &Old)
Inserts an instruction New before instruction Old.
OtherOps getOpcode() const
Get the opcode casted to the right type.
Definition: InstrTypes.h:755
bool use_empty() const
Definition: Value.h:275
bool operator<(int64_t V1, const APSInt &V2)
Definition: APSInt.h:332
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
getPrimitiveSizeInBits - Return the basic size of this type if it is a primitive type.
Definition: Type.cpp:121
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
Definition: IRBuilder.h:1189
LLVM Value Representation.
Definition: Value.h:69
This file provides internal interfaces used to implement the InstCombine.
unsigned getOpcode() const
getOpcode() returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:112
static unsigned getHashValue(const LoweredPHIRecord &Val)
InvokeInst - Invoke instruction.
#define DEBUG(X)
Definition: Debug.h:92
Value * SimplifyInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr)
SimplifyInstruction - See if we can compute a simplified version of this instruction.
bool isSameOperationAs(const Instruction *I, unsigned flags=0) const
This function determines if the specified instruction executes the same operation as the current one...
static bool isVolatile(Instruction *Inst)
void setIncomingValue(unsigned i, Value *V)
op_range incoming_values()
int getBasicBlockIndex(const BasicBlock *BB) const
getBasicBlockIndex - Return the first index of the specified basic block in the value list for this P...
const BasicBlock * getParent() const
Definition: Instruction.h:72
AllocaInst - an instruction to allocate memory on the stack.
Definition: Instructions.h:76