LLVM  3.7.0
CodeGen/Analysis.cpp
Go to the documentation of this file.
1 //===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines several CodeGen-specific LLVM IR analysis utilities.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/Analysis.h"
18 #include "llvm/IR/DataLayout.h"
19 #include "llvm/IR/DerivedTypes.h"
20 #include "llvm/IR/Function.h"
21 #include "llvm/IR/Instructions.h"
22 #include "llvm/IR/IntrinsicInst.h"
23 #include "llvm/IR/LLVMContext.h"
24 #include "llvm/IR/Module.h"
30 
31 using namespace llvm;
32 
33 /// Compute the linearized index of a member in a nested aggregate/struct/array
34 /// by recursing and accumulating CurIndex as long as there are indices in the
35 /// index list.
37  const unsigned *Indices,
38  const unsigned *IndicesEnd,
39  unsigned CurIndex) {
40  // Base case: We're done.
41  if (Indices && Indices == IndicesEnd)
42  return CurIndex;
43 
44  // Given a struct type, recursively traverse the elements.
45  if (StructType *STy = dyn_cast<StructType>(Ty)) {
46  for (StructType::element_iterator EB = STy->element_begin(),
47  EI = EB,
48  EE = STy->element_end();
49  EI != EE; ++EI) {
50  if (Indices && *Indices == unsigned(EI - EB))
51  return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
52  CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
53  }
54  assert(!Indices && "Unexpected out of bound");
55  return CurIndex;
56  }
57  // Given an array type, recursively traverse the elements.
58  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
59  Type *EltTy = ATy->getElementType();
60  unsigned NumElts = ATy->getNumElements();
61  // Compute the Linear offset when jumping one element of the array
62  unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
63  if (Indices) {
64  assert(*Indices < NumElts && "Unexpected out of bound");
65  // If the indice is inside the array, compute the index to the requested
66  // elt and recurse inside the element with the end of the indices list
67  CurIndex += EltLinearOffset* *Indices;
68  return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
69  }
70  CurIndex += EltLinearOffset*NumElts;
71  return CurIndex;
72  }
73  // We haven't found the type we're looking for, so keep searching.
74  return CurIndex + 1;
75 }
76 
77 /// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
78 /// EVTs that represent all the individual underlying
79 /// non-aggregate types that comprise it.
80 ///
81 /// If Offsets is non-null, it points to a vector to be filled in
82 /// with the in-memory offsets of each of the individual values.
83 ///
84 void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
85  Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
87  uint64_t StartingOffset) {
88  // Given a struct type, recursively traverse the elements.
89  if (StructType *STy = dyn_cast<StructType>(Ty)) {
90  const StructLayout *SL = DL.getStructLayout(STy);
91  for (StructType::element_iterator EB = STy->element_begin(),
92  EI = EB,
93  EE = STy->element_end();
94  EI != EE; ++EI)
95  ComputeValueVTs(TLI, DL, *EI, ValueVTs, Offsets,
96  StartingOffset + SL->getElementOffset(EI - EB));
97  return;
98  }
99  // Given an array type, recursively traverse the elements.
100  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
101  Type *EltTy = ATy->getElementType();
102  uint64_t EltSize = DL.getTypeAllocSize(EltTy);
103  for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
104  ComputeValueVTs(TLI, DL, EltTy, ValueVTs, Offsets,
105  StartingOffset + i * EltSize);
106  return;
107  }
108  // Interpret void as zero return values.
109  if (Ty->isVoidTy())
110  return;
111  // Base case: we can get an EVT for this LLVM IR type.
112  ValueVTs.push_back(TLI.getValueType(DL, Ty));
113  if (Offsets)
114  Offsets->push_back(StartingOffset);
115 }
116 
117 /// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
119  V = V->stripPointerCasts();
120  GlobalValue *GV = dyn_cast<GlobalValue>(V);
122 
123  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
124  assert(Var->hasInitializer() &&
125  "The EH catch-all value must have an initializer");
126  Value *Init = Var->getInitializer();
127  GV = dyn_cast<GlobalValue>(Init);
128  if (!GV) V = cast<ConstantPointerNull>(Init);
129  }
130 
131  assert((GV || isa<ConstantPointerNull>(V)) &&
132  "TypeInfo must be a global variable or NULL");
133  return GV;
134 }
135 
136 /// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
137 /// processed uses a memory 'm' constraint.
138 bool
140  const TargetLowering &TLI) {
141  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
142  InlineAsm::ConstraintInfo &CI = CInfos[i];
143  for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
145  if (CType == TargetLowering::C_Memory)
146  return true;
147  }
148 
149  // Indirect operand accesses access memory.
150  if (CI.isIndirect)
151  return true;
152  }
153 
154  return false;
155 }
156 
157 /// getFCmpCondCode - Return the ISD condition code corresponding to
158 /// the given LLVM IR floating-point condition code. This includes
159 /// consideration of global floating-point math flags.
160 ///
162  switch (Pred) {
163  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
164  case FCmpInst::FCMP_OEQ: return ISD::SETOEQ;
165  case FCmpInst::FCMP_OGT: return ISD::SETOGT;
166  case FCmpInst::FCMP_OGE: return ISD::SETOGE;
167  case FCmpInst::FCMP_OLT: return ISD::SETOLT;
168  case FCmpInst::FCMP_OLE: return ISD::SETOLE;
169  case FCmpInst::FCMP_ONE: return ISD::SETONE;
170  case FCmpInst::FCMP_ORD: return ISD::SETO;
171  case FCmpInst::FCMP_UNO: return ISD::SETUO;
172  case FCmpInst::FCMP_UEQ: return ISD::SETUEQ;
173  case FCmpInst::FCMP_UGT: return ISD::SETUGT;
174  case FCmpInst::FCMP_UGE: return ISD::SETUGE;
175  case FCmpInst::FCMP_ULT: return ISD::SETULT;
176  case FCmpInst::FCMP_ULE: return ISD::SETULE;
177  case FCmpInst::FCMP_UNE: return ISD::SETUNE;
178  case FCmpInst::FCMP_TRUE: return ISD::SETTRUE;
179  default: llvm_unreachable("Invalid FCmp predicate opcode!");
180  }
181 }
182 
184  switch (CC) {
185  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
186  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
187  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
188  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
189  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
190  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
191  default: return CC;
192  }
193 }
194 
195 /// getICmpCondCode - Return the ISD condition code corresponding to
196 /// the given LLVM IR integer condition code.
197 ///
199  switch (Pred) {
200  case ICmpInst::ICMP_EQ: return ISD::SETEQ;
201  case ICmpInst::ICMP_NE: return ISD::SETNE;
202  case ICmpInst::ICMP_SLE: return ISD::SETLE;
203  case ICmpInst::ICMP_ULE: return ISD::SETULE;
204  case ICmpInst::ICMP_SGE: return ISD::SETGE;
205  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
206  case ICmpInst::ICMP_SLT: return ISD::SETLT;
207  case ICmpInst::ICMP_ULT: return ISD::SETULT;
208  case ICmpInst::ICMP_SGT: return ISD::SETGT;
209  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
210  default:
211  llvm_unreachable("Invalid ICmp predicate opcode!");
212  }
213 }
214 
215 static bool isNoopBitcast(Type *T1, Type *T2,
216  const TargetLoweringBase& TLI) {
217  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
218  (isa<VectorType>(T1) && isa<VectorType>(T2) &&
219  TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
220 }
221 
222 /// Look through operations that will be free to find the earliest source of
223 /// this value.
224 ///
225 /// @param ValLoc If V has aggegate type, we will be interested in a particular
226 /// scalar component. This records its address; the reverse of this list gives a
227 /// sequence of indices appropriate for an extractvalue to locate the important
228 /// value. This value is updated during the function and on exit will indicate
229 /// similar information for the Value returned.
230 ///
231 /// @param DataBits If this function looks through truncate instructions, this
232 /// will record the smallest size attained.
233 static const Value *getNoopInput(const Value *V,
235  unsigned &DataBits,
236  const TargetLoweringBase &TLI,
237  const DataLayout &DL) {
238  while (true) {
239  // Try to look through V1; if V1 is not an instruction, it can't be looked
240  // through.
241  const Instruction *I = dyn_cast<Instruction>(V);
242  if (!I || I->getNumOperands() == 0) return V;
243  const Value *NoopInput = nullptr;
244 
245  Value *Op = I->getOperand(0);
246  if (isa<BitCastInst>(I)) {
247  // Look through truly no-op bitcasts.
248  if (isNoopBitcast(Op->getType(), I->getType(), TLI))
249  NoopInput = Op;
250  } else if (isa<GetElementPtrInst>(I)) {
251  // Look through getelementptr
252  if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
253  NoopInput = Op;
254  } else if (isa<IntToPtrInst>(I)) {
255  // Look through inttoptr.
256  // Make sure this isn't a truncating or extending cast. We could
257  // support this eventually, but don't bother for now.
258  if (!isa<VectorType>(I->getType()) &&
259  DL.getPointerSizeInBits() ==
260  cast<IntegerType>(Op->getType())->getBitWidth())
261  NoopInput = Op;
262  } else if (isa<PtrToIntInst>(I)) {
263  // Look through ptrtoint.
264  // Make sure this isn't a truncating or extending cast. We could
265  // support this eventually, but don't bother for now.
266  if (!isa<VectorType>(I->getType()) &&
267  DL.getPointerSizeInBits() ==
268  cast<IntegerType>(I->getType())->getBitWidth())
269  NoopInput = Op;
270  } else if (isa<TruncInst>(I) &&
271  TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
272  DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
273  NoopInput = Op;
274  } else if (isa<CallInst>(I)) {
275  // Look through call (skipping callee)
276  for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 1;
277  i != e; ++i) {
278  unsigned attrInd = i - I->op_begin() + 1;
279  if (cast<CallInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
280  isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
281  NoopInput = *i;
282  break;
283  }
284  }
285  } else if (isa<InvokeInst>(I)) {
286  // Look through invoke (skipping BB, BB, Callee)
287  for (User::const_op_iterator i = I->op_begin(), e = I->op_end() - 3;
288  i != e; ++i) {
289  unsigned attrInd = i - I->op_begin() + 1;
290  if (cast<InvokeInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
291  isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
292  NoopInput = *i;
293  break;
294  }
295  }
296  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
297  // Value may come from either the aggregate or the scalar
298  ArrayRef<unsigned> InsertLoc = IVI->getIndices();
299  if (ValLoc.size() >= InsertLoc.size() &&
300  std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
301  // The type being inserted is a nested sub-type of the aggregate; we
302  // have to remove those initial indices to get the location we're
303  // interested in for the operand.
304  ValLoc.resize(ValLoc.size() - InsertLoc.size());
305  NoopInput = IVI->getInsertedValueOperand();
306  } else {
307  // The struct we're inserting into has the value we're interested in, no
308  // change of address.
309  NoopInput = Op;
310  }
311  } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
312  // The part we're interested in will inevitably be some sub-section of the
313  // previous aggregate. Combine the two paths to obtain the true address of
314  // our element.
315  ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
316  ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
317  NoopInput = Op;
318  }
319  // Terminate if we couldn't find anything to look through.
320  if (!NoopInput)
321  return V;
322 
323  V = NoopInput;
324  }
325 }
326 
327 /// Return true if this scalar return value only has bits discarded on its path
328 /// from the "tail call" to the "ret". This includes the obvious noop
329 /// instructions handled by getNoopInput above as well as free truncations (or
330 /// extensions prior to the call).
331 static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
332  SmallVectorImpl<unsigned> &RetIndices,
333  SmallVectorImpl<unsigned> &CallIndices,
334  bool AllowDifferingSizes,
335  const TargetLoweringBase &TLI,
336  const DataLayout &DL) {
337 
338  // Trace the sub-value needed by the return value as far back up the graph as
339  // possible, in the hope that it will intersect with the value produced by the
340  // call. In the simple case with no "returned" attribute, the hope is actually
341  // that we end up back at the tail call instruction itself.
342  unsigned BitsRequired = UINT_MAX;
343  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);
344 
345  // If this slot in the value returned is undef, it doesn't matter what the
346  // call puts there, it'll be fine.
347  if (isa<UndefValue>(RetVal))
348  return true;
349 
350  // Now do a similar search up through the graph to find where the value
351  // actually returned by the "tail call" comes from. In the simple case without
352  // a "returned" attribute, the search will be blocked immediately and the loop
353  // a Noop.
354  unsigned BitsProvided = UINT_MAX;
355  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);
356 
357  // There's no hope if we can't actually trace them to (the same part of!) the
358  // same value.
359  if (CallVal != RetVal || CallIndices != RetIndices)
360  return false;
361 
362  // However, intervening truncates may have made the call non-tail. Make sure
363  // all the bits that are needed by the "ret" have been provided by the "tail
364  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
365  // extensions too.
366  if (BitsProvided < BitsRequired ||
367  (!AllowDifferingSizes && BitsProvided != BitsRequired))
368  return false;
369 
370  return true;
371 }
372 
373 /// For an aggregate type, determine whether a given index is within bounds or
374 /// not.
375 static bool indexReallyValid(CompositeType *T, unsigned Idx) {
376  if (ArrayType *AT = dyn_cast<ArrayType>(T))
377  return Idx < AT->getNumElements();
378 
379  return Idx < cast<StructType>(T)->getNumElements();
380 }
381 
382 /// Move the given iterators to the next leaf type in depth first traversal.
383 ///
384 /// Performs a depth-first traversal of the type as specified by its arguments,
385 /// stopping at the next leaf node (which may be a legitimate scalar type or an
386 /// empty struct or array).
387 ///
388 /// @param SubTypes List of the partial components making up the type from
389 /// outermost to innermost non-empty aggregate. The element currently
390 /// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
391 ///
392 /// @param Path Set of extractvalue indices leading from the outermost type
393 /// (SubTypes[0]) to the leaf node currently represented.
394 ///
395 /// @returns true if a new type was found, false otherwise. Calling this
396 /// function again on a finished iterator will repeatedly return
397 /// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
398 /// aggregate or a non-aggregate
401  // First march back up the tree until we can successfully increment one of the
402  // coordinates in Path.
403  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
404  Path.pop_back();
405  SubTypes.pop_back();
406  }
407 
408  // If we reached the top, then the iterator is done.
409  if (Path.empty())
410  return false;
411 
412  // We know there's *some* valid leaf now, so march back down the tree picking
413  // out the left-most element at each node.
414  ++Path.back();
415  Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
416  while (DeeperType->isAggregateType()) {
417  CompositeType *CT = cast<CompositeType>(DeeperType);
418  if (!indexReallyValid(CT, 0))
419  return true;
420 
421  SubTypes.push_back(CT);
422  Path.push_back(0);
423 
424  DeeperType = CT->getTypeAtIndex(0U);
425  }
426 
427  return true;
428 }
429 
430 /// Find the first non-empty, scalar-like type in Next and setup the iterator
431 /// components.
432 ///
433 /// Assuming Next is an aggregate of some kind, this function will traverse the
434 /// tree from left to right (i.e. depth-first) looking for the first
435 /// non-aggregate type which will play a role in function return.
436 ///
437 /// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
438 /// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
439 /// i32 in that type.
440 static bool firstRealType(Type *Next,
443  // First initialise the iterator components to the first "leaf" node
444  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
445  // despite nominally being an aggregate).
446  while (Next->isAggregateType() &&
447  indexReallyValid(cast<CompositeType>(Next), 0)) {
448  SubTypes.push_back(cast<CompositeType>(Next));
449  Path.push_back(0);
450  Next = cast<CompositeType>(Next)->getTypeAtIndex(0U);
451  }
452 
453  // If there's no Path now, Next was originally scalar already (or empty
454  // leaf). We're done.
455  if (Path.empty())
456  return true;
457 
458  // Otherwise, use normal iteration to keep looking through the tree until we
459  // find a non-aggregate type.
460  while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType()) {
461  if (!advanceToNextLeafType(SubTypes, Path))
462  return false;
463  }
464 
465  return true;
466 }
467 
468 /// Set the iterator data-structures to the next non-empty, non-aggregate
469 /// subtype.
472  do {
473  if (!advanceToNextLeafType(SubTypes, Path))
474  return false;
475 
476  assert(!Path.empty() && "found a leaf but didn't set the path?");
477  } while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType());
478 
479  return true;
480 }
481 
482 
483 /// Test if the given instruction is in a position to be optimized
484 /// with a tail-call. This roughly means that it's in a block with
485 /// a return and there's nothing that needs to be scheduled
486 /// between it and the return.
487 ///
488 /// This function only tests target-independent requirements.
490  const Instruction *I = CS.getInstruction();
491  const BasicBlock *ExitBB = I->getParent();
492  const TerminatorInst *Term = ExitBB->getTerminator();
493  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
494 
495  // The block must end in a return statement or unreachable.
496  //
497  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
498  // an unreachable, for now. The way tailcall optimization is currently
499  // implemented means it will add an epilogue followed by a jump. That is
500  // not profitable. Also, if the callee is a special function (e.g.
501  // longjmp on x86), it can end up causing miscompilation that has not
502  // been fully understood.
503  if (!Ret &&
504  (!TM.Options.GuaranteedTailCallOpt || !isa<UnreachableInst>(Term)))
505  return false;
506 
507  // If I will have a chain, make sure no other instruction that will have a
508  // chain interposes between I and the return.
509  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
511  for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
512  if (&*BBI == I)
513  break;
514  // Debug info intrinsics do not get in the way of tail call optimization.
515  if (isa<DbgInfoIntrinsic>(BBI))
516  continue;
517  if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
519  return false;
520  }
521 
522  const Function *F = ExitBB->getParent();
524  F, I, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
525 }
526 
528  const Instruction *I,
529  const ReturnInst *Ret,
530  const TargetLoweringBase &TLI) {
531  // If the block ends with a void return or unreachable, it doesn't matter
532  // what the call's return type is.
533  if (!Ret || Ret->getNumOperands() == 0) return true;
534 
535  // If the return value is undef, it doesn't matter what the call's
536  // return type is.
537  if (isa<UndefValue>(Ret->getOperand(0))) return true;
538 
539  // Make sure the attributes attached to each return are compatible.
540  AttrBuilder CallerAttrs(F->getAttributes(),
542  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
544 
545  // Noalias is completely benign as far as calling convention goes, it
546  // shouldn't affect whether the call is a tail call.
547  CallerAttrs = CallerAttrs.removeAttribute(Attribute::NoAlias);
548  CalleeAttrs = CalleeAttrs.removeAttribute(Attribute::NoAlias);
549 
550  bool AllowDifferingSizes = true;
551  if (CallerAttrs.contains(Attribute::ZExt)) {
552  if (!CalleeAttrs.contains(Attribute::ZExt))
553  return false;
554 
555  AllowDifferingSizes = false;
556  CallerAttrs.removeAttribute(Attribute::ZExt);
557  CalleeAttrs.removeAttribute(Attribute::ZExt);
558  } else if (CallerAttrs.contains(Attribute::SExt)) {
559  if (!CalleeAttrs.contains(Attribute::SExt))
560  return false;
561 
562  AllowDifferingSizes = false;
563  CallerAttrs.removeAttribute(Attribute::SExt);
564  CalleeAttrs.removeAttribute(Attribute::SExt);
565  }
566 
567  // If they're still different, there's some facet we don't understand
568  // (currently only "inreg", but in future who knows). It may be OK but the
569  // only safe option is to reject the tail call.
570  if (CallerAttrs != CalleeAttrs)
571  return false;
572 
573  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
574  SmallVector<unsigned, 4> RetPath, CallPath;
575  SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;
576 
577  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
578  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);
579 
580  // Nothing's actually returned, it doesn't matter what the callee put there
581  // it's a valid tail call.
582  if (RetEmpty)
583  return true;
584 
585  // Iterate pairwise through each of the value types making up the tail call
586  // and the corresponding return. For each one we want to know whether it's
587  // essentially going directly from the tail call to the ret, via operations
588  // that end up not generating any code.
589  //
590  // We allow a certain amount of covariance here. For example it's permitted
591  // for the tail call to define more bits than the ret actually cares about
592  // (e.g. via a truncate).
593  do {
594  if (CallEmpty) {
595  // We've exhausted the values produced by the tail call instruction, the
596  // rest are essentially undef. The type doesn't really matter, but we need
597  // *something*.
598  Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
599  CallVal = UndefValue::get(SlotType);
600  }
601 
602  // The manipulations performed when we're looking through an insertvalue or
603  // an extractvalue would happen at the front of the RetPath list, so since
604  // we have to copy it anyway it's more efficient to create a reversed copy.
605  SmallVector<unsigned, 4> TmpRetPath(RetPath.rbegin(), RetPath.rend());
606  SmallVector<unsigned, 4> TmpCallPath(CallPath.rbegin(), CallPath.rend());
607 
608  // Finally, we can check whether the value produced by the tail call at this
609  // index is compatible with the value we return.
610  if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
611  AllowDifferingSizes, TLI,
612  F->getParent()->getDataLayout()))
613  return false;
614 
615  CallEmpty = !nextRealType(CallSubTypes, CallPath);
616  } while(nextRealType(RetSubTypes, RetPath));
617 
618  return true;
619 }
620 
622  if (!GV->hasLinkOnceODRLinkage())
623  return false;
624 
625  if (GV->hasUnnamedAddr())
626  return true;
627 
628  // If it is a non constant variable, it needs to be uniqued across shared
629  // objects.
630  if (const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV)) {
631  if (!Var->isConstant())
632  return false;
633  }
634 
635  // An alias can point to a variable. We could try to resolve the alias to
636  // decide, but for now just don't hide them.
637  if (isa<GlobalAlias>(GV))
638  return false;
639 
641  if (GlobalStatus::analyzeGlobal(GV, GS))
642  return false;
643 
644  return !GS.IsCompared;
645 }
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type (if unknown returns 0).
ReturnInst - Return a value (possibly void), from a function.
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:104
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
ExtractValueInst - This instruction extracts a struct member or array element value from an aggregate...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
Sign extended before/after call.
Definition: Attributes.h:105
InstrTy * getInstruction() const
Definition: CallSite.h:82
bool canBeOmittedFromSymbolTable(const GlobalValue *GV)
unsigned getNumOperands() const
Definition: User.h:138
virtual bool allowTruncateForTailCall(Type *, Type *) const
Return true if a truncation from Ty1 to Ty2 is permitted when deciding whether a call is in tail posi...
iterator end() const
Definition: ArrayRef.h:123
unsigned less or equal
Definition: InstrTypes.h:723
static const Value * getNoopInput(const Value *V, SmallVectorImpl< unsigned > &ValLoc, unsigned &DataBits, const TargetLoweringBase &TLI, const DataLayout &DL)
Look through operations that will be free to find the earliest source of this value.
unsigned less than
Definition: InstrTypes.h:722
bool mayHaveSideEffects() const
mayHaveSideEffects - Return true if the instruction may have side effects.
Definition: Instruction.h:387
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:703
Offsets
Offsets in bytes from the start of the input buffer.
Definition: SIInstrInfo.h:378
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:713
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:111
F(f)
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
op_iterator op_begin()
Definition: User.h:183
bool returnTypeIsEligibleForTailCall(const Function *F, const Instruction *I, const ReturnInst *Ret, const TargetLoweringBase &TLI)
Test if given that the input instruction is in the tail call position if the return type or any attri...
ConstraintCodeVector Codes
Code - The constraint code, either the register name (in braces) or the constraint letter/number...
Definition: InlineAsm.h:148
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:708
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
Definition: DataLayout.h:475
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:707
Type::subtype_iterator element_iterator
Definition: DerivedTypes.h:278
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:551
StructType - Class to represent struct types.
Definition: DerivedTypes.h:191
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:98
A Use represents the edge between a Value definition and its users.
Definition: Use.h:69
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APInt.h:33
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:704
Number of individual test Apply this number of consecutive mutations to each input exit after the first new interesting input is found the minimized corpus is saved into the first input directory Number of jobs to run If min(jobs, NumberOfCpuCores()/2)\" is used.") FUZZER_FLAG_INT(reload
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool mayReadFromMemory() const
mayReadFromMemory - Return true if this instruction may read memory.
bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const
Definition: SmallVector.h:57
reverse_iterator rbegin() const
Definition: ArrayRef.h:125
#define T
ArrayType - Class to represent array types.
Definition: DerivedTypes.h:336
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:804
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
static bool nextRealType(SmallVectorImpl< CompositeType * > &SubTypes, SmallVectorImpl< unsigned > &Path)
Set the iterator data-structures to the next non-empty, non-aggregate subtype.
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:134
Considered to not alias after call.
Definition: Attributes.h:83
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
bool hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos, const TargetLowering &TLI)
hasInlineAsmMemConstraint - Return true if the inline asm instruction being processed uses a memory '...
uint64_t getElementOffset(unsigned Idx) const
Definition: DataLayout.h:491
static bool indexReallyValid(CompositeType *T, unsigned Idx)
For an aggregate type, determine whether a given index is within bounds or not.
reverse_iterator rend() const
Definition: ArrayRef.h:126
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
As we analyze each global, keep track of some information about it.
Definition: GlobalStatus.h:28
Subclasses of this class are all able to terminate a basic block.
Definition: InstrTypes.h:35
LLVM Basic Block Representation.
Definition: BasicBlock.h:65
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
bool isIndirect
isIndirect - True if this operand is an indirect operand.
Definition: InlineAsm.h:144
Return value is always equal to this argument.
Definition: Attributes.h:103
op_iterator op_end()
Definition: User.h:185
Type * getTypeAtIndex(const Value *V)
getTypeAtIndex - Given an index value into the type, return the type of the element.
Definition: Type.cpp:634
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:697
iterator begin() const
Definition: ArrayRef.h:122
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline...
Value * getOperand(unsigned i) const
Definition: User.h:118
Zero extended before/after call.
Definition: Attributes.h:119
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:706
static bool analyzeGlobal(const Value *V, GlobalStatus &GS)
Look at all uses of the global and fill in the GlobalStatus structure.
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:714
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:416
bool isPointerTy() const
isPointerTy - True if this is an instance of PointerType.
Definition: Type.h:217
static UndefValue * get(Type *T)
get() - Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1473
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal, SmallVectorImpl< unsigned > &RetIndices, SmallVectorImpl< unsigned > &CallIndices, bool AllowDifferingSizes, const TargetLoweringBase &TLI, const DataLayout &DL)
Return true if this scalar return value only has bits discarded on its path from the "tail call" to the "ret".
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:712
bool isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
signed greater than
Definition: InstrTypes.h:724
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
virtual const TargetSubtargetInfo * getSubtargetImpl(const Function &) const
Virtual method implemented by subclasses that returns a reference to that target's TargetSubtargetInfo.
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:701
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment padding.
Definition: DataLayout.h:388
static bool isNoopBitcast(Type *T1, Type *T2, const TargetLoweringBase &TLI)
iterator end()
Definition: BasicBlock.h:233
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:711
Module.h This file contains the declarations for the Module class.
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:222
virtual const TargetLowering * getTargetLowering() const
signed less than
Definition: InstrTypes.h:726
Value * stripPointerCasts()
Strip off pointer casts, all-zero GEPs, and aliases.
Definition: Value.cpp:458
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if we're allowed to assume that NaNs won't occur.
AttributeSet getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:181
signed less or equal
Definition: InstrTypes.h:727
LLVM_ATTRIBUTE_UNUSED_RESULT std::enable_if< !is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:285
CompositeType - Common super class of ArrayType, StructType, PointerType and VectorType.
Definition: DerivedTypes.h:148
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:372
bool isAggregateType() const
isAggregateType - Return true if the type is an aggregate type.
Definition: Type.h:260
bool IsCompared
True if the global's address is used in a comparison.
Definition: GlobalStatus.h:30
unsigned greater or equal
Definition: InstrTypes.h:721
ImmutableCallSite - establish a view to a call site for examination.
Definition: CallSite.h:418
#define I(x, y, z)
Definition: MD5.cpp:54
TerminatorInst * getTerminator()
Returns the terminator instruction if the block is well formed or null if the block is not well formed.
Definition: BasicBlock.cpp:124
bool hasLinkOnceODRLinkage() const
Definition: GlobalValue.h:267
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients are updated.
Definition: DataLayout.h:329
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:705
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:709
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
getEVT - Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:277
AttributeSet getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
getPrimitiveSizeInBits - Return the basic size of this type if it is a primitive type.
Definition: Type.cpp:121
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:700
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:365
LLVM Value Representation.
Definition: Value.h:69
bool hasUnnamedAddr() const
Definition: GlobalValue.h:130
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:710
Primary interface to the complete machine description for the target machine.
unsigned greater than
Definition: InstrTypes.h:720
static bool advanceToNextLeafType(SmallVectorImpl< CompositeType * > &SubTypes, SmallVectorImpl< unsigned > &Path)
Move the given iterators to the next leaf type in depth first traversal.
bool isSafeToSpeculativelyExecute(const Value *V, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
isSafeToSpeculativelyExecute - Return true if the instruction does not have any effects besides calculating the result and does not have undefined behavior.
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:702
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
std::vector< ConstraintInfo > ConstraintInfoVector
Definition: InlineAsm.h:114
const BasicBlock * getParent() const
Definition: Instruction.h:72
#define T1
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:699
signed greater or equal
Definition: InstrTypes.h:725
This file describes how to lower LLVM code to machine code.
bool isVoidTy() const
isVoidTy - Return true if this is 'void'.
Definition: Type.h:137
InsertValueInst - This instruction inserts a struct field of array element value into an aggregate value.
static bool firstRealType(Type *Next, SmallVectorImpl< CompositeType * > &SubTypes, SmallVectorImpl< unsigned > &Path)
Find the first non-empty, scalar-like type in Next and setup the iterator components.
void resize(size_type N)
Definition: SmallVector.h:376