LLVM  4.0.0
CodeGen/Analysis.cpp
Go to the documentation of this file.
1 //===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines several CodeGen-specific LLVM IR analysis utilities.
11 //
12 //===----------------------------------------------------------------------===//
13 
#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
31 
32 using namespace llvm;
33 
34 /// Compute the linearized index of a member in a nested aggregate/struct/array
35 /// by recursing and accumulating CurIndex as long as there are indices in the
36 /// index list.
38  const unsigned *Indices,
39  const unsigned *IndicesEnd,
40  unsigned CurIndex) {
41  // Base case: We're done.
42  if (Indices && Indices == IndicesEnd)
43  return CurIndex;
44 
45  // Given a struct type, recursively traverse the elements.
46  if (StructType *STy = dyn_cast<StructType>(Ty)) {
47  for (StructType::element_iterator EB = STy->element_begin(),
48  EI = EB,
49  EE = STy->element_end();
50  EI != EE; ++EI) {
51  if (Indices && *Indices == unsigned(EI - EB))
52  return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
53  CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
54  }
55  assert(!Indices && "Unexpected out of bound");
56  return CurIndex;
57  }
58  // Given an array type, recursively traverse the elements.
59  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
60  Type *EltTy = ATy->getElementType();
61  unsigned NumElts = ATy->getNumElements();
62  // Compute the Linear offset when jumping one element of the array
63  unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
64  if (Indices) {
65  assert(*Indices < NumElts && "Unexpected out of bound");
66  // If the indice is inside the array, compute the index to the requested
67  // elt and recurse inside the element with the end of the indices list
68  CurIndex += EltLinearOffset* *Indices;
69  return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
70  }
71  CurIndex += EltLinearOffset*NumElts;
72  return CurIndex;
73  }
74  // We haven't found the type we're looking for, so keep searching.
75  return CurIndex + 1;
76 }
77 
78 /// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
79 /// EVTs that represent all the individual underlying
80 /// non-aggregate types that comprise it.
81 ///
82 /// If Offsets is non-null, it points to a vector to be filled in
83 /// with the in-memory offsets of each of the individual values.
84 ///
85 void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
86  Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
88  uint64_t StartingOffset) {
89  // Given a struct type, recursively traverse the elements.
90  if (StructType *STy = dyn_cast<StructType>(Ty)) {
91  const StructLayout *SL = DL.getStructLayout(STy);
92  for (StructType::element_iterator EB = STy->element_begin(),
93  EI = EB,
94  EE = STy->element_end();
95  EI != EE; ++EI)
96  ComputeValueVTs(TLI, DL, *EI, ValueVTs, Offsets,
97  StartingOffset + SL->getElementOffset(EI - EB));
98  return;
99  }
100  // Given an array type, recursively traverse the elements.
101  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
102  Type *EltTy = ATy->getElementType();
103  uint64_t EltSize = DL.getTypeAllocSize(EltTy);
104  for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
105  ComputeValueVTs(TLI, DL, EltTy, ValueVTs, Offsets,
106  StartingOffset + i * EltSize);
107  return;
108  }
109  // Interpret void as zero return values.
110  if (Ty->isVoidTy())
111  return;
112  // Base case: we can get an EVT for this LLVM IR type.
113  ValueVTs.push_back(TLI.getValueType(DL, Ty));
114  if (Offsets)
115  Offsets->push_back(StartingOffset);
116 }
117 
118 /// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
120  V = V->stripPointerCasts();
121  GlobalValue *GV = dyn_cast<GlobalValue>(V);
123 
124  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
125  assert(Var->hasInitializer() &&
126  "The EH catch-all value must have an initializer");
127  Value *Init = Var->getInitializer();
128  GV = dyn_cast<GlobalValue>(Init);
129  if (!GV) V = cast<ConstantPointerNull>(Init);
130  }
131 
132  assert((GV || isa<ConstantPointerNull>(V)) &&
133  "TypeInfo must be a global variable or NULL");
134  return GV;
135 }
136 
137 /// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
138 /// processed uses a memory 'm' constraint.
139 bool
141  const TargetLowering &TLI) {
142  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
143  InlineAsm::ConstraintInfo &CI = CInfos[i];
144  for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
146  if (CType == TargetLowering::C_Memory)
147  return true;
148  }
149 
150  // Indirect operand accesses access memory.
151  if (CI.isIndirect)
152  return true;
153  }
154 
155  return false;
156 }
157 
158 /// getFCmpCondCode - Return the ISD condition code corresponding to
159 /// the given LLVM IR floating-point condition code. This includes
160 /// consideration of global floating-point math flags.
161 ///
163  switch (Pred) {
164  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
165  case FCmpInst::FCMP_OEQ: return ISD::SETOEQ;
166  case FCmpInst::FCMP_OGT: return ISD::SETOGT;
167  case FCmpInst::FCMP_OGE: return ISD::SETOGE;
168  case FCmpInst::FCMP_OLT: return ISD::SETOLT;
169  case FCmpInst::FCMP_OLE: return ISD::SETOLE;
170  case FCmpInst::FCMP_ONE: return ISD::SETONE;
171  case FCmpInst::FCMP_ORD: return ISD::SETO;
172  case FCmpInst::FCMP_UNO: return ISD::SETUO;
173  case FCmpInst::FCMP_UEQ: return ISD::SETUEQ;
174  case FCmpInst::FCMP_UGT: return ISD::SETUGT;
175  case FCmpInst::FCMP_UGE: return ISD::SETUGE;
176  case FCmpInst::FCMP_ULT: return ISD::SETULT;
177  case FCmpInst::FCMP_ULE: return ISD::SETULE;
178  case FCmpInst::FCMP_UNE: return ISD::SETUNE;
179  case FCmpInst::FCMP_TRUE: return ISD::SETTRUE;
180  default: llvm_unreachable("Invalid FCmp predicate opcode!");
181  }
182 }
183 
185  switch (CC) {
186  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
187  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
188  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
189  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
190  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
191  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
192  default: return CC;
193  }
194 }
195 
196 /// getICmpCondCode - Return the ISD condition code corresponding to
197 /// the given LLVM IR integer condition code.
198 ///
200  switch (Pred) {
201  case ICmpInst::ICMP_EQ: return ISD::SETEQ;
202  case ICmpInst::ICMP_NE: return ISD::SETNE;
203  case ICmpInst::ICMP_SLE: return ISD::SETLE;
204  case ICmpInst::ICMP_ULE: return ISD::SETULE;
205  case ICmpInst::ICMP_SGE: return ISD::SETGE;
206  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
207  case ICmpInst::ICMP_SLT: return ISD::SETLT;
208  case ICmpInst::ICMP_ULT: return ISD::SETULT;
209  case ICmpInst::ICMP_SGT: return ISD::SETGT;
210  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
211  default:
212  llvm_unreachable("Invalid ICmp predicate opcode!");
213  }
214 }
215 
216 static bool isNoopBitcast(Type *T1, Type *T2,
217  const TargetLoweringBase& TLI) {
218  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
219  (isa<VectorType>(T1) && isa<VectorType>(T2) &&
220  TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
221 }
222 
223 /// Look through operations that will be free to find the earliest source of
224 /// this value.
225 ///
226 /// @param ValLoc If V has aggegate type, we will be interested in a particular
227 /// scalar component. This records its address; the reverse of this list gives a
228 /// sequence of indices appropriate for an extractvalue to locate the important
229 /// value. This value is updated during the function and on exit will indicate
230 /// similar information for the Value returned.
231 ///
232 /// @param DataBits If this function looks through truncate instructions, this
233 /// will record the smallest size attained.
234 static const Value *getNoopInput(const Value *V,
236  unsigned &DataBits,
237  const TargetLoweringBase &TLI,
238  const DataLayout &DL) {
239  while (true) {
240  // Try to look through V1; if V1 is not an instruction, it can't be looked
241  // through.
242  const Instruction *I = dyn_cast<Instruction>(V);
243  if (!I || I->getNumOperands() == 0) return V;
244  const Value *NoopInput = nullptr;
245 
246  Value *Op = I->getOperand(0);
247  if (isa<BitCastInst>(I)) {
248  // Look through truly no-op bitcasts.
249  if (isNoopBitcast(Op->getType(), I->getType(), TLI))
250  NoopInput = Op;
251  } else if (isa<GetElementPtrInst>(I)) {
252  // Look through getelementptr
253  if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
254  NoopInput = Op;
255  } else if (isa<IntToPtrInst>(I)) {
256  // Look through inttoptr.
257  // Make sure this isn't a truncating or extending cast. We could
258  // support this eventually, but don't bother for now.
259  if (!isa<VectorType>(I->getType()) &&
260  DL.getPointerSizeInBits() ==
261  cast<IntegerType>(Op->getType())->getBitWidth())
262  NoopInput = Op;
263  } else if (isa<PtrToIntInst>(I)) {
264  // Look through ptrtoint.
265  // Make sure this isn't a truncating or extending cast. We could
266  // support this eventually, but don't bother for now.
267  if (!isa<VectorType>(I->getType()) &&
268  DL.getPointerSizeInBits() ==
269  cast<IntegerType>(I->getType())->getBitWidth())
270  NoopInput = Op;
271  } else if (isa<TruncInst>(I) &&
272  TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
273  DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
274  NoopInput = Op;
275  } else if (auto CS = ImmutableCallSite(I)) {
276  const Value *ReturnedOp = CS.getReturnedArgOperand();
277  if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
278  NoopInput = ReturnedOp;
279  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
280  // Value may come from either the aggregate or the scalar
281  ArrayRef<unsigned> InsertLoc = IVI->getIndices();
282  if (ValLoc.size() >= InsertLoc.size() &&
283  std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
284  // The type being inserted is a nested sub-type of the aggregate; we
285  // have to remove those initial indices to get the location we're
286  // interested in for the operand.
287  ValLoc.resize(ValLoc.size() - InsertLoc.size());
288  NoopInput = IVI->getInsertedValueOperand();
289  } else {
290  // The struct we're inserting into has the value we're interested in, no
291  // change of address.
292  NoopInput = Op;
293  }
294  } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
295  // The part we're interested in will inevitably be some sub-section of the
296  // previous aggregate. Combine the two paths to obtain the true address of
297  // our element.
298  ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
299  ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
300  NoopInput = Op;
301  }
302  // Terminate if we couldn't find anything to look through.
303  if (!NoopInput)
304  return V;
305 
306  V = NoopInput;
307  }
308 }
309 
310 /// Return true if this scalar return value only has bits discarded on its path
311 /// from the "tail call" to the "ret". This includes the obvious noop
312 /// instructions handled by getNoopInput above as well as free truncations (or
313 /// extensions prior to the call).
314 static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
315  SmallVectorImpl<unsigned> &RetIndices,
316  SmallVectorImpl<unsigned> &CallIndices,
317  bool AllowDifferingSizes,
318  const TargetLoweringBase &TLI,
319  const DataLayout &DL) {
320 
321  // Trace the sub-value needed by the return value as far back up the graph as
322  // possible, in the hope that it will intersect with the value produced by the
323  // call. In the simple case with no "returned" attribute, the hope is actually
324  // that we end up back at the tail call instruction itself.
325  unsigned BitsRequired = UINT_MAX;
326  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);
327 
328  // If this slot in the value returned is undef, it doesn't matter what the
329  // call puts there, it'll be fine.
330  if (isa<UndefValue>(RetVal))
331  return true;
332 
333  // Now do a similar search up through the graph to find where the value
334  // actually returned by the "tail call" comes from. In the simple case without
335  // a "returned" attribute, the search will be blocked immediately and the loop
336  // a Noop.
337  unsigned BitsProvided = UINT_MAX;
338  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);
339 
340  // There's no hope if we can't actually trace them to (the same part of!) the
341  // same value.
342  if (CallVal != RetVal || CallIndices != RetIndices)
343  return false;
344 
345  // However, intervening truncates may have made the call non-tail. Make sure
346  // all the bits that are needed by the "ret" have been provided by the "tail
347  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
348  // extensions too.
349  if (BitsProvided < BitsRequired ||
350  (!AllowDifferingSizes && BitsProvided != BitsRequired))
351  return false;
352 
353  return true;
354 }
355 
356 /// For an aggregate type, determine whether a given index is within bounds or
357 /// not.
358 static bool indexReallyValid(CompositeType *T, unsigned Idx) {
359  if (ArrayType *AT = dyn_cast<ArrayType>(T))
360  return Idx < AT->getNumElements();
361 
362  return Idx < cast<StructType>(T)->getNumElements();
363 }
364 
365 /// Move the given iterators to the next leaf type in depth first traversal.
366 ///
367 /// Performs a depth-first traversal of the type as specified by its arguments,
368 /// stopping at the next leaf node (which may be a legitimate scalar type or an
369 /// empty struct or array).
370 ///
371 /// @param SubTypes List of the partial components making up the type from
372 /// outermost to innermost non-empty aggregate. The element currently
373 /// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
374 ///
375 /// @param Path Set of extractvalue indices leading from the outermost type
376 /// (SubTypes[0]) to the leaf node currently represented.
377 ///
378 /// @returns true if a new type was found, false otherwise. Calling this
379 /// function again on a finished iterator will repeatedly return
380 /// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
381 /// aggregate or a non-aggregate
384  // First march back up the tree until we can successfully increment one of the
385  // coordinates in Path.
386  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
387  Path.pop_back();
388  SubTypes.pop_back();
389  }
390 
391  // If we reached the top, then the iterator is done.
392  if (Path.empty())
393  return false;
394 
395  // We know there's *some* valid leaf now, so march back down the tree picking
396  // out the left-most element at each node.
397  ++Path.back();
398  Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
399  while (DeeperType->isAggregateType()) {
400  CompositeType *CT = cast<CompositeType>(DeeperType);
401  if (!indexReallyValid(CT, 0))
402  return true;
403 
404  SubTypes.push_back(CT);
405  Path.push_back(0);
406 
407  DeeperType = CT->getTypeAtIndex(0U);
408  }
409 
410  return true;
411 }
412 
413 /// Find the first non-empty, scalar-like type in Next and setup the iterator
414 /// components.
415 ///
416 /// Assuming Next is an aggregate of some kind, this function will traverse the
417 /// tree from left to right (i.e. depth-first) looking for the first
418 /// non-aggregate type which will play a role in function return.
419 ///
420 /// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
421 /// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
422 /// i32 in that type.
423 static bool firstRealType(Type *Next,
426  // First initialise the iterator components to the first "leaf" node
427  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
428  // despite nominally being an aggregate).
429  while (Next->isAggregateType() &&
430  indexReallyValid(cast<CompositeType>(Next), 0)) {
431  SubTypes.push_back(cast<CompositeType>(Next));
432  Path.push_back(0);
433  Next = cast<CompositeType>(Next)->getTypeAtIndex(0U);
434  }
435 
436  // If there's no Path now, Next was originally scalar already (or empty
437  // leaf). We're done.
438  if (Path.empty())
439  return true;
440 
441  // Otherwise, use normal iteration to keep looking through the tree until we
442  // find a non-aggregate type.
443  while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType()) {
444  if (!advanceToNextLeafType(SubTypes, Path))
445  return false;
446  }
447 
448  return true;
449 }
450 
451 /// Set the iterator data-structures to the next non-empty, non-aggregate
452 /// subtype.
455  do {
456  if (!advanceToNextLeafType(SubTypes, Path))
457  return false;
458 
459  assert(!Path.empty() && "found a leaf but didn't set the path?");
460  } while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType());
461 
462  return true;
463 }
464 
465 
466 /// Test if the given instruction is in a position to be optimized
467 /// with a tail-call. This roughly means that it's in a block with
468 /// a return and there's nothing that needs to be scheduled
469 /// between it and the return.
470 ///
471 /// This function only tests target-independent requirements.
473  const Instruction *I = CS.getInstruction();
474  const BasicBlock *ExitBB = I->getParent();
475  const TerminatorInst *Term = ExitBB->getTerminator();
476  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
477 
478  // The block must end in a return statement or unreachable.
479  //
480  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
481  // an unreachable, for now. The way tailcall optimization is currently
482  // implemented means it will add an epilogue followed by a jump. That is
483  // not profitable. Also, if the callee is a special function (e.g.
484  // longjmp on x86), it can end up causing miscompilation that has not
485  // been fully understood.
486  if (!Ret &&
487  (!TM.Options.GuaranteedTailCallOpt || !isa<UnreachableInst>(Term)))
488  return false;
489 
490  // If I will have a chain, make sure no other instruction that will have a
491  // chain interposes between I and the return.
492  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
494  for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
495  if (&*BBI == I)
496  break;
497  // Debug info intrinsics do not get in the way of tail call optimization.
498  if (isa<DbgInfoIntrinsic>(BBI))
499  continue;
500  if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
502  return false;
503  }
504 
505  const Function *F = ExitBB->getParent();
507  F, I, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
508 }
509 
511  const ReturnInst *Ret,
512  const TargetLoweringBase &TLI,
513  bool *AllowDifferingSizes) {
514  // ADS may be null, so don't write to it directly.
515  bool DummyADS;
516  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
517  ADS = true;
518 
519  AttrBuilder CallerAttrs(F->getAttributes(),
521  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
523 
524  // Noalias is completely benign as far as calling convention goes, it
525  // shouldn't affect whether the call is a tail call.
526  CallerAttrs.removeAttribute(Attribute::NoAlias);
527  CalleeAttrs.removeAttribute(Attribute::NoAlias);
528 
529  if (CallerAttrs.contains(Attribute::ZExt)) {
530  if (!CalleeAttrs.contains(Attribute::ZExt))
531  return false;
532 
533  ADS = false;
534  CallerAttrs.removeAttribute(Attribute::ZExt);
535  CalleeAttrs.removeAttribute(Attribute::ZExt);
536  } else if (CallerAttrs.contains(Attribute::SExt)) {
537  if (!CalleeAttrs.contains(Attribute::SExt))
538  return false;
539 
540  ADS = false;
541  CallerAttrs.removeAttribute(Attribute::SExt);
542  CalleeAttrs.removeAttribute(Attribute::SExt);
543  }
544 
545  // If they're still different, there's some facet we don't understand
546  // (currently only "inreg", but in future who knows). It may be OK but the
547  // only safe option is to reject the tail call.
548  return CallerAttrs == CalleeAttrs;
549 }
550 
552  const Instruction *I,
553  const ReturnInst *Ret,
554  const TargetLoweringBase &TLI) {
555  // If the block ends with a void return or unreachable, it doesn't matter
556  // what the call's return type is.
557  if (!Ret || Ret->getNumOperands() == 0) return true;
558 
559  // If the return value is undef, it doesn't matter what the call's
560  // return type is.
561  if (isa<UndefValue>(Ret->getOperand(0))) return true;
562 
563  // Make sure the attributes attached to each return are compatible.
564  bool AllowDifferingSizes;
565  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
566  return false;
567 
568  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
569  SmallVector<unsigned, 4> RetPath, CallPath;
570  SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;
571 
572  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
573  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);
574 
575  // Nothing's actually returned, it doesn't matter what the callee put there
576  // it's a valid tail call.
577  if (RetEmpty)
578  return true;
579 
580  // Iterate pairwise through each of the value types making up the tail call
581  // and the corresponding return. For each one we want to know whether it's
582  // essentially going directly from the tail call to the ret, via operations
583  // that end up not generating any code.
584  //
585  // We allow a certain amount of covariance here. For example it's permitted
586  // for the tail call to define more bits than the ret actually cares about
587  // (e.g. via a truncate).
588  do {
589  if (CallEmpty) {
590  // We've exhausted the values produced by the tail call instruction, the
591  // rest are essentially undef. The type doesn't really matter, but we need
592  // *something*.
593  Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
594  CallVal = UndefValue::get(SlotType);
595  }
596 
597  // The manipulations performed when we're looking through an insertvalue or
598  // an extractvalue would happen at the front of the RetPath list, so since
599  // we have to copy it anyway it's more efficient to create a reversed copy.
600  SmallVector<unsigned, 4> TmpRetPath(RetPath.rbegin(), RetPath.rend());
601  SmallVector<unsigned, 4> TmpCallPath(CallPath.rbegin(), CallPath.rend());
602 
603  // Finally, we can check whether the value produced by the tail call at this
604  // index is compatible with the value we return.
605  if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
606  AllowDifferingSizes, TLI,
607  F->getParent()->getDataLayout()))
608  return false;
609 
610  CallEmpty = !nextRealType(CallSubTypes, CallPath);
611  } while(nextRealType(RetSubTypes, RetPath));
612 
613  return true;
614 }
615 
617  if (!GV->hasLinkOnceODRLinkage())
618  return false;
619 
620  // We assume that anyone who sets global unnamed_addr on a non-constant knows
621  // what they're doing.
622  if (GV->hasGlobalUnnamedAddr())
623  return true;
624 
625  // If it is a non constant variable, it needs to be uniqued across shared
626  // objects.
627  if (const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV)) {
628  if (!Var->isConstant())
629  return false;
630  }
631 
632  return GV->hasAtLeastLocalUnnamedAddr();
633 }
634 
636  DenseMap<const MachineBasicBlock *, int> &FuncletMembership, int Funclet,
637  const MachineBasicBlock *MBB) {
639  while (!Worklist.empty()) {
640  const MachineBasicBlock *Visiting = Worklist.pop_back_val();
641  // Don't follow blocks which start new funclets.
642  if (Visiting->isEHPad() && Visiting != MBB)
643  continue;
644 
645  // Add this MBB to our funclet.
646  auto P = FuncletMembership.insert(std::make_pair(Visiting, Funclet));
647 
648  // Don't revisit blocks.
649  if (!P.second) {
650  assert(P.first->second == Funclet && "MBB is part of two funclets!");
651  continue;
652  }
653 
654  // Returns are boundaries where funclet transfer can occur, don't follow
655  // successors.
656  if (Visiting->isReturnBlock())
657  continue;
658 
659  for (const MachineBasicBlock *Succ : Visiting->successors())
660  Worklist.push_back(Succ);
661  }
662 }
663 
667 
668  // We don't have anything to do if there aren't any EH pads.
669  if (!MF.hasEHFunclets())
670  return FuncletMembership;
671 
672  int EntryBBNumber = MF.front().getNumber();
673  bool IsSEH = isAsynchronousEHPersonality(
675 
681  for (const MachineBasicBlock &MBB : MF) {
682  if (MBB.isEHFuncletEntry()) {
683  FuncletBlocks.push_back(&MBB);
684  } else if (IsSEH && MBB.isEHPad()) {
685  SEHCatchPads.push_back(&MBB);
686  } else if (MBB.pred_empty()) {
687  UnreachableBlocks.push_back(&MBB);
688  }
689 
691 
692  // CatchPads are not funclets for SEH so do not consider CatchRet to
693  // transfer control to another funclet.
694  if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
695  continue;
696 
697  // FIXME: SEH CatchPads are not necessarily in the parent function:
698  // they could be inside a finally block.
699  const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
700  const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
701  CatchRetSuccessors.push_back(
702  {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
703  }
704 
705  // We don't have anything to do if there aren't any EH pads.
706  if (FuncletBlocks.empty())
707  return FuncletMembership;
708 
709  // Identify all the basic blocks reachable from the function entry.
710  collectFuncletMembers(FuncletMembership, EntryBBNumber, &MF.front());
711  // All blocks not part of a funclet are in the parent function.
712  for (const MachineBasicBlock *MBB : UnreachableBlocks)
713  collectFuncletMembers(FuncletMembership, EntryBBNumber, MBB);
714  // Next, identify all the blocks inside the funclets.
715  for (const MachineBasicBlock *MBB : FuncletBlocks)
716  collectFuncletMembers(FuncletMembership, MBB->getNumber(), MBB);
717  // SEH CatchPads aren't really funclets, handle them separately.
718  for (const MachineBasicBlock *MBB : SEHCatchPads)
719  collectFuncletMembers(FuncletMembership, EntryBBNumber, MBB);
720  // Finally, identify all the targets of a catchret.
721  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
722  CatchRetSuccessors)
723  collectFuncletMembers(FuncletMembership, CatchRetPair.second,
724  CatchRetPair.first);
725  return FuncletMembership;
726 }
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type (if unknown returns 0).
Return a value (possibly void), from a function.
bool isEHPad() const
Returns true if the block is a landing pad.
A parsed version of the target data layout string in and methods for querying it. ...
Definition: DataLayout.h:102
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred)
getICmpCondCode - Return the ISD condition code corresponding to the given LLVM IR integer condition ...
static void collectFuncletMembers(DenseMap< const MachineBasicBlock *, int > &FuncletMembership, int Funclet, const MachineBasicBlock *MBB)
This instruction extracts a struct member or array element value from an aggregate value...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
size_t i
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
bool canBeOmittedFromSymbolTable(const GlobalValue *GV)
unsigned getNumOperands() const
Definition: User.h:167
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
iterator end() const
Definition: ArrayRef.h:130
unsigned less or equal
Definition: InstrTypes.h:906
static const Value * getNoopInput(const Value *V, SmallVectorImpl< unsigned > &ValLoc, unsigned &DataBits, const TargetLoweringBase &TLI, const DataLayout &DL)
Look through operations that will be free to find the earliest source of this value.
unsigned less than
Definition: InstrTypes.h:905
bool mayHaveSideEffects() const
Return true if the instruction may have side effects.
Definition: Instruction.h:450
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:886
Offsets
Offsets in bytes from the start of the input buffer.
Definition: SIInstrInfo.h:777
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:896
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:100
The two locations do not alias at all.
Definition: AliasAnalysis.h:79
const Function * getFunction() const
getFunction - Return the LLVM function that this machine code represents
unsigned getCatchReturnOpcode() const
GlobalValue * ExtractTypeInfo(Value *V)
ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
bool returnTypeIsEligibleForTailCall(const Function *F, const Instruction *I, const ReturnInst *Ret, const TargetLoweringBase &TLI)
Test if given that the input instruction is in the tail call position if the return type or any attri...
ConstraintCodeVector Codes
Code - The constraint code, either the register name (in braces) or the constraint letter/number...
Definition: InlineAsm.h:148
iterator_range< succ_iterator > successors()
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:172
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:891
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
Definition: DataLayout.h:496
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:890
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Type::subtype_iterator element_iterator
Definition: DerivedTypes.h:278
const HexagonInstrInfo * TII
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
Definition: DataLayout.cpp:566
Class to represent struct types.
Definition: DerivedTypes.h:199
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: APFloat.h:32
Constant * getPersonalityFn() const
Get the personality function associated with this function.
Definition: Function.cpp:1218
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:887
virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
LLVM_NODISCARD bool empty() const
Definition: SmallVector.h:60
const MachineBasicBlock & front() const
#define F(x, y, z)
Definition: MD5.cpp:51
bool mayReadFromMemory() const
Return true if this instruction may read memory.
reverse_iterator rbegin() const
Definition: ArrayRef.h:132
MachineBasicBlock * MBB
#define T
Class to represent array types.
Definition: DerivedTypes.h:345
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
Definition: ISDOpcodes.h:842
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
static bool nextRealType(SmallVectorImpl< CompositeType * > &SubTypes, SmallVectorImpl< unsigned > &Path)
Set the iterator data-structures to the next non-empty, non-aggregate subtype.
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred)
getFCmpCondCode - Return the ISD condition code corresponding to the given LLVM IR floating-point con...
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:141
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< uint64_t > *Offsets=nullptr, uint64_t StartingOffset=0)
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Maximum length of the test input libFuzzer tries to guess a good value based on the corpus and reports it always prefer smaller inputs during the corpus shuffle When libFuzzer itself reports a bug this exit code will be used If indicates the maximal total time in seconds to run the fuzzer minimizes the provided crash input Use with etc Experimental Use value profile to guide fuzzing Number of simultaneous worker processes to run the jobs If min(jobs, NumberOfCpuCores()/2)\" is used.") FUZZER_FLAG_INT(reload
bool hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos, const TargetLowering &TLI)
hasInlineAsmMemConstraint - Return true if the inline asm instruction being processed uses a memory '...
TargetInstrInfo - Interface to description of machine instruction set.
uint64_t getElementOffset(unsigned Idx) const
Definition: DataLayout.h:517
static bool indexReallyValid(CompositeType *T, unsigned Idx)
For an aggregate type, determine whether a given index is within bounds or not.
reverse_iterator rend() const
Definition: ArrayRef.h:133
#define P(N)
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
Subclasses of this class are all able to terminate a basic block.
Definition: InstrTypes.h:52
LLVM Basic Block Representation.
Definition: BasicBlock.h:51
bool hasAtLeastLocalUnnamedAddr() const
Returns true if this value's address is not significant in this module.
Definition: GlobalValue.h:196
The instances of the Type class are immutable: once they are created, they are never changed...
Definition: Type.h:45
bool isIndirect
isIndirect - True if this operand is an indirect operand.
Definition: InlineAsm.h:144
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
bool isEHFuncletEntry() const
Returns true if this is the entry block of an EH funclet.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:880
iterator begin() const
Definition: ArrayRef.h:129
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline...
Value * getOperand(unsigned i) const
Definition: User.h:145
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:889
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:897
void append(in_iter in_start, in_iter in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:392
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:213
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1337
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal, SmallVectorImpl< unsigned > &RetIndices, SmallVectorImpl< unsigned > &CallIndices, bool AllowDifferingSizes, const TargetLoweringBase &TLI, const DataLayout &DL)
Return true if this scalar return value only has bits discarded on its path from the "tail call" to t...
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:895
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM)
Test if the given instruction is in a position to be optimized with a tail-call.
bool isReturnBlock() const
Convenience function that returns true if the block ends in a return instruction. ...
signed greater than
Definition: InstrTypes.h:907
This base class for TargetLowering contains the SelectionDAG-independent parts that can be used from ...
virtual const TargetSubtargetInfo * getSubtargetImpl(const Function &) const
Virtual method implemented by subclasses that returns a reference to that target's TargetSubtargetInf...
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:884
Iterator for intrusive lists based on ilist_node.
bool hasGlobalUnnamedAddr() const
Definition: GlobalValue.h:187
InstrTy * getInstruction() const
Definition: CallSite.h:93
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Definition: DataLayout.h:408
static bool isNoopBitcast(Type *T1, Type *T2, const TargetLoweringBase &TLI)
iterator end()
Definition: BasicBlock.h:230
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:894
Module.h This file contains the declarations for the Module class.
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:230
virtual const TargetLowering * getTargetLowering() const
DenseMap< const MachineBasicBlock *, int > getFuncletMembership(const MachineFunction &MF)
signed less than
Definition: InstrTypes.h:909
LLVM_NODISCARD T pop_back_val()
Definition: SmallVector.h:382
Value * stripPointerCasts()
Strip off pointer casts, all-zero GEPs, and aliases.
Definition: Value.cpp:490
ISD::CondCode getFCmpCodeWithoutNaN(ISD::CondCode CC)
getFCmpCodeWithoutNaN - Given an ISD condition code comparing floats, return the equivalent code if w...
AttributeSet getAttributes() const
Return the attribute list for this Function.
Definition: Function.h:176
signed less or equal
Definition: InstrTypes.h:910
bool hasEHFunclets() const
Common super class of ArrayType, StructType and VectorType.
Definition: DerivedTypes.h:160
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.cpp:384
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition: Type.h:247
unsigned greater or equal
Definition: InstrTypes.h:904
ImmutableCallSite - establish a view to a call site for examination.
Definition: CallSite.h:665
#define I(x, y, z)
Definition: MD5.cpp:54
TerminatorInst * getTerminator()
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition: BasicBlock.cpp:124
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
Definition: SmallVector.h:135
bool hasLinkOnceODRLinkage() const
Definition: GlobalValue.h:406
unsigned getPointerSizeInBits(unsigned AS=0) const
Layout pointer size, in bits FIXME: The defaults need to be removed once all of the backends/clients ...
Definition: DataLayout.h:349
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:888
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
Definition: Casting.h:287
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:892
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
getEVT - Return the value type corresponding to the specified type.
Definition: ValueTypes.cpp:309
AttributeSet getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
bool isSafeToSpeculativelyExecute(const Value *V, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr)
Return true if the instruction does not have any effects besides calculating the result and does not ...
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition: Type.cpp:108
virtual const TargetInstrInfo * getInstrInfo() const
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:883
Module * getParent()
Get the module that this global value is contained inside of...
Definition: GlobalValue.h:537
LLVM Value Representation.
Definition: Value.h:71
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:893
Primary interface to the complete machine description for the target machine.
unsigned greater than
Definition: InstrTypes.h:903
static bool advanceToNextLeafType(SmallVectorImpl< CompositeType * > &SubTypes, SmallVectorImpl< unsigned > &Path)
Move the given iterators to the next leaf type in depth first traversal.
bool attributesPermitTailCall(const Function *F, const Instruction *I, const ReturnInst *Ret, const TargetLoweringBase &TLI, bool *AllowDifferingSizes=nullptr)
Test if given that the input instruction is in the tail call position, if there is an attribute misma...
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:885
unsigned ComputeLinearIndex(Type *Ty, const unsigned *Indices, const unsigned *IndicesEnd, unsigned CurIndex=0)
Compute the linearized index of a member in a nested aggregate/struct/array.
std::vector< ConstraintInfo > ConstraintInfoVector
Definition: InlineAsm.h:114
const BasicBlock * getParent() const
Definition: Instruction.h:62
#define T1
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:882
signed greater or equal
Definition: InstrTypes.h:908
Type * getTypeAtIndex(const Value *V) const
Given an index value into the type, return the type of the element.
Definition: Type.cpp:554
This file describes how to lower LLVM code to machine code.
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
This instruction inserts a struct field of array element value into an aggregate value.
static bool firstRealType(Type *Next, SmallVectorImpl< CompositeType * > &SubTypes, SmallVectorImpl< unsigned > &Path)
Find the first non-empty, scalar-like type in Next and setup the iterator components.
void resize(size_type N)
Definition: SmallVector.h:352