LLVM 17.0.0git
Analysis.cpp
//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto I : llvm::enumerate(STy->elements())) {
      Type *ET = I.value();
      if (Indices && *Indices == I.index())
        return ComputeLinearIndex(ET, Indices + 1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(ET, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out of bound");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset when jumping one element of the array.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out of bound");
      // If the index is inside the array, compute the index to the requested
      // element and recurse inside the element with the end of the indices
      // list.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices + 1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}

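// Worked example (illustrative comment, not part of the original source): for
// the type { i32, { i32, i32 }, [2 x float] } the scalar leaves are numbered
// in depth-first order, so the index paths map to linear indices as:
//
//   {0}    -> 0   (the leading i32)
//   {1, 0} -> 1
//   {1, 1} -> 2
//   {2, 0} -> 3
//   {2, 1} -> 4
//
// ComputeLinearIndex(Ty, {2, 1}) walks past the three leaves of fields 0 and 1
// (CurIndex = 3), adds EltLinearOffset * 1 = 1 inside the array, and returns 4.
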
/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<TypeSize> *Offsets,
                           TypeSize StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      // Don't compute the element offset if we didn't get a StructLayout above.
      TypeSize EltOffset = SL ? SL->getElementOffset(EI - EB)
                              : TypeSize::get(0, StartingOffset.isScalable());
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
                      StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    TypeSize EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (MemVTs)
    MemVTs->push_back(TLI.getMemValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

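// Example (illustrative; the exact EVTs and offsets depend on the target's
// DataLayout): for { i32, [2 x i16] } this produces ValueVTs = { i32, i16,
// i16 } and, with a typical layout that places the array field at byte 4,
// Offsets = { 0, 4, 6 }.
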
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<TypeSize> *Offsets,
                           TypeSize StartingOffset) {
  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, Offsets,
                         StartingOffset);
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<TypeSize> *Offsets,
                           uint64_t StartingOffset) {
  TypeSize Offset = TypeSize::get(StartingOffset, Ty->isScalableTy());
  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, Offsets, Offset);
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *FixedOffsets,
                           uint64_t StartingOffset) {
  TypeSize Offset = TypeSize::get(StartingOffset, Ty->isScalableTy());
  SmallVector<TypeSize, 4> Offsets;
  if (FixedOffsets)
    ComputeValueVTs(TLI, DL, Ty, ValueVTs, &Offsets, Offset);
  else
    ComputeValueVTs(TLI, DL, Ty, ValueVTs, nullptr, Offset);

  if (FixedOffsets)
    for (TypeSize Offset : Offsets)
      FixedOffsets->push_back(Offset.getKnownMinValue());
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<TypeSize> *Offsets,
                           uint64_t StartingOffset) {
  TypeSize Offset = TypeSize::get(StartingOffset, Ty->isScalableTy());
  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, MemVTs, Offsets, Offset);
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<uint64_t> *FixedOffsets,
                           uint64_t StartingOffset) {
  TypeSize Offset = TypeSize::get(StartingOffset, Ty->isScalableTy());
  SmallVector<TypeSize, 4> Offsets;
  if (FixedOffsets)
    ComputeValueVTs(TLI, DL, Ty, ValueVTs, MemVTs, &Offsets, Offset);
  else
    ComputeValueVTs(TLI, DL, Ty, ValueVTs, MemVTs, nullptr, Offset);

  if (FixedOffsets)
    for (TypeSize Offset : Offsets)
      FixedOffsets->push_back(Offset.getKnownMinValue());
}

void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                            SmallVectorImpl<LLT> &ValueTys,
                            SmallVectorImpl<uint64_t> *Offsets,
                            uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
      uint64_t EltOffset = SL ? SL->getElementOffset(I) : 0;
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}

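// Example (illustrative; layout depends on the DataLayout): for { i64, i32 }
// this produces ValueTys = { s64, s32 } and Offsets = { 0, 64 }. Note that,
// unlike ComputeValueVTs above, the offsets here are recorded in bits.
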
/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
  default: return CC;
  }
}

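// Example (illustrative): when a comparison is known to never see NaNs (e.g.
// it carries the nnan fast-math flag), both the ordered SETOLT and the
// unordered SETULT collapse to the plain SETLT, which targets can usually
// lower more cheaply.
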
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

ICmpInst::Predicate llvm::getICmpCondCode(ISD::CondCode Pred) {
  switch (Pred) {
  case ISD::SETEQ:
    return ICmpInst::ICMP_EQ;
  case ISD::SETNE:
    return ICmpInst::ICMP_NE;
  case ISD::SETLE:
    return ICmpInst::ICMP_SLE;
  case ISD::SETULE:
    return ICmpInst::ICMP_ULE;
  case ISD::SETGE:
    return ICmpInst::ICMP_SGE;
  case ISD::SETUGE:
    return ICmpInst::ICMP_UGE;
  case ISD::SETLT:
    return ICmpInst::ICMP_SLT;
  case ISD::SETULT:
    return ICmpInst::ICMP_ULT;
  case ISD::SETGT:
    return ICmpInst::ICMP_SGT;
  case ISD::SETUGT:
    return ICmpInst::ICMP_UGT;
  default:
    llvm_unreachable("Invalid ISD integer condition code!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase& TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) == TLI.isTypeLegal(EVT::getEVT(T2)));
}

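// Note (illustrative): pointer-to-pointer bitcasts are always no-ops here,
// while a vector-to-vector bitcast such as <4 x i32> -> <2 x i64> only counts
// as a no-op when both types are equally legal for the target.
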
/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V1; if V1 is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits =
          std::min((uint64_t)DataBits,
                   I->getType()->getPrimitiveSizeInBits().getFixedValue());
      NoopInput = Op;
    } else if (auto *CB = dyn_cast<CallBase>(I)) {
      const Value *ReturnedOp = CB->getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in, no
        // change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of the
      // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}

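// Example (illustrative): given
//
//   %big = call i64 @callee()
//   %small = trunc i64 %big to i32
//   ret i32 %small
//
// getNoopInput(%small, ...) returns %big on targets whose
// allowTruncateForTailCall hook accepts the i64 -> i32 truncation, and sets
// DataBits to 32 so callers know only the low 32 bits need to match.
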
/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case without
  // a "returned" attribute, the search will be blocked immediately and the loop
  // is a no-op.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(Type *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate.
static bool advanceToNextLeafType(SmallVectorImpl<Type *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of the
  // coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType =
      ExtractValueInst::getIndexedType(SubTypes.back(), Path.back());
  while (DeeperType->isAggregateType()) {
    if (!indexReallyValid(DeeperType, 0))
      return true;

    SubTypes.push_back(DeeperType);
    Path.push_back(0);

    DeeperType = ExtractValueInst::getIndexedType(DeeperType, 0);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and setup the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
/// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
/// i32 in that type.
static bool firstRealType(Type *Next, SmallVectorImpl<Type *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Type *FirstInner = ExtractValueInst::getIndexedType(Next, 0)) {
    SubTypes.push_back(Next);
    Path.push_back(0);
    Next = FirstInner;
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
             ->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<Type *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
               ->isAggregateType());

  return true;
}

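// Continuing the firstRealType example above (illustrative): starting from
// {[0 x i64], {{}, i32, {}}, i32} with Path = [1, 1] (the nested i32),
// nextRealType advances the iterator to Path = [2], the trailing top-level
// i32; one further call exhausts the type and returns false.
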
/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(const CallBase &Call, const TargetMachine &TM) {
  const BasicBlock *ExitBB = Call.getParent();
  const Instruction *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret && ((!TM.Options.GuaranteedTailCallOpt &&
                Call.getCallingConv() != CallingConv::Tail &&
                Call.getCallingConv() != CallingConv::SwiftTail) ||
               !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  // Check for all calls including speculatable functions.
  for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
    if (&*BBI == &Call)
      break;
    // Debug info intrinsics do not get in the way of tail call optimization.
    // Pseudo probe intrinsics do not block tail call optimization either.
    if (BBI->isDebugOrPseudoInst())
      continue;
    // A lifetime end, assume or noalias.decl intrinsic should not stop tail
    // call optimization.
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
      if (II->getIntrinsicID() == Intrinsic::lifetime_end ||
          II->getIntrinsicID() == Intrinsic::assume ||
          II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl)
        continue;
    if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
        !isSafeToSpeculativelyExecute(&*BBI))
      return false;
  }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, &Call, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}

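// Example (illustrative): in
//
//   define i32 @f(i32 %x) {
//     %r = tail call i32 @g(i32 %x)
//     ret i32 %r
//   }
//
// the call to @g is in tail call position: only the return follows it. Any
// intervening instruction that writes or reads memory (e.g. a store into a
// local) would make this function return false.
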
bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
                                    const ReturnInst *Ret,
                                    const TargetLoweringBase &TLI,
                                    bool *AllowDifferingSizes) {
  // ADS may be null, so don't write to it directly.
  bool DummyADS;
  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
  ADS = true;

  AttrBuilder CallerAttrs(F->getContext(), F->getAttributes().getRetAttrs());
  AttrBuilder CalleeAttrs(F->getContext(),
                          cast<CallInst>(I)->getAttributes().getRetAttrs());

  // The following attributes are completely benign as far as calling convention
  // goes; they shouldn't affect whether the call is a tail call.
  for (const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
                           Attribute::DereferenceableOrNull, Attribute::NoAlias,
                           Attribute::NonNull, Attribute::NoUndef}) {
    CallerAttrs.removeAttribute(Attr);
    CalleeAttrs.removeAttribute(Attr);
  }

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // Drop sext and zext return attributes if the result is not used.
  // This enables tail calls for code like:
  //
  // define void @caller() {
  // entry:
  //   %unused_result = tail call zeroext i1 @callee()
  //   br label %retlabel
  // retlabel:
  //   ret void
  // }
  if (I->use_empty()) {
    CalleeAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}

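// Example (illustrative): if the caller is declared "define zeroext i8
// @caller()", a forwarded tail call must carry the matching zeroext return
// attribute at the call site; if it doesn't, the caller's promise to
// zero-extend its own return value could be violated, so this returns false.
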
/// Check whether B is a bitcast of a pointer type to another pointer type,
/// which is equal to A.
static bool isPointerBitcastEqualTo(const Value *A, const Value *B) {
  assert(A && B && "Expected non-null inputs!");

  auto *BitCastIn = dyn_cast<BitCastInst>(B);

  if (!BitCastIn)
    return false;

  if (!A->getType()->isPointerTy() || !B->getType()->isPointerTy())
    return false;

  return A == BitCastIn->getOperand(0);
}

bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  // Intrinsics like llvm.memcpy have no return value, but the expanded
  // libcall may or may not have one. On most platforms, it will be
  // expanded as memcpy in libc, which returns the first argument. On
  // other platforms like arm-none-eabi, memcpy may be expanded as a
  // library call without a return value, like __aeabi_memcpy.
  const CallInst *Call = cast<CallInst>(I);
  if (Function *F = Call->getCalledFunction()) {
    Intrinsic::ID IID = F->getIntrinsicID();
    if (((IID == Intrinsic::memcpy &&
          TLI.getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy")) ||
         (IID == Intrinsic::memmove &&
          TLI.getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove")) ||
         (IID == Intrinsic::memset &&
          TLI.getLibcallName(RTLIB::MEMSET) == StringRef("memset"))) &&
        (RetVal == Call->getArgOperand(0) ||
         isPointerBitcastEqualTo(RetVal, Call->getArgOperand(0))))
      return true;
  }

  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<Type *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned, so it doesn't matter what the callee put
  // there; it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we need
      // *something*.
      Type *SlotType =
          ExtractValueInst::getIndexedType(RetSubTypes.back(), RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(llvm::reverse(RetPath));
    SmallVector<unsigned, 4> TmpCallPath(llvm::reverse(CallPath));

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}

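// Example (illustrative): the memcpy special case above accepts
//
//   define ptr @copy(ptr %dst, ptr %src, i64 %n) {
//     tail call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %n, i1 false)
//     ret ptr %dst
//   }
//
// because the libc memcpy that the intrinsic lowers to returns its first
// argument, which is exactly the value being returned.
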
static void collectEHScopeMembers(
    DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new scopes.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our scope.
    auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == EHScope && "MBB is part of two scopes!");
      continue;
    }

    // Returns are boundaries where scope transfer can occur, don't follow
    // successors.
    if (Visiting->isEHScopeReturnBlock())
      continue;

    append_range(Worklist, Visiting->successors());
  }
}

DenseMap<const MachineBasicBlock *, int>
llvm::getEHScopeMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> EHScopeMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.hasEHScopes())
    return EHScopeMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHScopeEntry()) {
      EHScopeBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not scopes for SEH so do not consider CatchRet to
    // transfer control to another scope.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH pads.
  if (EHScopeBlocks.empty())
    return EHScopeMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a scope are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the scopes.
  for (const MachineBasicBlock *MBB : EHScopeBlocks)
    collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really scopes, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return EHScopeMembership;
}
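
// Example (illustrative): for a function lowered with WinEH funclets, blocks
// reachable from the entry are tagged with the entry block's number, blocks
// reachable from an EH scope entry (e.g. a catchpad) are tagged with that
// block's number, and each catchret transfers its successor back to the
// color recorded in its second operand (or to the entry for SEH).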