Bug Summary

File: llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
Warning: line 1246, column 18
Access to field 'TheKind' results in a dereference of a null pointer (loaded from variable 'Res')

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name WholeProgramDevirt.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-12/lib/clang/12.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/build-llvm/lib/Transforms/IPO -I /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/llvm/lib/Transforms/IPO -I /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/build-llvm/include -I /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-12/lib/clang/12.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/build-llvm/lib/Transforms/IPO -fdebug-prefix-map=/build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-08-06-171148-17323-1 -x c++ /build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp

/build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp

1//===- WholeProgramDevirt.cpp - Whole program virtual call optimization ---===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass implements whole program optimization of virtual calls in cases
10// where we know (via !type metadata) that the list of callees is fixed. This
11// includes the following:
12// - Single implementation devirtualization: if a virtual call has a single
13// possible callee, replace all calls with a direct call to that callee.
14// - Virtual constant propagation: if the virtual function's return type is an
15// integer <=64 bits and all possible callees are readnone, for each class and
16// each list of constant arguments: evaluate the function, store the return
17// value alongside the virtual table, and rewrite each virtual call as a load
18// from the virtual table.
19// - Uniform return value optimization: if the conditions for virtual constant
20// propagation hold and each function returns the same constant value, replace
21// each virtual call with that constant.
22// - Unique return value optimization for i1 return values: if the conditions
23// for virtual constant propagation hold and a single vtable's function
24// returns 0, or a single vtable's function returns 1, replace each virtual
25// call with a comparison of the vptr against that vtable's address.
26//
27// This pass is intended to be used during the regular and thin LTO pipelines:
28//
29// During regular LTO, the pass determines the best optimization for each
30// virtual call and applies the resolutions directly to virtual calls that are
31// eligible for virtual call optimization (i.e. calls that use either of the
32// llvm.assume(llvm.type.test) or llvm.type.checked.load intrinsics).
33//
34// During hybrid Regular/ThinLTO, the pass operates in two phases:
35// - Export phase: this is run during the thin link over a single merged module
36// that contains all vtables with !type metadata that participate in the link.
37// The pass computes a resolution for each virtual call and stores it in the
38// type identifier summary.
39// - Import phase: this is run during the thin backends over the individual
40// modules. The pass applies the resolutions previously computed during the
41// import phase to each eligible virtual call.
42//
43// During ThinLTO, the pass operates in two phases:
44// - Export phase: this is run during the thin link over the index which
45// contains a summary of all vtables with !type metadata that participate in
46// the link. It computes a resolution for each virtual call and stores it in
47// the type identifier summary. Only single implementation devirtualization
48// is supported.
49// - Import phase: (same as with hybrid case above).
50//
51//===----------------------------------------------------------------------===//
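To make the first case concrete, here is a minimal C++ sketch (hypothetical types, not from the LLVM test suite) of a call this pass can devirtualize when the program is built with whole-program vtable optimization, e.g. Clang's -flto -fwhole-program-vtables:

    struct Base { virtual int f() = 0; };
    struct Only final : Base { int f() override { return 42; } };

    int call(Base *B) {
      return B->f(); // indirect call through the vtable slot for f()
    }

Only::f is the sole possible callee, so the pass can rewrite the indirect call into a direct call to Only::f; and because Only::f takes no arguments beyond "this" and returns a constant, the uniform return value optimization could in principle fold the call to 42 instead.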
52
53#include "llvm/Transforms/IPO/WholeProgramDevirt.h"
54#include "llvm/ADT/ArrayRef.h"
55#include "llvm/ADT/DenseMap.h"
56#include "llvm/ADT/DenseMapInfo.h"
57#include "llvm/ADT/DenseSet.h"
58#include "llvm/ADT/MapVector.h"
59#include "llvm/ADT/SmallVector.h"
60#include "llvm/ADT/Triple.h"
61#include "llvm/ADT/iterator_range.h"
62#include "llvm/Analysis/AliasAnalysis.h"
63#include "llvm/Analysis/BasicAliasAnalysis.h"
64#include "llvm/Analysis/OptimizationRemarkEmitter.h"
65#include "llvm/Analysis/TypeMetadataUtils.h"
66#include "llvm/Bitcode/BitcodeReader.h"
67#include "llvm/Bitcode/BitcodeWriter.h"
68#include "llvm/IR/Constants.h"
69#include "llvm/IR/DataLayout.h"
70#include "llvm/IR/DebugLoc.h"
71#include "llvm/IR/DerivedTypes.h"
72#include "llvm/IR/Dominators.h"
73#include "llvm/IR/Function.h"
74#include "llvm/IR/GlobalAlias.h"
75#include "llvm/IR/GlobalVariable.h"
76#include "llvm/IR/IRBuilder.h"
77#include "llvm/IR/InstrTypes.h"
78#include "llvm/IR/Instruction.h"
79#include "llvm/IR/Instructions.h"
80#include "llvm/IR/Intrinsics.h"
81#include "llvm/IR/LLVMContext.h"
82#include "llvm/IR/Metadata.h"
83#include "llvm/IR/Module.h"
84#include "llvm/IR/ModuleSummaryIndexYAML.h"
85#include "llvm/InitializePasses.h"
86#include "llvm/Pass.h"
87#include "llvm/PassRegistry.h"
88#include "llvm/Support/Casting.h"
89#include "llvm/Support/CommandLine.h"
90#include "llvm/Support/Errc.h"
91#include "llvm/Support/Error.h"
92#include "llvm/Support/FileSystem.h"
93#include "llvm/Support/GlobPattern.h"
94#include "llvm/Support/MathExtras.h"
95#include "llvm/Transforms/IPO.h"
96#include "llvm/Transforms/IPO/FunctionAttrs.h"
97#include "llvm/Transforms/Utils/Evaluator.h"
98#include <algorithm>
99#include <cstddef>
100#include <map>
101#include <set>
102#include <string>
103
104using namespace llvm;
105using namespace wholeprogramdevirt;
106
107#define DEBUG_TYPE "wholeprogramdevirt"
108
109static cl::opt<PassSummaryAction> ClSummaryAction(
110 "wholeprogramdevirt-summary-action",
111 cl::desc("What to do with the summary when running this pass"),
112 cl::values(clEnumValN(PassSummaryAction::None, "none", "Do nothing"),
113 clEnumValN(PassSummaryAction::Import, "import",
114 "Import typeid resolutions from summary and globals"),
115 clEnumValN(PassSummaryAction::Export, "export",
116 "Export typeid resolutions to summary and globals")),
117 cl::Hidden);
118
119static cl::opt<std::string> ClReadSummary(
120 "wholeprogramdevirt-read-summary",
121 cl::desc(
122 "Read summary from given bitcode or YAML file before running pass"),
123 cl::Hidden);
124
125static cl::opt<std::string> ClWriteSummary(
126 "wholeprogramdevirt-write-summary",
127 cl::desc("Write summary to given bitcode or YAML file after running pass. "
128 "Output file format is deduced from extension: *.bc means writing "
129 "bitcode, otherwise YAML"),
130 cl::Hidden);
131
132static cl::opt<unsigned>
133 ClThreshold("wholeprogramdevirt-branch-funnel-threshold", cl::Hidden,
134 cl::init(10), cl::ZeroOrMore,
135 cl::desc("Maximum number of call targets per "
136 "call site to enable branch funnels"));
137
138static cl::opt<bool>
139 PrintSummaryDevirt("wholeprogramdevirt-print-index-based", cl::Hidden,
140 cl::init(false), cl::ZeroOrMore,
141 cl::desc("Print index-based devirtualization messages"));
142
143/// Provide a way to force enable whole program visibility in tests.
144/// This is needed to support legacy tests that don't contain
145/// !vcall_visibility metadata (the mere presence of type tests
146/// previously implied hidden visibility).
147cl::opt<bool>
148 WholeProgramVisibility("whole-program-visibility", cl::init(false),
149 cl::Hidden, cl::ZeroOrMore,
150 cl::desc("Enable whole program visibility"));
151
152/// Provide a way to force disable whole program visibility for debugging or
153/// workarounds, when it has been enabled via the linker.
154cl::opt<bool> DisableWholeProgramVisibility(
155 "disable-whole-program-visibility", cl::init(false), cl::Hidden,
156 cl::ZeroOrMore,
157 cl::desc("Disable whole program visibility (overrides enabling options)"));
158
159/// Provide a way to prevent certain functions from being devirtualized.
160cl::list<std::string>
161 SkipFunctionNames("wholeprogramdevirt-skip",
162 cl::desc("Prevent function(s) from being devirtualized"),
163 cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated);
164
165namespace {
166struct PatternList {
167 std::vector<GlobPattern> Patterns;
168 template <class T> void init(const T &StringList) {
169 for (const auto &S : StringList)
170 if (Expected<GlobPattern> Pat = GlobPattern::create(S))
171 Patterns.push_back(std::move(*Pat));
172 }
173 bool match(StringRef S) {
174 for (const GlobPattern &P : Patterns)
175 if (P.match(S))
176 return true;
177 return false;
178 }
179};
180} // namespace
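A minimal usage sketch for PatternList (the pattern strings are hypothetical; this mirrors how FunctionsToSkip is initialized from -wholeprogramdevirt-skip below):

    PatternList Skip;
    Skip.init(std::vector<std::string>{"_ZN3Foo*", "bar"});
    Skip.match("_ZN3Foo1fEv"); // true: matches the glob "_ZN3Foo*"
    Skip.match("baz");         // false: no pattern matches

Note that init() silently skips any string that fails to parse as a glob, since only a successful Expected<GlobPattern> is pushed.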
181
182// Find the minimum offset that we may store a value of size Size bits at. If
183// IsAfter is set, look for an offset after the object, otherwise look for an
184// offset before the object.
185uint64_t
186wholeprogramdevirt::findLowestOffset(ArrayRef<VirtualCallTarget> Targets,
187 bool IsAfter, uint64_t Size) {
188 // Find a minimum offset taking into account only vtable sizes.
189 uint64_t MinByte = 0;
190 for (const VirtualCallTarget &Target : Targets) {
191 if (IsAfter)
192 MinByte = std::max(MinByte, Target.minAfterBytes());
193 else
194 MinByte = std::max(MinByte, Target.minBeforeBytes());
195 }
196
197 // Build a vector of arrays of bytes covering, for each target, a slice of the
198 // used region (see AccumBitVector::BytesUsed in
199 // llvm/Transforms/IPO/WholeProgramDevirt.h) starting at MinByte. Effectively,
200 // this aligns the used regions to start at MinByte.
201 //
202 // In this example, A, B and C are vtables, # is a byte already allocated for
203 // a virtual function pointer, AAAA... (etc.) are the used regions for the
204 // vtables and Offset(X) is the value computed for the Offset variable below
205 // for X.
206 //
207 // Offset(A)
208 // | |
209 // |MinByte
210 // A: ################AAAAAAAA|AAAAAAAA
211 // B: ########BBBBBBBBBBBBBBBB|BBBB
212 // C: ########################|CCCCCCCCCCCCCCCC
213 // | Offset(B) |
214 //
215 // This code produces the slices of A, B and C that appear after the divider
216 // at MinByte.
217 std::vector<ArrayRef<uint8_t>> Used;
218 for (const VirtualCallTarget &Target : Targets) {
219 ArrayRef<uint8_t> VTUsed = IsAfter ? Target.TM->Bits->After.BytesUsed
220 : Target.TM->Bits->Before.BytesUsed;
221 uint64_t Offset = IsAfter ? MinByte - Target.minAfterBytes()
222 : MinByte - Target.minBeforeBytes();
223
224 // Disregard used regions that are smaller than Offset. These are
225 // effectively all-free regions that do not need to be checked.
226 if (VTUsed.size() > Offset)
227 Used.push_back(VTUsed.slice(Offset));
228 }
229
230 if (Size == 1) {
231 // Find a free bit in each member of Used.
232 for (unsigned I = 0;; ++I) {
233 uint8_t BitsUsed = 0;
234 for (auto &&B : Used)
235 if (I < B.size())
236 BitsUsed |= B[I];
237 if (BitsUsed != 0xff)
238 return (MinByte + I) * 8 +
239 countTrailingZeros(uint8_t(~BitsUsed), ZB_Undefined);
240 }
241 } else {
242 // Find a free (Size/8) byte region in each member of Used.
243 // FIXME: see if alignment helps.
244 for (unsigned I = 0;; ++I) {
245 for (auto &&B : Used) {
246 unsigned Byte = 0;
247 while ((I + Byte) < B.size() && Byte < (Size / 8)) {
248 if (B[I + Byte])
249 goto NextI;
250 ++Byte;
251 }
252 }
253 return (MinByte + I) * 8;
254 NextI:;
255 }
256 }
257}
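A worked instance of the Size == 1 search above (illustrative numbers): suppose MinByte is 2 and the aligned used regions are A = {0xff, 0x0f} and B = {0xff}. At I = 0, BitsUsed = 0xff, so every bit is taken. At I = 1, only A is long enough to contribute, BitsUsed = 0x0f, and countTrailingZeros(~0x0f) = 4, so the function returns (2 + 1) * 8 + 4 = 28, i.e. bit 4 of byte 3.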
258
259void wholeprogramdevirt::setBeforeReturnValues(
260 MutableArrayRef<VirtualCallTarget> Targets, uint64_t AllocBefore,
261 unsigned BitWidth, int64_t &OffsetByte, uint64_t &OffsetBit) {
262 if (BitWidth == 1)
263 OffsetByte = -(AllocBefore / 8 + 1);
264 else
265 OffsetByte = -((AllocBefore + 7) / 8 + (BitWidth + 7) / 8);
266 OffsetBit = AllocBefore % 8;
267
268 for (VirtualCallTarget &Target : Targets) {
269 if (BitWidth == 1)
270 Target.setBeforeBit(AllocBefore);
271 else
272 Target.setBeforeBytes(AllocBefore, (BitWidth + 7) / 8);
273 }
274}
275
276void wholeprogramdevirt::setAfterReturnValues(
277 MutableArrayRef<VirtualCallTarget> Targets, uint64_t AllocAfter,
278 unsigned BitWidth, int64_t &OffsetByte, uint64_t &OffsetBit) {
279 if (BitWidth == 1)
280 OffsetByte = AllocAfter / 8;
281 else
282 OffsetByte = (AllocAfter + 7) / 8;
283 OffsetBit = AllocAfter % 8;
284
285 for (VirtualCallTarget &Target : Targets) {
286 if (BitWidth == 1)
287 Target.setAfterBit(AllocAfter);
288 else
289 Target.setAfterBytes(AllocAfter, (BitWidth + 7) / 8);
290 }
291}
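For example (illustrative values), calling setAfterReturnValues with AllocAfter = 8 and BitWidth = 32 yields OffsetByte = (8 + 7) / 8 = 1 and OffsetBit = 8 % 8 = 0: the value is stored starting one byte into the region after the vtable, and each target reserves (32 + 7) / 8 = 4 bytes there via setAfterBytes(8, 4).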
292
293VirtualCallTarget::VirtualCallTarget(Function *Fn, const TypeMemberInfo *TM)
294 : Fn(Fn), TM(TM),
295 IsBigEndian(Fn->getParent()->getDataLayout().isBigEndian()), WasDevirt(false) {}
296
297namespace {
298
299// A slot in a set of virtual tables. The TypeID identifies the set of virtual
300// tables, and the ByteOffset is the offset in bytes from the address point to
301// the virtual function pointer.
302struct VTableSlot {
303 Metadata *TypeID;
304 uint64_t ByteOffset;
305};
306
307} // end anonymous namespace
308
309namespace llvm {
310
311template <> struct DenseMapInfo<VTableSlot> {
312 static VTableSlot getEmptyKey() {
313 return {DenseMapInfo<Metadata *>::getEmptyKey(),
314 DenseMapInfo<uint64_t>::getEmptyKey()};
315 }
316 static VTableSlot getTombstoneKey() {
317 return {DenseMapInfo<Metadata *>::getTombstoneKey(),
318 DenseMapInfo<uint64_t>::getTombstoneKey()};
319 }
320 static unsigned getHashValue(const VTableSlot &I) {
321 return DenseMapInfo<Metadata *>::getHashValue(I.TypeID) ^
322 DenseMapInfo<uint64_t>::getHashValue(I.ByteOffset);
323 }
324 static bool isEqual(const VTableSlot &LHS,
325 const VTableSlot &RHS) {
326 return LHS.TypeID == RHS.TypeID && LHS.ByteOffset == RHS.ByteOffset;
327 }
328};
329
330template <> struct DenseMapInfo<VTableSlotSummary> {
331 static VTableSlotSummary getEmptyKey() {
332 return {DenseMapInfo<StringRef>::getEmptyKey(),
333 DenseMapInfo<uint64_t>::getEmptyKey()};
334 }
335 static VTableSlotSummary getTombstoneKey() {
336 return {DenseMapInfo<StringRef>::getTombstoneKey(),
337 DenseMapInfo<uint64_t>::getTombstoneKey()};
338 }
339 static unsigned getHashValue(const VTableSlotSummary &I) {
340 return DenseMapInfo<StringRef>::getHashValue(I.TypeID) ^
341 DenseMapInfo<uint64_t>::getHashValue(I.ByteOffset);
342 }
343 static bool isEqual(const VTableSlotSummary &LHS,
344 const VTableSlotSummary &RHS) {
345 return LHS.TypeID == RHS.TypeID && LHS.ByteOffset == RHS.ByteOffset;
346 }
347};
348
349} // end namespace llvm
350
351namespace {
352
353// A virtual call site. VTable is the loaded virtual table pointer, and CB is
354// the indirect virtual call.
355struct VirtualCallSite {
356 Value *VTable = nullptr;
357 CallBase &CB;
358
359 // If non-null, this field points to the associated unsafe use count stored in
360 // the DevirtModule::NumUnsafeUsesForTypeTest map below. See the description
361 // of that field for details.
362 unsigned *NumUnsafeUses = nullptr;
363
364 void
365 emitRemark(const StringRef OptName, const StringRef TargetName,
366 function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter) {
367 Function *F = CB.getCaller();
368 DebugLoc DLoc = CB.getDebugLoc();
369 BasicBlock *Block = CB.getParent();
370
371 using namespace ore;
372 OREGetter(F).emit(OptimizationRemark(DEBUG_TYPE, OptName, DLoc, Block)
373 << NV("Optimization", OptName)
374 << ": devirtualized a call to "
375 << NV("FunctionName", TargetName));
376 }
377
378 void replaceAndErase(
379 const StringRef OptName, const StringRef TargetName, bool RemarksEnabled,
380 function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter,
381 Value *New) {
382 if (RemarksEnabled)
383 emitRemark(OptName, TargetName, OREGetter);
384 CB.replaceAllUsesWith(New);
385 if (auto *II = dyn_cast<InvokeInst>(&CB)) {
386 BranchInst::Create(II->getNormalDest(), &CB);
387 II->getUnwindDest()->removePredecessor(II->getParent());
388 }
389 CB.eraseFromParent();
390 // This use is no longer unsafe.
391 if (NumUnsafeUses)
392 --*NumUnsafeUses;
393 }
394};
395
396// Call site information collected for a specific VTableSlot and possibly a list
397// of constant integer arguments. The grouping by arguments is handled by the
398// VTableSlotInfo class.
399struct CallSiteInfo {
400 /// The set of call sites for this slot. Used during regular LTO and the
401 /// import phase of ThinLTO (as well as the export phase of ThinLTO for any
402 /// call sites that appear in the merged module itself); in each of these
403 /// cases we are directly operating on the call sites at the IR level.
404 std::vector<VirtualCallSite> CallSites;
405
406 /// Whether all call sites represented by this CallSiteInfo, including those
407 /// in summaries, have been devirtualized. This starts off as true because a
408 /// default constructed CallSiteInfo represents no call sites.
409 bool AllCallSitesDevirted = true;
410
411 // These fields are used during the export phase of ThinLTO and reflect
412 // information collected from function summaries.
413
414 /// Whether any function summary contains an llvm.assume(llvm.type.test) for
415 /// this slot.
416 bool SummaryHasTypeTestAssumeUsers = false;
417
418 /// CFI-specific: a vector containing the list of function summaries that use
419 /// the llvm.type.checked.load intrinsic and therefore will require
420 /// resolutions for llvm.type.test in order to implement CFI checks if
421 /// devirtualization was unsuccessful. If devirtualization was successful, the
422 /// pass will clear this vector by calling markDevirt(). If at the end of the
423 /// pass the vector is non-empty, we will need to add a use of llvm.type.test
424 /// to each of the function summaries in the vector.
425 std::vector<FunctionSummary *> SummaryTypeCheckedLoadUsers;
426 std::vector<FunctionSummary *> SummaryTypeTestAssumeUsers;
427
428 bool isExported() const {
429 return SummaryHasTypeTestAssumeUsers ||
61: Assuming field 'SummaryHasTypeTestAssumeUsers' is true
62: Returning the value 1, which participates in a condition later
430 !SummaryTypeCheckedLoadUsers.empty();
431 }
432
433 void addSummaryTypeCheckedLoadUser(FunctionSummary *FS) {
434 SummaryTypeCheckedLoadUsers.push_back(FS);
435 AllCallSitesDevirted = false;
436 }
437
438 void addSummaryTypeTestAssumeUser(FunctionSummary *FS) {
439 SummaryTypeTestAssumeUsers.push_back(FS);
440 SummaryHasTypeTestAssumeUsers = true;
441 AllCallSitesDevirted = false;
442 }
443
444 void markDevirt() {
445 AllCallSitesDevirted = true;
446
447 // As explained in the comment for SummaryTypeCheckedLoadUsers.
448 SummaryTypeCheckedLoadUsers.clear();
449 }
450};
451
452// Call site information collected for a specific VTableSlot.
453struct VTableSlotInfo {
454 // The set of call sites which do not have all constant integer arguments
455 // (excluding "this").
456 CallSiteInfo CSInfo;
457
458 // The set of call sites with all constant integer arguments (excluding
459 // "this"), grouped by argument list.
460 std::map<std::vector<uint64_t>, CallSiteInfo> ConstCSInfo;
461
462 void addCallSite(Value *VTable, CallBase &CB, unsigned *NumUnsafeUses);
463
464private:
465 CallSiteInfo &findCallSiteInfo(CallBase &CB);
466};
467
468CallSiteInfo &VTableSlotInfo::findCallSiteInfo(CallBase &CB) {
469 std::vector<uint64_t> Args;
470 auto *CBType = dyn_cast<IntegerType>(CB.getType());
471 if (!CBType || CBType->getBitWidth() > 64 || CB.arg_empty())
472 return CSInfo;
473 for (auto &&Arg : make_range(CB.arg_begin() + 1, CB.arg_end())) {
474 auto *CI = dyn_cast<ConstantInt>(Arg);
475 if (!CI || CI->getBitWidth() > 64)
476 return CSInfo;
477 Args.push_back(CI->getZExtValue());
478 }
479 return ConstCSInfo[Args];
480}
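To make the bucketing in findCallSiteInfo concrete, a sketch with hypothetical call sites (assume each callee returns an integer no wider than 64 bits unless noted):

    // obj->f(1, 2)  ->  ConstCSInfo[{1, 2}]  (all non-"this" args constant)
    // obj->f(1, 2)  ->  the same bucket; both sites share one CallSiteInfo
    // obj->g()      ->  ConstCSInfo[{}]      (empty constant-argument list)
    // obj->f(x, 2)  ->  CSInfo               (x is not a ConstantInt)
    // obj->h(1)     ->  CSInfo, if h returns void or wider than 64 bits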
481
482void VTableSlotInfo::addCallSite(Value *VTable, CallBase &CB,
483 unsigned *NumUnsafeUses) {
484 auto &CSI = findCallSiteInfo(CB);
485 CSI.AllCallSitesDevirted = false;
486 CSI.CallSites.push_back({VTable, CB, NumUnsafeUses});
487}
488
489struct DevirtModule {
490 Module &M;
491 function_ref<AAResults &(Function &)> AARGetter;
492 function_ref<DominatorTree &(Function &)> LookupDomTree;
493
494 ModuleSummaryIndex *ExportSummary;
495 const ModuleSummaryIndex *ImportSummary;
496
497 IntegerType *Int8Ty;
498 PointerType *Int8PtrTy;
499 IntegerType *Int32Ty;
500 IntegerType *Int64Ty;
501 IntegerType *IntPtrTy;
502 /// Sizeless array type, used for imported vtables. This provides a signal
503 /// to analyzers that these imports may alias, as they do for example
504 /// when multiple unique return values occur in the same vtable.
505 ArrayType *Int8Arr0Ty;
506
507 bool RemarksEnabled;
508 function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter;
509
510 MapVector<VTableSlot, VTableSlotInfo> CallSlots;
511
512 // This map keeps track of the number of "unsafe" uses of a loaded function
513 // pointer. The key is the associated llvm.type.test intrinsic call generated
514 // by this pass. An unsafe use is one that calls the loaded function pointer
515 // directly. Every time we eliminate an unsafe use (for example, by
516 // devirtualizing it or by applying virtual constant propagation), we
517 // decrement the value stored in this map. If a value reaches zero, we can
518 // eliminate the type check by RAUWing the associated llvm.type.test call with
519 // true.
520 std::map<CallInst *, unsigned> NumUnsafeUsesForTypeTest;
521 PatternList FunctionsToSkip;
522
523 DevirtModule(Module &M, function_ref<AAResults &(Function &)> AARGetter,
524 function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter,
525 function_ref<DominatorTree &(Function &)> LookupDomTree,
526 ModuleSummaryIndex *ExportSummary,
527 const ModuleSummaryIndex *ImportSummary)
528 : M(M), AARGetter(AARGetter), LookupDomTree(LookupDomTree),
529 ExportSummary(ExportSummary), ImportSummary(ImportSummary),
530 Int8Ty(Type::getInt8Ty(M.getContext())),
531 Int8PtrTy(Type::getInt8PtrTy(M.getContext())),
532 Int32Ty(Type::getInt32Ty(M.getContext())),
533 Int64Ty(Type::getInt64Ty(M.getContext())),
534 IntPtrTy(M.getDataLayout().getIntPtrType(M.getContext(), 0)),
535 Int8Arr0Ty(ArrayType::get(Type::getInt8Ty(M.getContext()), 0)),
536 RemarksEnabled(areRemarksEnabled()), OREGetter(OREGetter) {
537 assert(!(ExportSummary && ImportSummary));
538 FunctionsToSkip.init(SkipFunctionNames);
539 }
540
541 bool areRemarksEnabled();
542
543 void
544 scanTypeTestUsers(Function *TypeTestFunc,
545 DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap);
546 void scanTypeCheckedLoadUsers(Function *TypeCheckedLoadFunc);
547
548 void buildTypeIdentifierMap(
549 std::vector<VTableBits> &Bits,
550 DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap);
551 bool
552 tryFindVirtualCallTargets(std::vector<VirtualCallTarget> &TargetsForSlot,
553 const std::set<TypeMemberInfo> &TypeMemberInfos,
554 uint64_t ByteOffset);
555
556 void applySingleImplDevirt(VTableSlotInfo &SlotInfo, Constant *TheFn,
557 bool &IsExported);
558 bool trySingleImplDevirt(ModuleSummaryIndex *ExportSummary,
559 MutableArrayRef<VirtualCallTarget> TargetsForSlot,
560 VTableSlotInfo &SlotInfo,
561 WholeProgramDevirtResolution *Res);
562
563 void applyICallBranchFunnel(VTableSlotInfo &SlotInfo, Constant *JT,
564 bool &IsExported);
565 void tryICallBranchFunnel(MutableArrayRef<VirtualCallTarget> TargetsForSlot,
566 VTableSlotInfo &SlotInfo,
567 WholeProgramDevirtResolution *Res, VTableSlot Slot);
568
569 bool tryEvaluateFunctionsWithArgs(
570 MutableArrayRef<VirtualCallTarget> TargetsForSlot,
571 ArrayRef<uint64_t> Args);
572
573 void applyUniformRetValOpt(CallSiteInfo &CSInfo, StringRef FnName,
574 uint64_t TheRetVal);
575 bool tryUniformRetValOpt(MutableArrayRef<VirtualCallTarget> TargetsForSlot,
576 CallSiteInfo &CSInfo,
577 WholeProgramDevirtResolution::ByArg *Res);
578
579 // Returns the global symbol name that is used to export information about the
580 // given vtable slot and list of arguments.
581 std::string getGlobalName(VTableSlot Slot, ArrayRef<uint64_t> Args,
582 StringRef Name);
583
584 bool shouldExportConstantsAsAbsoluteSymbols();
585
586 // This function is called during the export phase to create a symbol
587 // definition containing information about the given vtable slot and list of
588 // arguments.
589 void exportGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args, StringRef Name,
590 Constant *C);
591 void exportConstant(VTableSlot Slot, ArrayRef<uint64_t> Args, StringRef Name,
592 uint32_t Const, uint32_t &Storage);
593
594 // This function is called during the import phase to create a reference to
595 // the symbol definition created during the export phase.
596 Constant *importGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args,
597 StringRef Name);
598 Constant *importConstant(VTableSlot Slot, ArrayRef<uint64_t> Args,
599 StringRef Name, IntegerType *IntTy,
600 uint32_t Storage);
601
602 Constant *getMemberAddr(const TypeMemberInfo *M);
603
604 void applyUniqueRetValOpt(CallSiteInfo &CSInfo, StringRef FnName, bool IsOne,
605 Constant *UniqueMemberAddr);
606 bool tryUniqueRetValOpt(unsigned BitWidth,
607 MutableArrayRef<VirtualCallTarget> TargetsForSlot,
608 CallSiteInfo &CSInfo,
609 WholeProgramDevirtResolution::ByArg *Res,
610 VTableSlot Slot, ArrayRef<uint64_t> Args);
611
612 void applyVirtualConstProp(CallSiteInfo &CSInfo, StringRef FnName,
613 Constant *Byte, Constant *Bit);
614 bool tryVirtualConstProp(MutableArrayRef<VirtualCallTarget> TargetsForSlot,
615 VTableSlotInfo &SlotInfo,
616 WholeProgramDevirtResolution *Res, VTableSlot Slot);
617
618 void rebuildGlobal(VTableBits &B);
619
620 // Apply the summary resolution for Slot to all virtual calls in SlotInfo.
621 void importResolution(VTableSlot Slot, VTableSlotInfo &SlotInfo);
622
623 // If we were able to eliminate all unsafe uses for a type checked load,
624 // eliminate the associated type tests by replacing them with true.
625 void removeRedundantTypeTests();
626
627 bool run();
628
629 // Lower the module using the action and summary passed as command line
630 // arguments. For testing purposes only.
631 static bool
632 runForTesting(Module &M, function_ref<AAResults &(Function &)> AARGetter,
633 function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter,
634 function_ref<DominatorTree &(Function &)> LookupDomTree);
635};
636
637struct DevirtIndex {
638 ModuleSummaryIndex &ExportSummary;
639 // The set in which to record GUIDs exported from their module by
640 // devirtualization, used by client to ensure they are not internalized.
641 std::set<GlobalValue::GUID> &ExportedGUIDs;
642 // A map in which to record the information necessary to locate the WPD
643 // resolution for local targets in case they are exported by cross module
644 // importing.
645 std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap;
646
647 MapVector<VTableSlotSummary, VTableSlotInfo> CallSlots;
648
649 PatternList FunctionsToSkip;
650
651 DevirtIndex(
652 ModuleSummaryIndex &ExportSummary,
653 std::set<GlobalValue::GUID> &ExportedGUIDs,
654 std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap)
655 : ExportSummary(ExportSummary), ExportedGUIDs(ExportedGUIDs),
656 LocalWPDTargetsMap(LocalWPDTargetsMap) {
657 FunctionsToSkip.init(SkipFunctionNames);
658 }
659
660 bool tryFindVirtualCallTargets(std::vector<ValueInfo> &TargetsForSlot,
661 const TypeIdCompatibleVtableInfo TIdInfo,
662 uint64_t ByteOffset);
663
664 bool trySingleImplDevirt(MutableArrayRef<ValueInfo> TargetsForSlot,
665 VTableSlotSummary &SlotSummary,
666 VTableSlotInfo &SlotInfo,
667 WholeProgramDevirtResolution *Res,
668 std::set<ValueInfo> &DevirtTargets);
669
670 void run();
671};
672
673struct WholeProgramDevirt : public ModulePass {
674 static char ID;
675
676 bool UseCommandLine = false;
677
678 ModuleSummaryIndex *ExportSummary = nullptr;
679 const ModuleSummaryIndex *ImportSummary = nullptr;
680
681 WholeProgramDevirt() : ModulePass(ID), UseCommandLine(true) {
682 initializeWholeProgramDevirtPass(*PassRegistry::getPassRegistry());
683 }
684
685 WholeProgramDevirt(ModuleSummaryIndex *ExportSummary,
686 const ModuleSummaryIndex *ImportSummary)
687 : ModulePass(ID), ExportSummary(ExportSummary),
688 ImportSummary(ImportSummary) {
689 initializeWholeProgramDevirtPass(*PassRegistry::getPassRegistry());
690 }
691
692 bool runOnModule(Module &M) override {
693 if (skipModule(M))
694 return false;
695
696 // In the new pass manager, we can request the optimization
697 // remark emitter pass on a per-function-basis, which the
698 // OREGetter will do for us.
699 // In the old pass manager, this is harder, so we just build
700 // an optimization remark emitter on the fly, when we need it.
701 std::unique_ptr<OptimizationRemarkEmitter> ORE;
702 auto OREGetter = [&](Function *F) -> OptimizationRemarkEmitter & {
703 ORE = std::make_unique<OptimizationRemarkEmitter>(F);
704 return *ORE;
705 };
706
707 auto LookupDomTree = [this](Function &F) -> DominatorTree & {
708 return this->getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();
709 };
710
711 if (UseCommandLine)
712 return DevirtModule::runForTesting(M, LegacyAARGetter(*this), OREGetter,
713 LookupDomTree);
714
715 return DevirtModule(M, LegacyAARGetter(*this), OREGetter, LookupDomTree,
716 ExportSummary, ImportSummary)
717 .run();
718 }
719
720 void getAnalysisUsage(AnalysisUsage &AU) const override {
721 AU.addRequired<AssumptionCacheTracker>();
722 AU.addRequired<TargetLibraryInfoWrapperPass>();
723 AU.addRequired<DominatorTreeWrapperPass>();
724 }
725};
726
727} // end anonymous namespace
728
729INITIALIZE_PASS_BEGIN(WholeProgramDevirt, "wholeprogramdevirt",
730 "Whole program devirtualization", false, false)
731INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
732INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
733INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
734INITIALIZE_PASS_END(WholeProgramDevirt, "wholeprogramdevirt",
735 "Whole program devirtualization", false, false)
736char WholeProgramDevirt::ID = 0;
737
738ModulePass *
739llvm::createWholeProgramDevirtPass(ModuleSummaryIndex *ExportSummary,
740 const ModuleSummaryIndex *ImportSummary) {
741 return new WholeProgramDevirt(ExportSummary, ImportSummary);
742}
743
744PreservedAnalyses WholeProgramDevirtPass::run(Module &M,
745 ModuleAnalysisManager &AM) {
746 auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
747 auto AARGetter = [&](Function &F) -> AAResults & {
748 return FAM.getResult<AAManager>(F);
749 };
750 auto OREGetter = [&](Function *F) -> OptimizationRemarkEmitter & {
751 return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
752 };
753 auto LookupDomTree = [&FAM](Function &F) -> DominatorTree & {
754 return FAM.getResult<DominatorTreeAnalysis>(F);
755 };
756 if (!DevirtModule(M, AARGetter, OREGetter, LookupDomTree, ExportSummary,
757 ImportSummary)
758 .run())
759 return PreservedAnalyses::all();
760 return PreservedAnalyses::none();
761}
762
763// Enable whole program visibility if enabled by client (e.g. linker) or
764// internal option, and not force disabled.
765static bool hasWholeProgramVisibility(bool WholeProgramVisibilityEnabledInLTO) {
766 return (WholeProgramVisibilityEnabledInLTO || WholeProgramVisibility) &&
767 !DisableWholeProgramVisibility;
768}
769
770namespace llvm {
771
772/// If whole program visibility asserted, then upgrade all public vcall
773/// visibility metadata on vtable definitions to linkage unit visibility in
774/// Module IR (for regular or hybrid LTO).
775void updateVCallVisibilityInModule(Module &M,
776 bool WholeProgramVisibilityEnabledInLTO) {
777 if (!hasWholeProgramVisibility(WholeProgramVisibilityEnabledInLTO))
778 return;
779 for (GlobalVariable &GV : M.globals())
780 // Add linkage unit visibility to any variable with type metadata, which are
781 // the vtable definitions. We won't have an existing vcall_visibility
782 // metadata on vtable definitions with public visibility.
783 if (GV.hasMetadata(LLVMContext::MD_type) &&
784 GV.getVCallVisibility() == GlobalObject::VCallVisibilityPublic)
785 GV.setVCallVisibilityMetadata(GlobalObject::VCallVisibilityLinkageUnit);
786}
787
788/// If whole program visibility asserted, then upgrade all public vcall
789/// visibility metadata on vtable definition summaries to linkage unit
790/// visibility in Module summary index (for ThinLTO).
791void updateVCallVisibilityInIndex(ModuleSummaryIndex &Index,
792 bool WholeProgramVisibilityEnabledInLTO) {
793 if (!hasWholeProgramVisibility(WholeProgramVisibilityEnabledInLTO))
794 return;
795 for (auto &P : Index) {
796 for (auto &S : P.second.SummaryList) {
797 auto *GVar = dyn_cast<GlobalVarSummary>(S.get());
798 if (!GVar || GVar->vTableFuncs().empty() ||
799 GVar->getVCallVisibility() != GlobalObject::VCallVisibilityPublic)
800 continue;
801 GVar->setVCallVisibility(GlobalObject::VCallVisibilityLinkageUnit);
802 }
803 }
804}
805
806void runWholeProgramDevirtOnIndex(
807 ModuleSummaryIndex &Summary, std::set<GlobalValue::GUID> &ExportedGUIDs,
808 std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap) {
809 DevirtIndex(Summary, ExportedGUIDs, LocalWPDTargetsMap).run();
810}
811
812void updateIndexWPDForExports(
813 ModuleSummaryIndex &Summary,
814 function_ref<bool(StringRef, ValueInfo)> isExported,
815 std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap) {
816 for (auto &T : LocalWPDTargetsMap) {
817 auto &VI = T.first;
818 // This was enforced earlier during trySingleImplDevirt.
819 assert(VI.getSummaryList().size() == 1 &&
820 "Devirt of local target has more than one copy");
821 auto &S = VI.getSummaryList()[0];
822 if (!isExported(S->modulePath(), VI))
823 continue;
824
825 // It's been exported by a cross module import.
826 for (auto &SlotSummary : T.second) {
827 auto *TIdSum = Summary.getTypeIdSummary(SlotSummary.TypeID);
828 assert(TIdSum);
829 auto WPDRes = TIdSum->WPDRes.find(SlotSummary.ByteOffset);
830 assert(WPDRes != TIdSum->WPDRes.end());
831 WPDRes->second.SingleImplName = ModuleSummaryIndex::getGlobalNameForLocal(
832 WPDRes->second.SingleImplName,
833 Summary.getModuleHash(S->modulePath()));
834 }
835 }
836}
837
838} // end namespace llvm
839
840static Error checkCombinedSummaryForTesting(ModuleSummaryIndex *Summary) {
841 // Check that summary index contains regular LTO module when performing
842 // export to prevent occasional use of index from pure ThinLTO compilation
843 // (-fno-split-lto-module). This kind of summary index is passed to
844 // DevirtIndex::run, not to DevirtModule::run used by opt/runForTesting.
845 const auto &ModPaths = Summary->modulePaths();
846 if (ClSummaryAction != PassSummaryAction::Import &&
847 ModPaths.find(ModuleSummaryIndex::getRegularLTOModuleName()) ==
848 ModPaths.end())
849 return createStringError(
850 errc::invalid_argument,
851 "combined summary should contain Regular LTO module");
852 return ErrorSuccess();
853}
854
855bool DevirtModule::runForTesting(
856 Module &M, function_ref<AAResults &(Function &)> AARGetter,
857 function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter,
858 function_ref<DominatorTree &(Function &)> LookupDomTree) {
859 std::unique_ptr<ModuleSummaryIndex> Summary =
860 std::make_unique<ModuleSummaryIndex>(/*HaveGVs=*/false);
861
862 // Handle the command-line summary arguments. This code is for testing
863 // purposes only, so we handle errors directly.
864 if (!ClReadSummary.empty()) {
865 ExitOnError ExitOnErr("-wholeprogramdevirt-read-summary: " + ClReadSummary +
866 ": ");
867 auto ReadSummaryFile =
868 ExitOnErr(errorOrToExpected(MemoryBuffer::getFile(ClReadSummary)));
869 if (Expected<std::unique_ptr<ModuleSummaryIndex>> SummaryOrErr =
870 getModuleSummaryIndex(*ReadSummaryFile)) {
871 Summary = std::move(*SummaryOrErr);
872 ExitOnErr(checkCombinedSummaryForTesting(Summary.get()));
873 } else {
874 // Try YAML if we've failed with bitcode.
875 consumeError(SummaryOrErr.takeError());
876 yaml::Input In(ReadSummaryFile->getBuffer());
877 In >> *Summary;
878 ExitOnErr(errorCodeToError(In.error()));
879 }
880 }
881
882 bool Changed =
883 DevirtModule(M, AARGetter, OREGetter, LookupDomTree,
884 ClSummaryAction == PassSummaryAction::Export ? Summary.get()
885 : nullptr,
886 ClSummaryAction == PassSummaryAction::Import ? Summary.get()
887 : nullptr)
888 .run();
889
890 if (!ClWriteSummary.empty()) {
891 ExitOnError ExitOnErr(
892 "-wholeprogramdevirt-write-summary: " + ClWriteSummary + ": ");
893 std::error_code EC;
894 if (StringRef(ClWriteSummary).endswith(".bc")) {
895 raw_fd_ostream OS(ClWriteSummary, EC, sys::fs::OF_None);
896 ExitOnErr(errorCodeToError(EC));
897 WriteIndexToFile(*Summary, OS);
898 } else {
899 raw_fd_ostream OS(ClWriteSummary, EC, sys::fs::OF_Text);
900 ExitOnErr(errorCodeToError(EC));
901 yaml::Output Out(OS);
902 Out << *Summary;
903 }
904 }
905
906 return Changed;
907}
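For reference, an illustrative invocation of this testing path through opt (file names are hypothetical; the flags are the cl::opts defined at the top of this file):

    opt -wholeprogramdevirt \
        -wholeprogramdevirt-summary-action=export \
        -wholeprogramdevirt-write-summary=summary.yaml \
        module.bc -o devirted.bc

Because summary.yaml does not end in ".bc", the summary is written as YAML.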
908
909void DevirtModule::buildTypeIdentifierMap(
910 std::vector<VTableBits> &Bits,
911 DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap) {
912 DenseMap<GlobalVariable *, VTableBits *> GVToBits;
913 Bits.reserve(M.getGlobalList().size());
914 SmallVector<MDNode *, 2> Types;
915 for (GlobalVariable &GV : M.globals()) {
916 Types.clear();
917 GV.getMetadata(LLVMContext::MD_type, Types);
918 if (GV.isDeclaration() || Types.empty())
919 continue;
920
921 VTableBits *&BitsPtr = GVToBits[&GV];
922 if (!BitsPtr) {
923 Bits.emplace_back();
924 Bits.back().GV = &GV;
925 Bits.back().ObjectSize =
926 M.getDataLayout().getTypeAllocSize(GV.getInitializer()->getType());
927 BitsPtr = &Bits.back();
928 }
929
930 for (MDNode *Type : Types) {
931 auto TypeID = Type->getOperand(1).get();
932
933 uint64_t Offset =
934 cast<ConstantInt>(
935 cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
936 ->getZExtValue();
937
938 TypeIdMap[TypeID].insert({BitsPtr, Offset});
939 }
940 }
941}
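A sketch of the metadata this function walks (hypothetical IR): a vtable global such as

    @_ZTV4Base = constant { ... }, !type !0
    !0 = !{i64 16, !"_ZTS4Base"}

produces a TypeIdMap entry mapping the type identifier !"_ZTS4Base" to a TypeMemberInfo {&Bits for @_ZTV4Base, 16}, i.e. the address point for this type sits 16 bytes into the vtable global.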
942
943bool DevirtModule::tryFindVirtualCallTargets(
944 std::vector<VirtualCallTarget> &TargetsForSlot,
945 const std::set<TypeMemberInfo> &TypeMemberInfos, uint64_t ByteOffset) {
946 for (const TypeMemberInfo &TM : TypeMemberInfos) {
947 if (!TM.Bits->GV->isConstant())
948 return false;
949
950 // We cannot perform whole program devirtualization analysis on a vtable
951 // with public LTO visibility.
952 if (TM.Bits->GV->getVCallVisibility() ==
953 GlobalObject::VCallVisibilityPublic)
954 return false;
955
956 Constant *Ptr = getPointerAtOffset(TM.Bits->GV->getInitializer(),
957 TM.Offset + ByteOffset, M);
958 if (!Ptr)
959 return false;
960
961 auto Fn = dyn_cast<Function>(Ptr->stripPointerCasts());
962 if (!Fn)
963 return false;
964
965 if (FunctionsToSkip.match(Fn->getName()))
966 return false;
967
968 // We can disregard __cxa_pure_virtual as a possible call target, as
969 // calls to pure virtuals are UB.
970 if (Fn->getName() == "__cxa_pure_virtual")
971 continue;
972
973 TargetsForSlot.push_back({Fn, &TM});
974 }
975
976 // Give up if we couldn't find any targets.
977 return !TargetsForSlot.empty();
23: Assuming the condition is true
24: Returning the value 1, which participates in a condition later
978}
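For example (illustrative numbers), given a TypeMemberInfo recording that the type identifier starts at Offset 16 within a vtable global, a virtual call at ByteOffset 8 makes getPointerAtOffset inspect the initializer at byte 24; only if a Function is found there (after stripping pointer casts), and it is neither skipped nor __cxa_pure_virtual, does it become a candidate target.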
979
980bool DevirtIndex::tryFindVirtualCallTargets(
981 std::vector<ValueInfo> &TargetsForSlot, const TypeIdCompatibleVtableInfo TIdInfo,
982 uint64_t ByteOffset) {
983 for (const TypeIdOffsetVtableInfo &P : TIdInfo) {
984 // Find the first non-available_externally linkage vtable initializer.
985 // We can have multiple available_externally, linkonce_odr and weak_odr
986 // vtable initializers, however we want to skip available_externally as they
987 // do not have type metadata attached, and therefore the summary will not
988 // contain any vtable functions. We can also have multiple external
989 // vtable initializers in the case of comdats, which we cannot check here.
990 // The linker should give an error in this case.
991 //
992 // Also, handle the case of same-named local Vtables with the same path
993 // and therefore the same GUID. This can happen if there isn't enough
994 // distinguishing path when compiling the source file. In that case we
995 // conservatively return false early.
996 const GlobalVarSummary *VS = nullptr;
997 bool LocalFound = false;
998 for (auto &S : P.VTableVI.getSummaryList()) {
999 if (GlobalValue::isLocalLinkage(S->linkage())) {
1000 if (LocalFound)
1001 return false;
1002 LocalFound = true;
1003 }
1004 if (!GlobalValue::isAvailableExternallyLinkage(S->linkage())) {
1005 VS = cast<GlobalVarSummary>(S->getBaseObject());
1006 // We cannot perform whole program devirtualization analysis on a vtable
1007 // with public LTO visibility.
1008 if (VS->getVCallVisibility() == GlobalObject::VCallVisibilityPublic)
1009 return false;
1010 }
1011 }
1012 if (!VS->isLive())
1013 continue;
1014 for (auto VTP : VS->vTableFuncs()) {
1015 if (VTP.VTableOffset != P.AddressPointOffset + ByteOffset)
1016 continue;
1017
1018 TargetsForSlot.push_back(VTP.FuncVI);
1019 }
1020 }
1021
1022 // Give up if we couldn't find any targets.
1023 return !TargetsForSlot.empty();
1024}
1025
1026void DevirtModule::applySingleImplDevirt(VTableSlotInfo &SlotInfo,
1027 Constant *TheFn, bool &IsExported) {
1028 auto Apply = [&](CallSiteInfo &CSInfo) {
1029 for (auto &&VCallSite : CSInfo.CallSites) {
1030 if (RemarksEnabled)
1031 VCallSite.emitRemark("single-impl",
1032 TheFn->stripPointerCasts()->getName(), OREGetter);
1033 VCallSite.CB.setCalledOperand(ConstantExpr::getBitCast(
1034 TheFn, VCallSite.CB.getCalledOperand()->getType()));
1035 // This use is no longer unsafe.
1036 if (VCallSite.NumUnsafeUses)
1037 --*VCallSite.NumUnsafeUses;
1038 }
1039 if (CSInfo.isExported())
1040 IsExported = true;
1041 CSInfo.markDevirt();
1042 };
1043 Apply(SlotInfo.CSInfo);
1044 for (auto &P : SlotInfo.ConstCSInfo)
1045 Apply(P.second);
1046}
32: Returning without writing to 'IsExported', which participates in a condition later
1047
1048static bool AddCalls(VTableSlotInfo &SlotInfo, const ValueInfo &Callee) {
1049 // We can't add calls if we haven't seen a definition
1050 if (Callee.getSummaryList().empty())
1051 return false;
1052
1053 // Insert calls into the summary index so that the devirtualized targets
1054 // are eligible for import.
1055 // FIXME: Annotate type tests with hotness. For now, mark these as hot
1056 // to better ensure we have the opportunity to inline them.
1057 bool IsExported = false;
1058 auto &S = Callee.getSummaryList()[0];
1059 CalleeInfo CI(CalleeInfo::HotnessType::Hot, /* RelBF = */ 0);
1060 auto AddCalls = [&](CallSiteInfo &CSInfo) {
1061 for (auto *FS : CSInfo.SummaryTypeCheckedLoadUsers) {
1062 FS->addCall({Callee, CI});
1063 IsExported |= S->modulePath() != FS->modulePath();
1064 }
1065 for (auto *FS : CSInfo.SummaryTypeTestAssumeUsers) {
1066 FS->addCall({Callee, CI});
1067 IsExported |= S->modulePath() != FS->modulePath();
1068 }
1069 };
1070 AddCalls(SlotInfo.CSInfo);
1071 for (auto &P : SlotInfo.ConstCSInfo)
1072 AddCalls(P.second);
1073 return IsExported;
1074}
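Concretely (hypothetical module names): if the single summary S for Callee lives in A.o while a type-test-assume user FS lives in B.o, the FS->addCall edge makes the devirtualized target importable into B.o, and IsExported becomes true because the module paths differ.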
1075
1076bool DevirtModule::trySingleImplDevirt(
1077 ModuleSummaryIndex *ExportSummary,
1078 MutableArrayRef<VirtualCallTarget> TargetsForSlot, VTableSlotInfo &SlotInfo,
1079 WholeProgramDevirtResolution *Res) {
1080 // See if the program contains a single implementation of this virtual
1081 // function.
1082 Function *TheFn = TargetsForSlot[0].Fn;
1083 for (auto &&Target : TargetsForSlot)
28: Assuming '__begin1' is equal to '__end1'
1084 if (TheFn != Target.Fn)
1085 return false;
1086
1087 // If so, update each call site to call that implementation directly.
1088 if (RemarksEnabled)
29: Assuming field 'RemarksEnabled' is false
30: Taking false branch
1089 TargetsForSlot[0].WasDevirt = true;
1090
1091 bool IsExported = false;
1092 applySingleImplDevirt(SlotInfo, TheFn, IsExported);
31: Calling 'DevirtModule::applySingleImplDevirt'
33: Returning from 'DevirtModule::applySingleImplDevirt'
1093 if (!IsExported)
33.1: 'IsExported' is false
34: Taking true branch
1094 return false;
35: Returning zero, which participates in a condition later
1095
1096 // If the only implementation has local linkage, we must promote to external
1097 // to make it visible to thin LTO objects. We can only get here during the
1098 // ThinLTO export phase.
1099 if (TheFn->hasLocalLinkage()) {
1100 std::string NewName = (TheFn->getName() + "$merged").str();
1101
1102 // Since we are renaming the function, any comdats with the same name must
1103 // also be renamed. This is required when targeting COFF, as the comdat name
1104 // must match one of the names of the symbols in the comdat.
1105 if (Comdat *C = TheFn->getComdat()) {
1106 if (C->getName() == TheFn->getName()) {
1107 Comdat *NewC = M.getOrInsertComdat(NewName);
1108 NewC->setSelectionKind(C->getSelectionKind());
1109 for (GlobalObject &GO : M.global_objects())
1110 if (GO.getComdat() == C)
1111 GO.setComdat(NewC);
1112 }
1113 }
1114
1115 TheFn->setLinkage(GlobalValue::ExternalLinkage);
1116 TheFn->setVisibility(GlobalValue::HiddenVisibility);
1117 TheFn->setName(NewName);
1118 }
1119 if (ValueInfo TheFnVI = ExportSummary->getValueInfo(TheFn->getGUID()))
1120 // Any needed promotion of 'TheFn' has already been done during
1121 // LTO unit split, so we can ignore return value of AddCalls.
1122 AddCalls(SlotInfo, TheFnVI);
1123
1124 Res->TheKind = WholeProgramDevirtResolution::SingleImpl;
1125 Res->SingleImplName = std::string(TheFn->getName());
1126
1127 return true;
1128}
1129
1130bool DevirtIndex::trySingleImplDevirt(MutableArrayRef<ValueInfo> TargetsForSlot,
1131 VTableSlotSummary &SlotSummary,
1132 VTableSlotInfo &SlotInfo,
1133 WholeProgramDevirtResolution *Res,
1134 std::set<ValueInfo> &DevirtTargets) {
1135 // See if the program contains a single implementation of this virtual
1136 // function.
1137 auto TheFn = TargetsForSlot[0];
1138 for (auto &&Target : TargetsForSlot)
1139 if (TheFn != Target)
1140 return false;
1141
1142 // Don't devirtualize if we don't have target definition.
1143 auto Size = TheFn.getSummaryList().size();
1144 if (!Size)
1145 return false;
1146
1147 // Don't devirtualize function if we're told to skip it
1148 // in -wholeprogramdevirt-skip.
1149 if (FunctionsToSkip.match(TheFn.name()))
1150 return false;
1151
1152 // If the summary list contains multiple summaries where at least one is
1153 // a local, give up, as we won't know which (possibly promoted) name to use.
1154 for (auto &S : TheFn.getSummaryList())
1155 if (GlobalValue::isLocalLinkage(S->linkage()) && Size > 1)
1156 return false;
1157
1158 // Collect functions devirtualized at least for one call site for stats.
1159 if (PrintSummaryDevirt)
1160 DevirtTargets.insert(TheFn);
1161
1162 auto &S = TheFn.getSummaryList()[0];
1163 bool IsExported = AddCalls(SlotInfo, TheFn);
1164 if (IsExported)
1165 ExportedGUIDs.insert(TheFn.getGUID());
1166
1167 // Record in summary for use in devirtualization during the ThinLTO import
1168 // step.
1169 Res->TheKind = WholeProgramDevirtResolution::SingleImpl;
1170 if (GlobalValue::isLocalLinkage(S->linkage())) {
1171 if (IsExported)
1172 // If target is a local function and we are exporting it by
1173 // devirtualizing a call in another module, we need to record the
1174 // promoted name.
1175 Res->SingleImplName = ModuleSummaryIndex::getGlobalNameForLocal(
1176 TheFn.name(), ExportSummary.getModuleHash(S->modulePath()));
1177 else {
1178 LocalWPDTargetsMap[TheFn].push_back(SlotSummary);
1179 Res->SingleImplName = std::string(TheFn.name());
1180 }
1181 } else
1182 Res->SingleImplName = std::string(TheFn.name());
1183
1184 // Name will be empty if this thin link driven off of serialized combined
1185 // index (e.g. llvm-lto). However, WPD is not supported/invoked for the
1186 // legacy LTO API anyway.
1187 assert(!Res->SingleImplName.empty());
1188
1189 return true;
1190}
1191
1192void DevirtModule::tryICallBranchFunnel(
1193 MutableArrayRef<VirtualCallTarget> TargetsForSlot, VTableSlotInfo &SlotInfo,
1194 WholeProgramDevirtResolution *Res, VTableSlot Slot) {
1195 Triple T(M.getTargetTriple());
1196 if (T.getArch() != Triple::x86_64)
45: Assuming the condition is false
46: Taking false branch
1197 return;
1198
1199 if (TargetsForSlot.size() > ClThreshold)
47: Assuming the condition is false
48: Taking false branch
1200 return;
1201
1202 bool HasNonDevirt = !SlotInfo.CSInfo.AllCallSitesDevirted;
49: Assuming field 'AllCallSitesDevirted' is true
1203 if (!HasNonDevirt)
49.1: 'HasNonDevirt' is false
50: Taking true branch
1204 for (auto &P : SlotInfo.ConstCSInfo)
1205 if (!P.second.AllCallSitesDevirted) {
51: Assuming field 'AllCallSitesDevirted' is false
52: Taking true branch
1206 HasNonDevirt = true;
1207 break;
53: Execution continues on line 1210
1208 }
1209
1210 if (!HasNonDevirt)
53.1: 'HasNonDevirt' is true
54: Taking false branch
1211 return;
1212
1213 FunctionType *FT =
1214 FunctionType::get(Type::getVoidTy(M.getContext()), {Int8PtrTy}, true);
1215 Function *JT;
1216 if (isa<MDString>(Slot.TypeID)) {
55: Assuming field 'TypeID' is not a 'MDString'
56: Taking false branch
1217 JT = Function::Create(FT, Function::ExternalLinkage,
1218 M.getDataLayout().getProgramAddressSpace(),
1219 getGlobalName(Slot, {}, "branch_funnel"), &M);
1220 JT->setVisibility(GlobalValue::HiddenVisibility);
1221 } else {
1222 JT = Function::Create(FT, Function::InternalLinkage,
1223 M.getDataLayout().getProgramAddressSpace(),
1224 "branch_funnel", &M);
1225 }
1226 JT->addAttribute(1, Attribute::Nest);
1227
1228 std::vector<Value *> JTArgs;
1229 JTArgs.push_back(JT->arg_begin());
1230 for (auto &T : TargetsForSlot) {
57: Assuming '__begin1' is equal to '__end1'
1231 JTArgs.push_back(getMemberAddr(T.TM));
1232 JTArgs.push_back(T.Fn);
1233 }
1234
1235 BasicBlock *BB = BasicBlock::Create(M.getContext(), "", JT, nullptr);
1236 Function *Intr =
1237 Intrinsic::getDeclaration(&M, llvm::Intrinsic::icall_branch_funnel, {});
1238
1239 auto *CI = CallInst::Create(Intr, JTArgs, "", BB);
1240 CI->setTailCallKind(CallInst::TCK_MustTail);
1241 ReturnInst::Create(M.getContext(), nullptr, BB);
1242
1243 bool IsExported = false;
1244 applyICallBranchFunnel(SlotInfo, JT, IsExported);
58: Calling 'DevirtModule::applyICallBranchFunnel'
69: Returning from 'DevirtModule::applyICallBranchFunnel'
1245 if (IsExported)
69.1: 'IsExported' is true
70: Taking true branch
1246 Res->TheKind = WholeProgramDevirtResolution::BranchFunnel;
71: Access to field 'TheKind' results in a dereference of a null pointer (loaded from variable 'Res')
1247}
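On the analyzer's path above, Res arrives null while applyICallBranchFunnel still sets IsExported, so the store at line 1246 dereferences a null pointer. A minimal hardening sketch (one possible local fix, not the upstream patch):

    bool IsExported = false;
    applyICallBranchFunnel(SlotInfo, JT, IsExported);
    if (IsExported) {
      assert(Res && "exported branch funnel with no resolution to record");
      if (Res)
        Res->TheKind = WholeProgramDevirtResolution::BranchFunnel;
    }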
1248
1249void DevirtModule::applyICallBranchFunnel(VTableSlotInfo &SlotInfo,
1250 Constant *JT, bool &IsExported) {
1251 auto Apply = [&](CallSiteInfo &CSInfo) {
1252 if (CSInfo.isExported())
60: Calling 'CallSiteInfo::isExported'
63: Returning from 'CallSiteInfo::isExported'
64: Taking true branch
1253 IsExported = true;
65: The value 1 is assigned to 'IsExported', which participates in a condition later
1254 if (CSInfo.AllCallSitesDevirted)
66: Assuming field 'AllCallSitesDevirted' is true
67: Taking true branch
1255 return;
1256 for (auto &&VCallSite : CSInfo.CallSites) {
1257 CallBase &CB = VCallSite.CB;
1258
1259 // Jump tables are only profitable if the retpoline mitigation is enabled.
1260 Attribute FSAttr = CB.getCaller()->getFnAttribute("target-features");
1261 if (FSAttr.hasAttribute(Attribute::None) ||
1262 !FSAttr.getValueAsString().contains("+retpoline"))
1263 continue;
1264
1265 if (RemarksEnabled)
1266 VCallSite.emitRemark("branch-funnel",
1267 JT->stripPointerCasts()->getName(), OREGetter);
1268
1269 // Pass the address of the vtable in the nest register, which is r10 on
1270 // x86_64.
1271 std::vector<Type *> NewArgs;
1272 NewArgs.push_back(Int8PtrTy);
1273 for (Type *T : CB.getFunctionType()->params())
1274 NewArgs.push_back(T);
1275 FunctionType *NewFT =
1276 FunctionType::get(CB.getFunctionType()->getReturnType(), NewArgs,
1277 CB.getFunctionType()->isVarArg());
1278 PointerType *NewFTPtr = PointerType::getUnqual(NewFT);
1279
1280 IRBuilder<> IRB(&CB);
1281 std::vector<Value *> Args;
1282 Args.push_back(IRB.CreateBitCast(VCallSite.VTable, Int8PtrTy));
1283 Args.insert(Args.end(), CB.arg_begin(), CB.arg_end());
1284
1285 CallBase *NewCS = nullptr;
1286 if (isa<CallInst>(CB))
1287 NewCS = IRB.CreateCall(NewFT, IRB.CreateBitCast(JT, NewFTPtr), Args);
1288 else
1289 NewCS = IRB.CreateInvoke(NewFT, IRB.CreateBitCast(JT, NewFTPtr),
1290 cast<InvokeInst>(CB).getNormalDest(),
1291 cast<InvokeInst>(CB).getUnwindDest(), Args);
1292 NewCS->setCallingConv(CB.getCallingConv());
1293
1294 AttributeList Attrs = CB.getAttributes();
1295 std::vector<AttributeSet> NewArgAttrs;
1296 NewArgAttrs.push_back(AttributeSet::get(
1297 M.getContext(), ArrayRef<Attribute>{Attribute::get(
1298 M.getContext(), Attribute::Nest)}));
1299 for (unsigned I = 0; I + 2 < Attrs.getNumAttrSets(); ++I)
1300 NewArgAttrs.push_back(Attrs.getParamAttributes(I));
1301 NewCS->setAttributes(
1302 AttributeList::get(M.getContext(), Attrs.getFnAttributes(),
1303 Attrs.getRetAttributes(), NewArgAttrs));
1304
1305 CB.replaceAllUsesWith(NewCS);
1306 CB.eraseFromParent();
1307
1308 // This use is no longer unsafe.
1309 if (VCallSite.NumUnsafeUses)
1310 --*VCallSite.NumUnsafeUses;
1311 }
1312 // Don't mark as devirtualized because there may be callers compiled without
1313 // retpoline mitigation, which would mean that they are lowered to
1314 // llvm.type.test and therefore require an llvm.type.test resolution for the
1315 // type identifier.
1316 };
1317 Apply(SlotInfo.CSInfo);
59
Calling 'operator()'
68
Returning from 'operator()'
1318 for (auto &P : SlotInfo.ConstCSInfo)
1319 Apply(P.second);
1320}
1321
1322bool DevirtModule::tryEvaluateFunctionsWithArgs(
1323 MutableArrayRef<VirtualCallTarget> TargetsForSlot,
1324 ArrayRef<uint64_t> Args) {
1325 // Evaluate each function and store the result in each target's RetVal
1326 // field.
1327 for (VirtualCallTarget &Target : TargetsForSlot) {
1328 if (Target.Fn->arg_size() != Args.size() + 1)
1329 return false;
1330
1331 Evaluator Eval(M.getDataLayout(), nullptr);
1332 SmallVector<Constant *, 2> EvalArgs;
1333 EvalArgs.push_back(
1334 Constant::getNullValue(Target.Fn->getFunctionType()->getParamType(0)));
1335 for (unsigned I = 0; I != Args.size(); ++I) {
1336 auto *ArgTy = dyn_cast<IntegerType>(
1337 Target.Fn->getFunctionType()->getParamType(I + 1));
1338 if (!ArgTy)
1339 return false;
1340 EvalArgs.push_back(ConstantInt::get(ArgTy, Args[I]));
1341 }
1342
1343 Constant *RetVal;
1344 if (!Eval.EvaluateFunction(Target.Fn, RetVal, EvalArgs) ||
1345 !isa<ConstantInt>(RetVal))
1346 return false;
1347 Target.RetVal = cast<ConstantInt>(RetVal)->getZExtValue();
1348 }
1349 return true;
1350}
1351
1352void DevirtModule::applyUniformRetValOpt(CallSiteInfo &CSInfo, StringRef FnName,
1353 uint64_t TheRetVal) {
1354 for (auto Call : CSInfo.CallSites)
1355 Call.replaceAndErase(
1356 "uniform-ret-val", FnName, RemarksEnabled, OREGetter,
1357 ConstantInt::get(cast<IntegerType>(Call.CB.getType()), TheRetVal));
1358 CSInfo.markDevirt();
1359}
1360
1361bool DevirtModule::tryUniformRetValOpt(
1362 MutableArrayRef<VirtualCallTarget> TargetsForSlot, CallSiteInfo &CSInfo,
1363 WholeProgramDevirtResolution::ByArg *Res) {
1364 // Uniform return value optimization. If all functions return the same
1365 // constant, replace all calls with that constant.
1366 uint64_t TheRetVal = TargetsForSlot[0].RetVal;
1367 for (const VirtualCallTarget &Target : TargetsForSlot)
1368 if (Target.RetVal != TheRetVal)
1369 return false;
1370
1371 if (CSInfo.isExported()) {
1372 Res->TheKind = WholeProgramDevirtResolution::ByArg::UniformRetVal;
1373 Res->Info = TheRetVal;
1374 }
1375
1376 applyUniformRetValOpt(CSInfo, TargetsForSlot[0].Fn->getName(), TheRetVal);
1377 if (RemarksEnabled)
1378 for (auto &&Target : TargetsForSlot)
1379 Target.WasDevirt = true;
1380 return true;
1381}
1382
1383std::string DevirtModule::getGlobalName(VTableSlot Slot,
1384 ArrayRef<uint64_t> Args,
1385 StringRef Name) {
1386 std::string FullName = "__typeid_";
1387 raw_string_ostream OS(FullName);
1388 OS << cast<MDString>(Slot.TypeID)->getString() << '_' << Slot.ByteOffset;
1389 for (uint64_t Arg : Args)
1390 OS << '_' << Arg;
1391 OS << '_' << Name;
1392 return OS.str();
1393}
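For reference, the names produced here have the shape __typeid_<type id>_<byte offset>{_<arg>}_<name>. A small hedged example, assuming a hypothetical Itanium-style type id string "_ZTS1A":

  // getGlobalName({MDString("_ZTS1A"), /*ByteOffset=*/8}, {1}, "branch_funnel")
  // returns "__typeid__ZTS1A_8_1_branch_funnel".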
1394
1395bool DevirtModule::shouldExportConstantsAsAbsoluteSymbols() {
1396 Triple T(M.getTargetTriple());
1397 return T.isX86() && T.getObjectFormat() == Triple::ELF;
1398}
1399
1400void DevirtModule::exportGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args,
1401 StringRef Name, Constant *C) {
1402 GlobalAlias *GA = GlobalAlias::create(Int8Ty, 0, GlobalValue::ExternalLinkage,
1403 getGlobalName(Slot, Args, Name), C, &M);
1404 GA->setVisibility(GlobalValue::HiddenVisibility);
1405}
1406
1407void DevirtModule::exportConstant(VTableSlot Slot, ArrayRef<uint64_t> Args,
1408 StringRef Name, uint32_t Const,
1409 uint32_t &Storage) {
1410 if (shouldExportConstantsAsAbsoluteSymbols()) {
1411 exportGlobal(
1412 Slot, Args, Name,
1413 ConstantExpr::getIntToPtr(ConstantInt::get(Int32Ty, Const), Int8PtrTy));
1414 return;
1415 }
1416
1417 Storage = Const;
1418}
1419
1420Constant *DevirtModule::importGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args,
1421 StringRef Name) {
1422 Constant *C =
1423 M.getOrInsertGlobal(getGlobalName(Slot, Args, Name), Int8Arr0Ty);
1424 auto *GV = dyn_cast<GlobalVariable>(C);
1425 if (GV)
1426 GV->setVisibility(GlobalValue::HiddenVisibility);
1427 return C;
1428}
1429
1430Constant *DevirtModule::importConstant(VTableSlot Slot, ArrayRef<uint64_t> Args,
1431 StringRef Name, IntegerType *IntTy,
1432 uint32_t Storage) {
1433 if (!shouldExportConstantsAsAbsoluteSymbols())
1434 return ConstantInt::get(IntTy, Storage);
1435
1436 Constant *C = importGlobal(Slot, Args, Name);
1437 auto *GV = cast<GlobalVariable>(C->stripPointerCasts());
1438 C = ConstantExpr::getPtrToInt(C, IntTy);
1439
1440 // We only need to set metadata if the global is newly created, in which
1441 // case it would not have hidden visibility.
1442 if (GV->hasMetadata(LLVMContext::MD_absolute_symbol))
1443 return C;
1444
1445 auto SetAbsRange = [&](uint64_t Min, uint64_t Max) {
1446 auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Min));
1447 auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Max));
1448 GV->setMetadata(LLVMContext::MD_absolute_symbol,
1449 MDNode::get(M.getContext(), {MinC, MaxC}));
1450 };
1451 unsigned AbsWidth = IntTy->getBitWidth();
1452 if (AbsWidth == IntPtrTy->getBitWidth())
1453 SetAbsRange(~0ull, ~0ull); // Full set.
1454 else
1455 SetAbsRange(0, 1ull << AbsWidth);
1456 return C;
1457}
1458
1459void DevirtModule::applyUniqueRetValOpt(CallSiteInfo &CSInfo, StringRef FnName,
1460 bool IsOne,
1461 Constant *UniqueMemberAddr) {
1462 for (auto &&Call : CSInfo.CallSites) {
1463 IRBuilder<> B(&Call.CB);
1464 Value *Cmp =
1465 B.CreateICmp(IsOne ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE, Call.VTable,
1466 B.CreateBitCast(UniqueMemberAddr, Call.VTable->getType()));
1467 Cmp = B.CreateZExt(Cmp, Call.CB.getType());
1468 Call.replaceAndErase("unique-ret-val", FnName, RemarksEnabled, OREGetter,
1469 Cmp);
1470 }
1471 CSInfo.markDevirt();
1472}
1473
1474Constant *DevirtModule::getMemberAddr(const TypeMemberInfo *M) {
1475 Constant *C = ConstantExpr::getBitCast(M->Bits->GV, Int8PtrTy);
1476 return ConstantExpr::getGetElementPtr(Int8Ty, C,
1477 ConstantInt::get(Int64Ty, M->Offset));
1478}
1479
1480bool DevirtModule::tryUniqueRetValOpt(
1481 unsigned BitWidth, MutableArrayRef<VirtualCallTarget> TargetsForSlot,
1482 CallSiteInfo &CSInfo, WholeProgramDevirtResolution::ByArg *Res,
1483 VTableSlot Slot, ArrayRef<uint64_t> Args) {
1484 // IsOne controls whether we look for a 0 or a 1.
1485 auto tryUniqueRetValOptFor = [&](bool IsOne) {
1486 const TypeMemberInfo *UniqueMember = nullptr;
1487 for (const VirtualCallTarget &Target : TargetsForSlot) {
1488 if (Target.RetVal == (IsOne ? 1 : 0)) {
1489 if (UniqueMember)
1490 return false;
1491 UniqueMember = Target.TM;
1492 }
1493 }
1494
1495 // We should have found a unique member or bailed out by now. We already
1496 // checked for a uniform return value in tryUniformRetValOpt.
1497 assert(UniqueMember);
1498
1499 Constant *UniqueMemberAddr = getMemberAddr(UniqueMember);
1500 if (CSInfo.isExported()) {
1501 Res->TheKind = WholeProgramDevirtResolution::ByArg::UniqueRetVal;
1502 Res->Info = IsOne;
1503
1504 exportGlobal(Slot, Args, "unique_member", UniqueMemberAddr);
1505 }
1506
1507 // Replace each call with the comparison.
1508 applyUniqueRetValOpt(CSInfo, TargetsForSlot[0].Fn->getName(), IsOne,
1509 UniqueMemberAddr);
1510
1511 // Update devirtualization statistics for targets.
1512 if (RemarksEnabled)
1513 for (auto &&Target : TargetsForSlot)
1514 Target.WasDevirt = true;
1515
1516 return true;
1517 };
1518
1519 if (BitWidth == 1) {
1520 if (tryUniqueRetValOptFor(true))
1521 return true;
1522 if (tryUniqueRetValOptFor(false))
1523 return true;
1524 }
1525 return false;
1526}
1527
1528void DevirtModule::applyVirtualConstProp(CallSiteInfo &CSInfo, StringRef FnName,
1529 Constant *Byte, Constant *Bit) {
1530 for (auto Call : CSInfo.CallSites) {
1531 auto *RetType = cast<IntegerType>(Call.CB.getType());
1532 IRBuilder<> B(&Call.CB);
1533 Value *Addr =
1534 B.CreateGEP(Int8Ty, B.CreateBitCast(Call.VTable, Int8PtrTy), Byte);
1535 if (RetType->getBitWidth() == 1) {
1536 Value *Bits = B.CreateLoad(Int8Ty, Addr);
1537 Value *BitsAndBit = B.CreateAnd(Bits, Bit);
1538 auto IsBitSet = B.CreateICmpNE(BitsAndBit, ConstantInt::get(Int8Ty, 0));
1539 Call.replaceAndErase("virtual-const-prop-1-bit", FnName, RemarksEnabled,
1540 OREGetter, IsBitSet);
1541 } else {
1542 Value *ValAddr = B.CreateBitCast(Addr, RetType->getPointerTo());
1543 Value *Val = B.CreateLoad(RetType, ValAddr);
1544 Call.replaceAndErase("virtual-const-prop", FnName, RemarksEnabled,
1545 OREGetter, Val);
1546 }
1547 }
1548 CSInfo.markDevirt();
1549}
1550
1551bool DevirtModule::tryVirtualConstProp(
1552 MutableArrayRef<VirtualCallTarget> TargetsForSlot, VTableSlotInfo &SlotInfo,
1553 WholeProgramDevirtResolution *Res, VTableSlot Slot) {
1554 // This only works if the function returns an integer.
1555 auto RetType = dyn_cast<IntegerType>(TargetsForSlot[0].Fn->getReturnType());
39
Assuming the object is not a 'IntegerType'
1556 if (!RetType
39.1
'RetType' is null
)
40
Taking true branch
1557 return false;
41
Returning without writing to 'SlotInfo.CSInfo.AllCallSitesDevirted', which participates in a condition later
1558 unsigned BitWidth = RetType->getBitWidth();
1559 if (BitWidth > 64)
1560 return false;
1561
1562 // Make sure that each function is defined, does not access memory, takes at
1563 // least one argument, does not use its first argument (which we assume is
1564 // 'this'), and has the same return type.
1565 //
1566 // Note that we test whether this copy of the function is readnone, rather
1567 // than testing function attributes, which must hold for any copy of the
1568 // function, even a less optimized version substituted at link time. This is
1569 // sound because the virtual constant propagation optimizations effectively
1570 // inline all implementations of the virtual function into each call site,
1571 // rather than using function attributes to perform local optimization.
1572 for (VirtualCallTarget &Target : TargetsForSlot) {
1573 if (Target.Fn->isDeclaration() ||
1574 computeFunctionBodyMemoryAccess(*Target.Fn, AARGetter(*Target.Fn)) !=
1575 MAK_ReadNone ||
1576 Target.Fn->arg_empty() || !Target.Fn->arg_begin()->use_empty() ||
1577 Target.Fn->getReturnType() != RetType)
1578 return false;
1579 }
1580
1581 for (auto &&CSByConstantArg : SlotInfo.ConstCSInfo) {
1582 if (!tryEvaluateFunctionsWithArgs(TargetsForSlot, CSByConstantArg.first))
1583 continue;
1584
1585 WholeProgramDevirtResolution::ByArg *ResByArg = nullptr;
1586 if (Res)
1587 ResByArg = &Res->ResByArg[CSByConstantArg.first];
1588
1589 if (tryUniformRetValOpt(TargetsForSlot, CSByConstantArg.second, ResByArg))
1590 continue;
1591
1592 if (tryUniqueRetValOpt(BitWidth, TargetsForSlot, CSByConstantArg.second,
1593 ResByArg, Slot, CSByConstantArg.first))
1594 continue;
1595
1596 // Find an allocation offset in bits in all vtables associated with the
1597 // type.
1598 uint64_t AllocBefore =
1599 findLowestOffset(TargetsForSlot, /*IsAfter=*/false, BitWidth);
1600 uint64_t AllocAfter =
1601 findLowestOffset(TargetsForSlot, /*IsAfter=*/true, BitWidth);
1602
1603 // Calculate the total amount of padding needed to store a value at both
1604 // ends of the object.
1605 uint64_t TotalPaddingBefore = 0, TotalPaddingAfter = 0;
1606 for (auto &&Target : TargetsForSlot) {
1607 TotalPaddingBefore += std::max<int64_t>(
1608 (AllocBefore + 7) / 8 - Target.allocatedBeforeBytes() - 1, 0);
1609 TotalPaddingAfter += std::max<int64_t>(
1610 (AllocAfter + 7) / 8 - Target.allocatedAfterBytes() - 1, 0);
1611 }
1612
1613 // If the amount of padding is too large, give up.
1614 // FIXME: do something smarter here.
1615 if (std::min(TotalPaddingBefore, TotalPaddingAfter) > 128)
1616 continue;
1617
1618 // Calculate the offset to the value as a (possibly negative) byte offset
1619 // and (if applicable) a bit offset, and store the values in the targets.
1620 int64_t OffsetByte;
1621 uint64_t OffsetBit;
1622 if (TotalPaddingBefore <= TotalPaddingAfter)
1623 setBeforeReturnValues(TargetsForSlot, AllocBefore, BitWidth, OffsetByte,
1624 OffsetBit);
1625 else
1626 setAfterReturnValues(TargetsForSlot, AllocAfter, BitWidth, OffsetByte,
1627 OffsetBit);
1628
1629 if (RemarksEnabled)
1630 for (auto &&Target : TargetsForSlot)
1631 Target.WasDevirt = true;
1632
1633
1634 if (CSByConstantArg.second.isExported()) {
1635 ResByArg->TheKind = WholeProgramDevirtResolution::ByArg::VirtualConstProp;
1636 exportConstant(Slot, CSByConstantArg.first, "byte", OffsetByte,
1637 ResByArg->Byte);
1638 exportConstant(Slot, CSByConstantArg.first, "bit", 1ULL << OffsetBit,
1639 ResByArg->Bit);
1640 }
1641
1642 // Rewrite each call to a load from OffsetByte/OffsetBit.
1643 Constant *ByteConst = ConstantInt::get(Int32Ty, OffsetByte);
1644 Constant *BitConst = ConstantInt::get(Int8Ty, 1ULL << OffsetBit);
1645 applyVirtualConstProp(CSByConstantArg.second,
1646 TargetsForSlot[0].Fn->getName(), ByteConst, BitConst);
1647 }
1648 return true;
1649}
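The same null-resolution pattern appears above at line 1635: 'ResByArg' starts out null and is only set when 'Res' is non-null (lines 1585-1587), yet 'ResByArg->TheKind' is dereferenced unconditionally whenever the constant-argument call-site info is exported. A sketch of the analogous guard, under the same assumption that exported call sites imply a provided resolution:

  if (CSByConstantArg.second.isExported()) {
    // Hypothetical guard mirroring the invariant noted for line 1246.
    assert(ResByArg && "exported call site without a ByArg resolution");
    ResByArg->TheKind = WholeProgramDevirtResolution::ByArg::VirtualConstProp;
    exportConstant(Slot, CSByConstantArg.first, "byte", OffsetByte,
                   ResByArg->Byte);
    exportConstant(Slot, CSByConstantArg.first, "bit", 1ULL << OffsetBit,
                   ResByArg->Bit);
  }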
1650
1651void DevirtModule::rebuildGlobal(VTableBits &B) {
1652 if (B.Before.Bytes.empty() && B.After.Bytes.empty())
1653 return;
1654
1655 // Align the before byte array to the global's minimum alignment so that we
1656 // don't break any alignment requirements on the global.
1657 Align Alignment = M.getDataLayout().getValueOrABITypeAlignment(
1658 B.GV->getAlign(), B.GV->getValueType());
1659 B.Before.Bytes.resize(alignTo(B.Before.Bytes.size(), Alignment));
1660
1661 // Before was stored in reverse order; flip it now.
1662 for (size_t I = 0, Size = B.Before.Bytes.size(); I != Size / 2; ++I)
1663 std::swap(B.Before.Bytes[I], B.Before.Bytes[Size - 1 - I]);
1664
1665 // Build an anonymous global containing the before bytes, followed by the
1666 // original initializer, followed by the after bytes.
1667 auto NewInit = ConstantStruct::getAnon(
1668 {ConstantDataArray::get(M.getContext(), B.Before.Bytes),
1669 B.GV->getInitializer(),
1670 ConstantDataArray::get(M.getContext(), B.After.Bytes)});
1671 auto NewGV =
1672 new GlobalVariable(M, NewInit->getType(), B.GV->isConstant(),
1673 GlobalVariable::PrivateLinkage, NewInit, "", B.GV);
1674 NewGV->setSection(B.GV->getSection());
1675 NewGV->setComdat(B.GV->getComdat());
1676 NewGV->setAlignment(MaybeAlign(B.GV->getAlignment()));
1677
1678 // Copy the original vtable's metadata to the anonymous global, adjusting
1679 // offsets as required.
1680 NewGV->copyMetadata(B.GV, B.Before.Bytes.size());
1681
1682 // Build an alias named after the original global, pointing at the second
1683 // element (the original initializer).
1684 auto Alias = GlobalAlias::create(
1685 B.GV->getInitializer()->getType(), 0, B.GV->getLinkage(), "",
1686 ConstantExpr::getGetElementPtr(
1687 NewInit->getType(), NewGV,
1688 ArrayRef<Constant *>{ConstantInt::get(Int32Ty, 0),
1689 ConstantInt::get(Int32Ty, 1)}),
1690 &M);
1691 Alias->setVisibility(B.GV->getVisibility());
1692 Alias->takeName(B.GV);
1693
1694 B.GV->replaceAllUsesWith(Alias);
1695 B.GV->eraseFromParent();
1696}
1697
1698bool DevirtModule::areRemarksEnabled() {
1699 const auto &FL = M.getFunctionList();
1700 for (const Function &Fn : FL) {
1701 const auto &BBL = Fn.getBasicBlockList();
1702 if (BBL.empty())
1703 continue;
1704 auto DI = OptimizationRemark(DEBUG_TYPE, "", DebugLoc(), &BBL.front());
1705 return DI.isEnabled();
1706 }
1707 return false;
1708}
1709
1710void DevirtModule::scanTypeTestUsers(
1711 Function *TypeTestFunc,
1712 DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap) {
1713 // Find all virtual calls via a virtual table pointer %p under an assumption
1714 // of the form llvm.assume(llvm.type.test(%p, %md)). This indicates that %p
1715 // points to a member of the type identifier %md. Group calls by (type ID,
1716 // offset) pair (effectively the identity of the virtual function) and store
1717 // to CallSlots.
1718 for (auto I = TypeTestFunc->use_begin(), E = TypeTestFunc->use_end();
1719 I != E;) {
1720 auto CI = dyn_cast<CallInst>(I->getUser());
1721 ++I;
1722 if (!CI)
1723 continue;
1724
1725 // Search for virtual calls based on %p and add them to DevirtCalls.
1726 SmallVector<DevirtCallSite, 1> DevirtCalls;
1727 SmallVector<CallInst *, 1> Assumes;
1728 auto &DT = LookupDomTree(*CI->getFunction());
1729 findDevirtualizableCallsForTypeTest(DevirtCalls, Assumes, CI, DT);
1730
1731 Metadata *TypeId =
1732 cast<MetadataAsValue>(CI->getArgOperand(1))->getMetadata();
1733 // If we found any, add them to CallSlots.
1734 if (!Assumes.empty()) {
1735 Value *Ptr = CI->getArgOperand(0)->stripPointerCasts();
1736 for (DevirtCallSite Call : DevirtCalls)
1737 CallSlots[{TypeId, Call.Offset}].addCallSite(Ptr, Call.CB, nullptr);
1738 }
1739
1740 auto RemoveTypeTestAssumes = [&]() {
1741 // We no longer need the assumes or the type test.
1742 for (auto Assume : Assumes)
1743 Assume->eraseFromParent();
1744 // We can't use RecursivelyDeleteTriviallyDeadInstructions here because we
1745 // may use the vtable argument later.
1746 if (CI->use_empty())
1747 CI->eraseFromParent();
1748 };
1749
1750 // At this point we could remove all type test assume sequences, as they
1751 // were originally inserted for WPD. However, we can keep these in the
1752 // code stream for later analysis (e.g. to help drive more efficient ICP
1753 // sequences). They will eventually be removed by a second LowerTypeTests
1754 // invocation that cleans them up. In order to do this correctly, the first
1755 // LowerTypeTests invocation needs to know that they have "Unknown" type
1756 // test resolution, so that they aren't treated as Unsat and lowered to
1757 // False, which will break any uses on assumes. Below we remove any type
1758 // test assumes that will not be treated as Unknown by LTT.
1759
1760 // The type test assumes will be treated by LTT as Unsat if the type id is
1761 // not used on a global (in which case it has no entry in the TypeIdMap).
1762 if (!TypeIdMap.count(TypeId))
1763 RemoveTypeTestAssumes();
1764
1765 // For ThinLTO importing, we need to remove the type test assumes if this is
1766 // an MDString type id without a corresponding TypeIdSummary. Any
1767 // non-MDString type ids are ignored and treated as Unknown by LTT, so their
1768 // type test assumes can be kept. If the MDString type id is missing a
1769 // TypeIdSummary (e.g. because there was no use on a vcall, preventing the
1770 // exporting phase of WPD from analyzing it), then it would be treated as
1771 // Unsat by LTT and we need to remove its type test assumes here. If not
1772 // used on a vcall we don't need them for later optimization use in any
1773 // case.
1774 else if (ImportSummary && isa<MDString>(TypeId)) {
1775 const TypeIdSummary *TidSummary =
1776 ImportSummary->getTypeIdSummary(cast<MDString>(TypeId)->getString());
1777 if (!TidSummary)
1778 RemoveTypeTestAssumes();
1779 else
1780 // If one was created it should not be Unsat, because if we reached here
1781 // the type id was used on a global.
1782 assert(TidSummary->TTRes.TheKind != TypeTestResolution::Unsat);
1783 }
1784 }
1785}
1786
1787void DevirtModule::scanTypeCheckedLoadUsers(Function *TypeCheckedLoadFunc) {
1788 Function *TypeTestFunc = Intrinsic::getDeclaration(&M, Intrinsic::type_test);
1789
1790 for (auto I = TypeCheckedLoadFunc->use_begin(),
1791 E = TypeCheckedLoadFunc->use_end();
1792 I != E;) {
1793 auto CI = dyn_cast<CallInst>(I->getUser());
1794 ++I;
1795 if (!CI)
1796 continue;
1797
1798 Value *Ptr = CI->getArgOperand(0);
1799 Value *Offset = CI->getArgOperand(1);
1800 Value *TypeIdValue = CI->getArgOperand(2);
1801 Metadata *TypeId = cast<MetadataAsValue>(TypeIdValue)->getMetadata();
1802
1803 SmallVector<DevirtCallSite, 1> DevirtCalls;
1804 SmallVector<Instruction *, 1> LoadedPtrs;
1805 SmallVector<Instruction *, 1> Preds;
1806 bool HasNonCallUses = false;
1807 auto &DT = LookupDomTree(*CI->getFunction());
1808 findDevirtualizableCallsForTypeCheckedLoad(DevirtCalls, LoadedPtrs, Preds,
1809 HasNonCallUses, CI, DT);
1810
1811 // Start by generating "pessimistic" code that explicitly loads the function
1812 // pointer from the vtable and performs the type check. If possible, we will
1813 // eliminate the load and the type check later.
1814
1815 // If possible, only generate the load at the point where it is used.
1816 // This helps avoid unnecessary spills.
1817 IRBuilder<> LoadB(
1818 (LoadedPtrs.size() == 1 && !HasNonCallUses) ? LoadedPtrs[0] : CI);
1819 Value *GEP = LoadB.CreateGEP(Int8Ty, Ptr, Offset);
1820 Value *GEPPtr = LoadB.CreateBitCast(GEP, PointerType::getUnqual(Int8PtrTy));
1821 Value *LoadedValue = LoadB.CreateLoad(Int8PtrTy, GEPPtr);
1822
1823 for (Instruction *LoadedPtr : LoadedPtrs) {
1824 LoadedPtr->replaceAllUsesWith(LoadedValue);
1825 LoadedPtr->eraseFromParent();
1826 }
1827
1828 // Likewise for the type test.
1829 IRBuilder<> CallB((Preds.size() == 1 && !HasNonCallUses) ? Preds[0] : CI);
1830 CallInst *TypeTestCall = CallB.CreateCall(TypeTestFunc, {Ptr, TypeIdValue});
1831
1832 for (Instruction *Pred : Preds) {
1833 Pred->replaceAllUsesWith(TypeTestCall);
1834 Pred->eraseFromParent();
1835 }
1836
1837 // We have already erased any extractvalue instructions that refer to the
1838 // intrinsic call, but the intrinsic may have other non-extractvalue uses
1839 // (although this is unlikely). In that case, explicitly build a pair and
1840 // RAUW it.
1841 if (!CI->use_empty()) {
1842 Value *Pair = UndefValue::get(CI->getType());
1843 IRBuilder<> B(CI);
1844 Pair = B.CreateInsertValue(Pair, LoadedValue, {0});
1845 Pair = B.CreateInsertValue(Pair, TypeTestCall, {1});
1846 CI->replaceAllUsesWith(Pair);
1847 }
1848
1849 // The number of unsafe uses is initially the number of uses.
1850 auto &NumUnsafeUses = NumUnsafeUsesForTypeTest[TypeTestCall];
1851 NumUnsafeUses = DevirtCalls.size();
1852
1853 // If the function pointer has a non-call user, we cannot eliminate the type
1854 // check, as one of those users may eventually call the pointer. Increment
1855 // the unsafe use count to make sure it cannot reach zero.
1856 if (HasNonCallUses)
1857 ++NumUnsafeUses;
1858 for (DevirtCallSite Call : DevirtCalls) {
1859 CallSlots[{TypeId, Call.Offset}].addCallSite(Ptr, Call.CB,
1860 &NumUnsafeUses);
1861 }
1862
1863 CI->eraseFromParent();
1864 }
1865}
1866
1867void DevirtModule::importResolution(VTableSlot Slot, VTableSlotInfo &SlotInfo) {
1868 auto *TypeId = dyn_cast<MDString>(Slot.TypeID);
1869 if (!TypeId)
1870 return;
1871 const TypeIdSummary *TidSummary =
1872 ImportSummary->getTypeIdSummary(TypeId->getString());
1873 if (!TidSummary)
1874 return;
1875 auto ResI = TidSummary->WPDRes.find(Slot.ByteOffset);
1876 if (ResI == TidSummary->WPDRes.end())
1877 return;
1878 const WholeProgramDevirtResolution &Res = ResI->second;
1879
1880 if (Res.TheKind == WholeProgramDevirtResolution::SingleImpl) {
1881 assert(!Res.SingleImplName.empty());
1882 // The type of the function in the declaration is irrelevant because every
1883 // call site will cast it to the correct type.
1884 Constant *SingleImpl =
1885 cast<Constant>(M.getOrInsertFunction(Res.SingleImplName,
1886 Type::getVoidTy(M.getContext()))
1887 .getCallee());
1888
1889 // This is the import phase so we should not be exporting anything.
1890 bool IsExported = false;
1891 applySingleImplDevirt(SlotInfo, SingleImpl, IsExported);
1892 assert(!IsExported);
1893 }
1894
1895 for (auto &CSByConstantArg : SlotInfo.ConstCSInfo) {
1896 auto I = Res.ResByArg.find(CSByConstantArg.first);
1897 if (I == Res.ResByArg.end())
1898 continue;
1899 auto &ResByArg = I->second;
1900 // FIXME: We should figure out what to do about the "function name" argument
1901 // to the apply* functions, as the function names are unavailable during the
1902 // importing phase. For now we just pass the empty string. This does not
1903 // impact correctness because the function names are just used for remarks.
1904 switch (ResByArg.TheKind) {
1905 case WholeProgramDevirtResolution::ByArg::UniformRetVal:
1906 applyUniformRetValOpt(CSByConstantArg.second, "", ResByArg.Info);
1907 break;
1908 case WholeProgramDevirtResolution::ByArg::UniqueRetVal: {
1909 Constant *UniqueMemberAddr =
1910 importGlobal(Slot, CSByConstantArg.first, "unique_member");
1911 applyUniqueRetValOpt(CSByConstantArg.second, "", ResByArg.Info,
1912 UniqueMemberAddr);
1913 break;
1914 }
1915 case WholeProgramDevirtResolution::ByArg::VirtualConstProp: {
1916 Constant *Byte = importConstant(Slot, CSByConstantArg.first, "byte",
1917 Int32Ty, ResByArg.Byte);
1918 Constant *Bit = importConstant(Slot, CSByConstantArg.first, "bit", Int8Ty,
1919 ResByArg.Bit);
1920 applyVirtualConstProp(CSByConstantArg.second, "", Byte, Bit);
1921 break;
1922 }
1923 default:
1924 break;
1925 }
1926 }
1927
1928 if (Res.TheKind == WholeProgramDevirtResolution::BranchFunnel) {
1929 // The type of the function is irrelevant, because it's bitcast at calls
1930 // anyhow.
1931 Constant *JT = cast<Constant>(
1932 M.getOrInsertFunction(getGlobalName(Slot, {}, "branch_funnel"),
1933 Type::getVoidTy(M.getContext()))
1934 .getCallee());
1935 bool IsExported = false;
1936 applyICallBranchFunnel(SlotInfo, JT, IsExported);
1937 assert(!IsExported);
1938 }
1939}
1940
1941void DevirtModule::removeRedundantTypeTests() {
1942 auto True = ConstantInt::getTrue(M.getContext());
1943 for (auto &&U : NumUnsafeUsesForTypeTest) {
1944 if (U.second == 0) {
1945 U.first->replaceAllUsesWith(True);
1946 U.first->eraseFromParent();
1947 }
1948 }
1949}
1950
1951bool DevirtModule::run() {
1952 // If only some of the modules were split, we cannot correctly perform
1953 // this transformation. We already checked for the presence of type tests
1954 // with partially split modules during the thin link, and would have emitted
1955 // an error if any were found, so here we can simply return.
1956 if ((ExportSummary && ExportSummary->partiallySplitLTOUnits()) ||
1
Assuming field 'ExportSummary' is null
1957 (ImportSummary && ImportSummary->partiallySplitLTOUnits()))
2
Assuming field 'ImportSummary' is null
1958 return false;
1959
1960 Function *TypeTestFunc =
1961 M.getFunction(Intrinsic::getName(Intrinsic::type_test));
1962 Function *TypeCheckedLoadFunc =
1963 M.getFunction(Intrinsic::getName(Intrinsic::type_checked_load));
1964 Function *AssumeFunc = M.getFunction(Intrinsic::getName(Intrinsic::assume));
1965
1966 // Normally if there are no users of the devirtualization intrinsics in the
1967 // module, this pass has nothing to do. But if we are exporting, we also need
1968 // to handle any users that appear only in the function summaries.
1969 if (!ExportSummary
2.1
Field 'ExportSummary' is null
&&
1970 (!TypeTestFunc || TypeTestFunc->use_empty() || !AssumeFunc ||
3
Assuming 'TypeTestFunc' is non-null
4
Calling 'Value::use_empty'
7
Returning from 'Value::use_empty'
8
Assuming 'AssumeFunc' is non-null
1971 AssumeFunc->use_empty()) &&
9
Calling 'Value::use_empty'
12
Returning from 'Value::use_empty'
1972 (!TypeCheckedLoadFunc || TypeCheckedLoadFunc->use_empty()))
1973 return false;
1974
1975 // Rebuild type metadata into a map for easy lookup.
1976 std::vector<VTableBits> Bits;
1977 DenseMap<Metadata *, std::set<TypeMemberInfo>> TypeIdMap;
1978 buildTypeIdentifierMap(Bits, TypeIdMap);
1979
1980 if (TypeTestFunc
12.1
'TypeTestFunc' is non-null
&& AssumeFunc
12.2
'AssumeFunc' is non-null
)
13
Taking true branch
1981 scanTypeTestUsers(TypeTestFunc, TypeIdMap);
1982
1983 if (TypeCheckedLoadFunc)
14
Assuming 'TypeCheckedLoadFunc' is null
15
Taking false branch
1984 scanTypeCheckedLoadUsers(TypeCheckedLoadFunc);
1985
1986 if (ImportSummary
15.1
Field 'ImportSummary' is null
) {
16
Taking false branch
1987 for (auto &S : CallSlots)
1988 importResolution(S.first, S.second);
1989
1990 removeRedundantTypeTests();
1991
1992 // We have lowered or deleted the type intrinsics, so we will no
1993 // longer have enough information to reason about the liveness of virtual
1994 // function pointers in GlobalDCE.
1995 for (GlobalVariable &GV : M.globals())
1996 GV.eraseMetadata(LLVMContext::MD_vcall_visibility);
1997
1998 // The rest of the code is only necessary when exporting or during regular
1999 // LTO, so we are done.
2000 return true;
2001 }
2002
2003 if (TypeIdMap.empty())
17
Assuming the condition is false
18
Taking false branch
2004 return true;
2005
2006 // Collect information from summary about which calls to try to devirtualize.
2007 if (ExportSummary
18.1
Field 'ExportSummary' is null
) {
19
Taking false branch
2008 DenseMap<GlobalValue::GUID, TinyPtrVector<Metadata *>> MetadataByGUID;
2009 for (auto &P : TypeIdMap) {
2010 if (auto *TypeId = dyn_cast<MDString>(P.first))
2011 MetadataByGUID[GlobalValue::getGUID(TypeId->getString())].push_back(
2012 TypeId);
2013 }
2014
2015 for (auto &P : *ExportSummary) {
2016 for (auto &S : P.second.SummaryList) {
2017 auto *FS = dyn_cast<FunctionSummary>(S.get());
2018 if (!FS)
2019 continue;
2020 // FIXME: Only add live functions.
2021 for (FunctionSummary::VFuncId VF : FS->type_test_assume_vcalls()) {
2022 for (Metadata *MD : MetadataByGUID[VF.GUID]) {
2023 CallSlots[{MD, VF.Offset}].CSInfo.addSummaryTypeTestAssumeUser(FS);
2024 }
2025 }
2026 for (FunctionSummary::VFuncId VF : FS->type_checked_load_vcalls()) {
2027 for (Metadata *MD : MetadataByGUID[VF.GUID]) {
2028 CallSlots[{MD, VF.Offset}].CSInfo.addSummaryTypeCheckedLoadUser(FS);
2029 }
2030 }
2031 for (const FunctionSummary::ConstVCall &VC :
2032 FS->type_test_assume_const_vcalls()) {
2033 for (Metadata *MD : MetadataByGUID[VC.VFunc.GUID]) {
2034 CallSlots[{MD, VC.VFunc.Offset}]
2035 .ConstCSInfo[VC.Args]
2036 .addSummaryTypeTestAssumeUser(FS);
2037 }
2038 }
2039 for (const FunctionSummary::ConstVCall &VC :
2040 FS->type_checked_load_const_vcalls()) {
2041 for (Metadata *MD : MetadataByGUID[VC.VFunc.GUID]) {
2042 CallSlots[{MD, VC.VFunc.Offset}]
2043 .ConstCSInfo[VC.Args]
2044 .addSummaryTypeCheckedLoadUser(FS);
2045 }
2046 }
2047 }
2048 }
2049 }
2050
2051 // For each (type, offset) pair:
2052 bool DidVirtualConstProp = false;
2053 std::map<std::string, Function*> DevirtTargets;
2054 for (auto &S : CallSlots) {
2055 // Search each of the members of the type identifier for the virtual
2056 // function implementation at offset S.first.ByteOffset, and add to
2057 // TargetsForSlot.
2058 std::vector<VirtualCallTarget> TargetsForSlot;
2059 WholeProgramDevirtResolution *Res = nullptr;
20
'Res' initialized to a null pointer value
2060 const std::set<TypeMemberInfo> &TypeMemberInfos = TypeIdMap[S.first.TypeID];
2061 if (ExportSummary && isa<MDString>(S.first.TypeID) &&
21
Assuming field 'ExportSummary' is null
2062 TypeMemberInfos.size())
2063 // For any type id used on a global's type metadata, create the type id
2064 // summary resolution regardless of whether we can devirtualize, so that
2065 // lower type tests knows the type id is not Unsat. If it was not used on
2066 // a global's type metadata, the TypeIdMap entry set will be empty, and
2067 // we don't want to create an entry (with the default Unknown type
2068 // resolution), which can prevent detection of the Unsat.
2069 Res = &ExportSummary
2070 ->getOrInsertTypeIdSummary(
2071 cast<MDString>(S.first.TypeID)->getString())
2072 .WPDRes[S.first.ByteOffset];
2073 if (tryFindVirtualCallTargets(TargetsForSlot, TypeMemberInfos,
22
Calling 'DevirtModule::tryFindVirtualCallTargets'
25
Returning from 'DevirtModule::tryFindVirtualCallTargets'
26
Taking true branch
2074 S.first.ByteOffset)) {
2075
2076 if (!trySingleImplDevirt(ExportSummary, TargetsForSlot, S.second, Res)) {
27
Calling 'DevirtModule::trySingleImplDevirt'
36
Returning from 'DevirtModule::trySingleImplDevirt'
37
Taking true branch
2077 DidVirtualConstProp |=
2078 tryVirtualConstProp(TargetsForSlot, S.second, Res, S.first);
38
Calling 'DevirtModule::tryVirtualConstProp'
42
Returning from 'DevirtModule::tryVirtualConstProp'
2079
2080 tryICallBranchFunnel(TargetsForSlot, S.second, Res, S.first);
43
Passing null pointer value via 3rd parameter 'Res'
44
Calling 'DevirtModule::tryICallBranchFunnel'
2081 }
2082
2083 // Collect functions devirtualized at least for one call site for stats.
2084 if (RemarksEnabled)
2085 for (const auto &T : TargetsForSlot)
2086 if (T.WasDevirt)
2087 DevirtTargets[std::string(T.Fn->getName())] = T.Fn;
2088 }
2089
2090 // CFI-specific: if we are exporting and any llvm.type.checked.load
2091 // intrinsics were *not* devirtualized, we need to add the resulting
2092 // llvm.type.test intrinsics to the function summaries so that the
2093 // LowerTypeTests pass will export them.
2094 if (ExportSummary && isa<MDString>(S.first.TypeID)) {
2095 auto GUID =
2096 GlobalValue::getGUID(cast<MDString>(S.first.TypeID)->getString());
2097 for (auto FS : S.second.CSInfo.SummaryTypeCheckedLoadUsers)
2098 FS->addTypeTest(GUID);
2099 for (auto &CCS : S.second.ConstCSInfo)
2100 for (auto FS : CCS.second.SummaryTypeCheckedLoadUsers)
2101 FS->addTypeTest(GUID);
2102 }
2103 }
2104
2105 if (RemarksEnabled) {
2106 // Generate remarks for each devirtualized function.
2107 for (const auto &DT : DevirtTargets) {
2108 Function *F = DT.second;
2109
2110 using namespace ore;
2111 OREGetter(F).emit(OptimizationRemark(DEBUG_TYPE, "Devirtualized", F)
2112 << "devirtualized "
2113 << NV("FunctionName", DT.first));
2114 }
2115 }
2116
2117 removeRedundantTypeTests();
2118
2119 // Rebuild each global we touched as part of virtual constant propagation to
2120 // include the before and after bytes.
2121 if (DidVirtualConstProp)
2122 for (VTableBits &B : Bits)
2123 rebuildGlobal(B);
2124
2125 // We have lowered or deleted the type intrinsics, so we will no
2126 // longer have enough information to reason about the liveness of virtual
2127 // function pointers in GlobalDCE.
2128 for (GlobalVariable &GV : M.globals())
2129 GV.eraseMetadata(LLVMContext::MD_vcall_visibility);
2130
2131 return true;
2132}
2133
2134void DevirtIndex::run() {
2135 if (ExportSummary.typeIdCompatibleVtableMap().empty())
2136 return;
2137
2138 DenseMap<GlobalValue::GUID, std::vector<StringRef>> NameByGUID;
2139 for (auto &P : ExportSummary.typeIdCompatibleVtableMap()) {
2140 NameByGUID[GlobalValue::getGUID(P.first)].push_back(P.first);
2141 }
2142
2143 // Collect information from summary about which calls to try to devirtualize.
2144 for (auto &P : ExportSummary) {
2145 for (auto &S : P.second.SummaryList) {
2146 auto *FS = dyn_cast<FunctionSummary>(S.get());
2147 if (!FS)
2148 continue;
2149 // FIXME: Only add live functions.
2150 for (FunctionSummary::VFuncId VF : FS->type_test_assume_vcalls()) {
2151 for (StringRef Name : NameByGUID[VF.GUID]) {
2152 CallSlots[{Name, VF.Offset}].CSInfo.addSummaryTypeTestAssumeUser(FS);
2153 }
2154 }
2155 for (FunctionSummary::VFuncId VF : FS->type_checked_load_vcalls()) {
2156 for (StringRef Name : NameByGUID[VF.GUID]) {
2157 CallSlots[{Name, VF.Offset}].CSInfo.addSummaryTypeCheckedLoadUser(FS);
2158 }
2159 }
2160 for (const FunctionSummary::ConstVCall &VC :
2161 FS->type_test_assume_const_vcalls()) {
2162 for (StringRef Name : NameByGUID[VC.VFunc.GUID]) {
2163 CallSlots[{Name, VC.VFunc.Offset}]
2164 .ConstCSInfo[VC.Args]
2165 .addSummaryTypeTestAssumeUser(FS);
2166 }
2167 }
2168 for (const FunctionSummary::ConstVCall &VC :
2169 FS->type_checked_load_const_vcalls()) {
2170 for (StringRef Name : NameByGUID[VC.VFunc.GUID]) {
2171 CallSlots[{Name, VC.VFunc.Offset}]
2172 .ConstCSInfo[VC.Args]
2173 .addSummaryTypeCheckedLoadUser(FS);
2174 }
2175 }
2176 }
2177 }
2178
2179 std::set<ValueInfo> DevirtTargets;
2180 // For each (type, offset) pair:
2181 for (auto &S : CallSlots) {
2182 // Search each of the members of the type identifier for the virtual
2183 // function implementation at offset S.first.ByteOffset, and add to
2184 // TargetsForSlot.
2185 std::vector<ValueInfo> TargetsForSlot;
2186 auto TidSummary = ExportSummary.getTypeIdCompatibleVtableSummary(S.first.TypeID);
2187 assert(TidSummary);
2188 // Create the type id summary resolution regardless of whether we can
2189 // devirtualize, so that lower type tests knows the type id is used on
2190 // a global and not Unsat.
2191 WholeProgramDevirtResolution *Res =
2192 &ExportSummary.getOrInsertTypeIdSummary(S.first.TypeID)
2193 .WPDRes[S.first.ByteOffset];
2194 if (tryFindVirtualCallTargets(TargetsForSlot, *TidSummary,
2195 S.first.ByteOffset)) {
2196
2197 if (!trySingleImplDevirt(TargetsForSlot, S.first, S.second, Res,
2198 DevirtTargets))
2199 continue;
2200 }
2201 }
2202
2203 // Optionally have the thin link print message for each devirtualized
2204 // function.
2205 if (PrintSummaryDevirt)
2206 for (const auto &DT : DevirtTargets)
2207 errs() << "Devirtualized call to " << DT << "\n";
2208
2209 return;
2210}

/build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/llvm/include/llvm/IR/Value.h

1//===- llvm/Value.h - Definition of the Value class -------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the Value class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_IR_VALUE_H
14#define LLVM_IR_VALUE_H
15
16#include "llvm-c/Types.h"
17#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/iterator_range.h"
19#include "llvm/IR/Use.h"
20#include "llvm/Support/Alignment.h"
21#include "llvm/Support/CBindingWrapping.h"
22#include "llvm/Support/Casting.h"
23#include <cassert>
24#include <iterator>
25#include <memory>
26
27namespace llvm {
28
29class APInt;
30class Argument;
31class BasicBlock;
32class Constant;
33class ConstantData;
34class ConstantAggregate;
35class DataLayout;
36class Function;
37class GlobalAlias;
38class GlobalIFunc;
39class GlobalIndirectSymbol;
40class GlobalObject;
41class GlobalValue;
42class GlobalVariable;
43class InlineAsm;
44class Instruction;
45class LLVMContext;
46class Module;
47class ModuleSlotTracker;
48class raw_ostream;
49template<typename ValueTy> class StringMapEntry;
50class StringRef;
51class Twine;
52class Type;
53class User;
54
55using ValueName = StringMapEntry<Value *>;
56
57//===----------------------------------------------------------------------===//
58// Value Class
59//===----------------------------------------------------------------------===//
60
61/// LLVM Value Representation
62///
63/// This is a very important LLVM class. It is the base class of all values
64/// computed by a program that may be used as operands to other values. Value is
65/// the super class of other important classes such as Instruction and Function.
66/// All Values have a Type. Type is not a subclass of Value. Some values can
67/// have a name and they belong to some Module. Setting the name on the Value
68/// automatically updates the module's symbol table.
69///
70/// Every value has a "use list" that keeps track of which other Values are
71/// using this Value. A Value can also have an arbitrary number of ValueHandle
72/// objects that watch it and listen to RAUW and Destroy events. See
73/// llvm/IR/ValueHandle.h for details.
74class Value {
75 Type *VTy;
76 Use *UseList;
77
78 friend class ValueAsMetadata; // Allow access to IsUsedByMD.
79 friend class ValueHandleBase;
80
81 const unsigned char SubclassID; // Subclass identifier (for isa/dyn_cast)
82 unsigned char HasValueHandle : 1; // Has a ValueHandle pointing to this?
83
84protected:
85 /// Hold subclass data that can be dropped.
86 ///
87 /// This member is similar to SubclassData, however it is for holding
88 /// information which may be used to aid optimization, but which may be
89 /// cleared to zero without affecting conservative interpretation.
90 unsigned char SubclassOptionalData : 7;
91
92private:
93 /// Hold arbitrary subclass data.
94 ///
95 /// This member is defined by this class, but is not used for anything.
96 /// Subclasses can use it to hold whatever state they find useful. This
97 /// field is initialized to zero by the ctor.
98 unsigned short SubclassData;
99
100protected:
101 /// The number of operands in the subclass.
102 ///
103 /// This member is defined by this class, but not used for anything.
104 /// Subclasses can use it to store their number of operands, if they have
105 /// any.
106 ///
107 /// This is stored here to save space in User on 64-bit hosts. Since most
108 /// instances of Value have operands, 32-bit hosts aren't significantly
109 /// affected.
110 ///
111 /// Note, this should *NOT* be used directly by any class other than User.
112 /// User uses this value to find the Use list.
113 enum : unsigned { NumUserOperandsBits = 28 };
114 unsigned NumUserOperands : NumUserOperandsBits;
115
116 // Use the same type as the bitfield above so that MSVC will pack them.
117 unsigned IsUsedByMD : 1;
118 unsigned HasName : 1;
119 unsigned HasHungOffUses : 1;
120 unsigned HasDescriptor : 1;
121
122private:
123 template <typename UseT> // UseT == 'Use' or 'const Use'
124 class use_iterator_impl
125 : public std::iterator<std::forward_iterator_tag, UseT *> {
126 friend class Value;
127
128 UseT *U;
129
130 explicit use_iterator_impl(UseT *u) : U(u) {}
131
132 public:
133 use_iterator_impl() : U() {}
134
135 bool operator==(const use_iterator_impl &x) const { return U == x.U; }
136 bool operator!=(const use_iterator_impl &x) const { return !operator==(x); }
137
138 use_iterator_impl &operator++() { // Preincrement
139 assert(U && "Cannot increment end iterator!")((U && "Cannot increment end iterator!") ? static_cast
<void> (0) : __assert_fail ("U && \"Cannot increment end iterator!\""
, "/build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/llvm/include/llvm/IR/Value.h"
, 139, __PRETTY_FUNCTION__))
;
140 U = U->getNext();
141 return *this;
142 }
143
144 use_iterator_impl operator++(int) { // Postincrement
145 auto tmp = *this;
146 ++*this;
147 return tmp;
148 }
149
150 UseT &operator*() const {
151 assert(U && "Cannot dereference end iterator!")((U && "Cannot dereference end iterator!") ? static_cast
<void> (0) : __assert_fail ("U && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/llvm/include/llvm/IR/Value.h"
, 151, __PRETTY_FUNCTION__))
;
152 return *U;
153 }
154
155 UseT *operator->() const { return &operator*(); }
156
157 operator use_iterator_impl<const UseT>() const {
158 return use_iterator_impl<const UseT>(U);
159 }
160 };
161
162 template <typename UserTy> // UserTy == 'User' or 'const User'
163 class user_iterator_impl
164 : public std::iterator<std::forward_iterator_tag, UserTy *> {
165 use_iterator_impl<Use> UI;
166 explicit user_iterator_impl(Use *U) : UI(U) {}
167 friend class Value;
168
169 public:
170 user_iterator_impl() = default;
171
172 bool operator==(const user_iterator_impl &x) const { return UI == x.UI; }
173 bool operator!=(const user_iterator_impl &x) const { return !operator==(x); }
174
175 /// Returns true if this iterator is equal to user_end() on the value.
176 bool atEnd() const { return *this == user_iterator_impl(); }
177
178 user_iterator_impl &operator++() { // Preincrement
179 ++UI;
180 return *this;
181 }
182
183 user_iterator_impl operator++(int) { // Postincrement
184 auto tmp = *this;
185 ++*this;
186 return tmp;
187 }
188
189 // Retrieve a pointer to the current User.
190 UserTy *operator*() const {
191 return UI->getUser();
192 }
193
194 UserTy *operator->() const { return operator*(); }
195
196 operator user_iterator_impl<const UserTy>() const {
197 return user_iterator_impl<const UserTy>(*UI);
198 }
199
200 Use &getUse() const { return *UI; }
201 };
202
203protected:
204 Value(Type *Ty, unsigned scid);
205
206 /// Value's destructor should be virtual by design, but that would require
207 /// that Value and all of its subclasses have a vtable that effectively
208 /// duplicates the information in the value ID. As a size optimization, the
209 /// destructor has been protected, and the caller should manually call
210 /// deleteValue.
211 ~Value(); // Use deleteValue() to delete a generic Value.
212
213public:
214 Value(const Value &) = delete;
215 Value &operator=(const Value &) = delete;
216
217 /// Delete a pointer to a generic Value.
218 void deleteValue();
219
220 /// Support for debugging, callable in GDB: V->dump()
221 void dump() const;
222
223 /// Implement operator<< on Value.
224 /// @{
225 void print(raw_ostream &O, bool IsForDebug = false) const;
226 void print(raw_ostream &O, ModuleSlotTracker &MST,
227 bool IsForDebug = false) const;
228 /// @}
229
230 /// Print the name of this Value out to the specified raw_ostream.
231 ///
232 /// This is useful when you just want to print 'int %reg126', not the
233 /// instruction that generated it. If you specify a Module for context, then
234 /// even constants get pretty-printed; for example, the type of a null
235 /// pointer is printed symbolically.
236 /// @{
237 void printAsOperand(raw_ostream &O, bool PrintType = true,
238 const Module *M = nullptr) const;
239 void printAsOperand(raw_ostream &O, bool PrintType,
240 ModuleSlotTracker &MST) const;
241 /// @}
242
243 /// All values are typed, get the type of this value.
244 Type *getType() const { return VTy; }
245
246 /// All values hold a context through their type.
247 LLVMContext &getContext() const;
248
249 // All values can potentially be named.
250 bool hasName() const { return HasName; }
251 ValueName *getValueName() const;
252 void setValueName(ValueName *VN);
253
254private:
255 void destroyValueName();
256 enum class ReplaceMetadataUses { No, Yes };
257 void doRAUW(Value *New, ReplaceMetadataUses);
258 void setNameImpl(const Twine &Name);
259
260public:
261 /// Return a constant reference to the value's name.
262 ///
263 /// This guaranteed to return the same reference as long as the value is not
264 /// modified. If the value has a name, this does a hashtable lookup, so it's
265 /// not free.
266 StringRef getName() const;
267
268 /// Change the name of the value.
269 ///
270 /// Choose a new unique name if the provided name is taken.
271 ///
272 /// \param Name The new name; or "" if the value's name should be removed.
273 void setName(const Twine &Name);
274
275 /// Transfer the name from V to this value.
276 ///
277 /// After taking V's name, sets V's name to empty.
278 ///
279 /// \note It is an error to call V->takeName(V).
280 void takeName(Value *V);
281
282 /// Change all uses of this to point to a new Value.
283 ///
284 /// Go through the uses list for this definition and make each use point to
285 /// "V" instead of "this". After this completes, 'this's use list is
286 /// guaranteed to be empty.
287 void replaceAllUsesWith(Value *V);
288
289 /// Change non-metadata uses of this to point to a new Value.
290 ///
291 /// Go through the uses list for this definition and make each use point to
292 /// "V" instead of "this". This function skips metadata entries in the list.
293 void replaceNonMetadataUsesWith(Value *V);
294
295 /// Go through the uses list for this definition and make each use point
296 /// to "V" if the callback ShouldReplace returns true for the given Use.
297 /// Unlike replaceAllUsesWith() this function does not support basic block
298 /// values or constant users.
299 void replaceUsesWithIf(Value *New,
300 llvm::function_ref<bool(Use &U)> ShouldReplace) {
301 assert(New && "Value::replaceUsesWithIf(<null>) is invalid!")((New && "Value::replaceUsesWithIf(<null>) is invalid!"
) ? static_cast<void> (0) : __assert_fail ("New && \"Value::replaceUsesWithIf(<null>) is invalid!\""
, "/build/llvm-toolchain-snapshot-12~++20200806111125+5446ec85070/llvm/include/llvm/IR/Value.h"
, 301, __PRETTY_FUNCTION__))
;
302 assert(New->getType() == getType() &&
303        "replaceUses of value with new value of different type!");
304
305 for (use_iterator UI = use_begin(), E = use_end(); UI != E;) {
306 Use &U = *UI;
307 ++UI;
308 if (!ShouldReplace(U))
309 continue;
310 U.set(New);
311 }
312 }
313
314 /// replaceUsesOutsideBlock - Go through the uses list for this definition and
315 /// make each use point to "V" instead of "this" when the use is outside the
316 /// block. 'This's use list is expected to have at least one element.
317 /// Unlike replaceAllUsesWith() this function does not support basic block
318 /// values or constant users.
319 void replaceUsesOutsideBlock(Value *V, BasicBlock *BB);
320
321 //----------------------------------------------------------------------
322 // Methods for handling the chain of uses of this Value.
323 //
324 // Materializing a function can introduce new uses, so these methods come in
325 // two variants:
326 // The methods that start with materialized_ check the uses that are
327 // currently known given which functions are materialized. Be very careful
328 // when using them since you might not get all uses.
329 // The methods that don't start with materialized_ assert that the module is
330 // fully materialized.
331 void assertModuleIsMaterializedImpl() const;
332 // This indirection exists so we can keep assertModuleIsMaterializedImpl()
333 // around in release builds of Value.cpp to be linked with other code built
334 // in debug mode. But this avoids calling it in any of the release built code.
335 void assertModuleIsMaterialized() const {
336#ifndef NDEBUG
337 assertModuleIsMaterializedImpl();
338#endif
339 }
340
341 bool use_empty() const {
342 assertModuleIsMaterialized();
343 return UseList == nullptr;
5
Assuming the condition is false
6
Returning zero, which participates in a condition later
10
Assuming the condition is false
11
Returning zero, which participates in a condition later
344 }
345
346 bool materialized_use_empty() const {
347 return UseList == nullptr;
348 }
349
350 using use_iterator = use_iterator_impl<Use>;
351 using const_use_iterator = use_iterator_impl<const Use>;
352
353 use_iterator materialized_use_begin() { return use_iterator(UseList); }
354 const_use_iterator materialized_use_begin() const {
355 return const_use_iterator(UseList);
356 }
357 use_iterator use_begin() {
358 assertModuleIsMaterialized();
359 return materialized_use_begin();
360 }
361 const_use_iterator use_begin() const {
362 assertModuleIsMaterialized();
363 return materialized_use_begin();
364 }
365 use_iterator use_end() { return use_iterator(); }
366 const_use_iterator use_end() const { return const_use_iterator(); }
367 iterator_range<use_iterator> materialized_uses() {
368 return make_range(materialized_use_begin(), use_end());
369 }
370 iterator_range<const_use_iterator> materialized_uses() const {
371 return make_range(materialized_use_begin(), use_end());
372 }
373 iterator_range<use_iterator> uses() {
374 assertModuleIsMaterialized();
375 return materialized_uses();
376 }
377 iterator_range<const_use_iterator> uses() const {
378 assertModuleIsMaterialized();
379 return materialized_uses();
380 }
381
382 bool user_empty() const {
383 assertModuleIsMaterialized();
384 return UseList == nullptr;
385 }
386
387 using user_iterator = user_iterator_impl<User>;
388 using const_user_iterator = user_iterator_impl<const User>;
389
390 user_iterator materialized_user_begin() { return user_iterator(UseList); }
391 const_user_iterator materialized_user_begin() const {
392 return const_user_iterator(UseList);
393 }
394 user_iterator user_begin() {
395 assertModuleIsMaterialized();
396 return materialized_user_begin();
397 }
398 const_user_iterator user_begin() const {
399 assertModuleIsMaterialized();
400 return materialized_user_begin();
401 }
402 user_iterator user_end() { return user_iterator(); }
403 const_user_iterator user_end() const { return const_user_iterator(); }
404 User *user_back() {
405 assertModuleIsMaterialized();
406 return *materialized_user_begin();
407 }
408 const User *user_back() const {
409 assertModuleIsMaterialized();
410 return *materialized_user_begin();
411 }
412 iterator_range<user_iterator> materialized_users() {
413 return make_range(materialized_user_begin(), user_end());
414 }
415 iterator_range<const_user_iterator> materialized_users() const {
416 return make_range(materialized_user_begin(), user_end());
417 }
418 iterator_range<user_iterator> users() {
419 assertModuleIsMaterialized();
420 return materialized_users();
421 }
422 iterator_range<const_user_iterator> users() const {
423 assertModuleIsMaterialized();
424 return materialized_users();
425 }
426
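A minimal sketch of client code walking these ranges, assuming a fully materialized module (the helper name countAndDumpUses is illustrative; the Value, Use and User APIs are the ones declared above). uses() yields Use references that know their user and operand slot, while users() yields the User pointers directly.

#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/raw_ostream.h"

// Illustrative only: count uses by hand (linear, like getNumUses()) and
// print where each one lives.
static unsigned countAndDumpUses(llvm::Value &V) {
  unsigned N = 0;
  for (llvm::Use &U : V.uses()) {
    llvm::errs() << "operand #" << U.getOperandNo()
                 << " of: " << *U.getUser() << "\n";
    ++N;
  }
  return N; // equals V.getNumUses()
}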
427 /// Return true if there is exactly one use of this value.
428 ///
429 /// This is specialized because it is a common request and does not require
430 /// traversing the whole use list.
431 bool hasOneUse() const {
432 const_use_iterator I = use_begin(), E = use_end();
433 if (I == E) return false;
434 return ++I == E;
435 }
436
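Because it only touches the first two list nodes, hasOneUse() is the preferred guard for single-user transforms; a minimal sketch (the helper is hypothetical):

#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"

// Hypothetical helper: return the sole user of V, or null. Cheaper than
// checking V.getNumUses() == 1, which walks the whole use-list.
static llvm::User *getSoleUser(llvm::Value &V) {
  if (!V.hasOneUse())
    return nullptr;
  return *V.user_begin(); // safe: exactly one use exists
}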
437 /// Return true if this Value has exactly N uses.
438 bool hasNUses(unsigned N) const;
439
440 /// Return true if this value has N uses or more.
441 ///
442 /// This is logically equivalent to getNumUses() >= N.
443 bool hasNUsesOrMore(unsigned N) const;
444
445 /// Return the single use of this value that cannot be dropped, or null if
446 /// there is not exactly one such use.
447 ///
448 /// This is specialized because it is a common request and does not require
449 /// traversing the whole use list.
450 Use *getSingleUndroppableUse();
451
452 /// Return true if this value has exactly N undroppable uses.
453 ///
454 /// This is specialized because it is a common request and does not require
455 /// traversing the whole use list.
456 bool hasNUndroppableUses(unsigned N) const;
457
458 /// Return true if this value has N undroppable uses or more.
459 ///
460 /// This is logically equivalent to the number of undroppable uses being >= N.
461 bool hasNUndroppableUsesOrMore(unsigned N) const;
462
463 /// Remove every use that can safely be removed.
464 ///
465 /// For example, this will remove uses in llvm.assume.
466 /// This should be used when you want to perform a transformation but some
467 /// droppable uses prevent it.
468 /// This function optionally takes a filter to only remove some droppable
469 /// uses.
470 void dropDroppableUses(llvm::function_ref<bool(const Use *)> ShouldDrop =
471 [](const Use *) { return true; });
472
473 /// Remove every use of this value in \p Usr that can safely be removed.
474 void dropDroppableUsesIn(User &Usr);
475
476 /// Remove the droppable use \p U.
477 void dropDroppableUse(Use &U);
478
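A minimal sketch of the filtered form, assuming the caller wants to preserve droppable uses inside one particular user (the helper and KeepIn are illustrative):

#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"

// Illustrative: drop every droppable use of V (e.g. a use in llvm.assume)
// except those belonging to KeepIn; ShouldDrop returns true to drop a use.
static void dropDroppableUsesExceptIn(llvm::Value &V,
                                      const llvm::User *KeepIn) {
  V.dropDroppableUses(
      [KeepIn](const llvm::Use *U) { return U->getUser() != KeepIn; });
}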
479 /// Check if this value is used in the specified basic block.
480 bool isUsedInBasicBlock(const BasicBlock *BB) const;
481
482 /// This method computes the number of uses of this Value.
483 ///
484 /// This is a linear time operation. Use hasOneUse, hasNUses, or
485 /// hasNUsesOrMore to check for specific values.
486 unsigned getNumUses() const;
487
488 /// This method should only be used by the Use class.
489 void addUse(Use &U) { U.addToList(&UseList); }
490
491 /// Concrete subclass of this.
492 ///
493 /// An enumeration for keeping track of the concrete subclass of Value that
494 /// is actually instantiated. Values of this enumeration are kept in the
495 /// Value classes SubclassID field. They are used for concrete type
496 /// identification.
497 enum ValueTy {
498#define HANDLE_VALUE(Name) Name##Val,
499#include "llvm/IR/Value.def"
500
501 // Markers:
502#define HANDLE_CONSTANT_MARKER(Marker, Constant) Marker = Constant##Val,
503#include "llvm/IR/Value.def"
504 };
505
506 /// Return an ID for the concrete type of this object.
507 ///
508 /// This is used to implement the classof checks. This should not be used
509 /// for any other purpose, as the values may change as LLVM evolves. Also,
510 /// note that for instructions, the Instruction's opcode is added to
511 /// InstructionVal. So this means three things:
512 /// # there is no value with code InstructionVal (no opcode==0).
513 /// # there are more possible values for the value type than in ValueTy enum.
514 /// # the InstructionVal enumerator must be the highest valued enumerator in
515 /// the ValueTy enum.
516 unsigned getValueID() const {
517 return SubclassID;
518 }
519
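getValueID() is the hook behind the isa_impl specializations at the bottom of this header; a minimal sketch of the same pattern for a single subclass (illustrative free function; real code should simply call isa<Function>(V)):

#include "llvm/IR/Value.h"

// Mirrors what isa_impl<Function, Value>::doit() below does: compare the
// stored SubclassID against the corresponding ValueTy enumerator.
static bool looksLikeFunction(const llvm::Value &V) {
  return V.getValueID() == llvm::Value::FunctionVal;
}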
520 /// Return the raw optional flags value contained in this value.
521 ///
522 /// This should only be used when testing two Values for equivalence.
523 unsigned getRawSubclassOptionalData() const {
524 return SubclassOptionalData;
525 }
526
527 /// Clear the optional flags contained in this value.
528 void clearSubclassOptionalData() {
529 SubclassOptionalData = 0;
530 }
531
532 /// Check the optional flags for equality.
533 bool hasSameSubclassOptionalData(const Value *V) const {
534 return SubclassOptionalData == V->SubclassOptionalData;
535 }
536
537 /// Return true if there is a value handle associated with this value.
538 bool hasValueHandle() const { return HasValueHandle; }
539
540 /// Return true if there is metadata referencing this value.
541 bool isUsedByMetadata() const { return IsUsedByMD; }
542
543 /// Return true if this value is a swifterror value.
544 ///
545 /// swifterror values can be either a function argument or an alloca with a
546 /// swifterror attribute.
547 bool isSwiftError() const;
548
549 /// Strip off pointer casts, all-zero GEPs and address space casts.
550 ///
551 /// Returns the original uncasted value. If this is called on a non-pointer
552 /// value, it returns 'this'.
553 const Value *stripPointerCasts() const;
554 Value *stripPointerCasts() {
555 return const_cast<Value *>(
556 static_cast<const Value *>(this)->stripPointerCasts());
557 }
558
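A minimal sketch of a typical use, with a hypothetical helper name: comparing two pointers modulo the no-op casts this strips.

#include "llvm/IR/Value.h"

// Illustrative: A and B certainly refer to the same pointer if they agree
// after peeling bitcasts, all-zero GEPs and addrspacecasts (equality of
// the stripped values is sufficient, not necessary).
static bool sameUnderlyingPointer(const llvm::Value *A,
                                  const llvm::Value *B) {
  return A->stripPointerCasts() == B->stripPointerCasts();
}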
559 /// Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
560 ///
561 /// Returns the original uncasted value. If this is called on a non-pointer
562 /// value, it returns 'this'.
563 const Value *stripPointerCastsAndAliases() const;
564 Value *stripPointerCastsAndAliases() {
565 return const_cast<Value *>(
566 static_cast<const Value *>(this)->stripPointerCastsAndAliases());
567 }
568
569 /// Strip off pointer casts, all-zero GEPs and address space casts
570 /// but ensures the representation of the result stays the same.
571 ///
572 /// Returns the original uncasted value with the same representation. If this
573 /// is called on a non-pointer value, it returns 'this'.
574 const Value *stripPointerCastsSameRepresentation() const;
575 Value *stripPointerCastsSameRepresentation() {
576 return const_cast<Value *>(static_cast<const Value *>(this)
577 ->stripPointerCastsSameRepresentation());
578 }
579
580 /// Strip off pointer casts, all-zero GEPs and invariant group info.
581 ///
582 /// Returns the original uncasted value. If this is called on a non-pointer
583 /// value, it returns 'this'. This function should be used only in
584 /// alias analysis.
585 const Value *stripPointerCastsAndInvariantGroups() const;
586 Value *stripPointerCastsAndInvariantGroups() {
587 return const_cast<Value *>(static_cast<const Value *>(this)
588 ->stripPointerCastsAndInvariantGroups());
589 }
590
591 /// Strip off pointer casts and all-constant inbounds GEPs.
592 ///
593 /// Returns the original pointer value. If this is called on a non-pointer
594 /// value, it returns 'this'.
595 const Value *stripInBoundsConstantOffsets() const;
596 Value *stripInBoundsConstantOffsets() {
597 return const_cast<Value *>(
598 static_cast<const Value *>(this)->stripInBoundsConstantOffsets());
599 }
600
601 /// Accumulate the constant offset this value has compared to a base pointer.
602 /// Only offsets of 'getelementptr' instructions (GEPs) are accumulated;
603 /// other instructions, e.g. casts, are stripped away but contribute no offset.
604 /// The accumulated constant offset is added to \p Offset and the base
605 /// pointer is returned.
606 ///
607 /// The APInt \p Offset has to have a bit-width equal to the IntPtr type for
608 /// the address space of 'this' pointer value, e.g., use
609 /// DataLayout::getIndexTypeSizeInBits(Ty).
610 ///
611 /// If \p AllowNonInbounds is true, offsets in GEPs are stripped and
612 /// accumulated even if the GEP is not "inbounds".
613 ///
614 /// If \p ExternalAnalysis is provided, it will be used to calculate an
615 /// offset when an operand of the GEP is not constant. For example,
616 /// \p ExternalAnalysis might try to calculate a lower bound on the offset
617 /// for a value. If \p ExternalAnalysis is successful, it should return true.
618 ///
619 /// If this is called on a non-pointer value, it returns 'this' and the
620 /// \p Offset is not modified.
621 ///
622 /// Note that this function will never return a nullptr. It will also never
623 /// manipulate the \p Offset in a way that would not match the difference
624 /// between the underlying value and the returned one. Thus, if no constant
625 /// offset was found, the returned value is the underlying one and \p Offset
626 /// is unchanged.
627 const Value *stripAndAccumulateConstantOffsets(
628 const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
629 function_ref<bool(Value &Value, APInt &Offset)> ExternalAnalysis =
630 nullptr) const;
631 Value *stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset,
632 bool AllowNonInbounds) {
633 return const_cast<Value *>(
634 static_cast<const Value *>(this)->stripAndAccumulateConstantOffsets(
635 DL, Offset, AllowNonInbounds));
636 }
637
638 /// This is a wrapper around stripAndAccumulateConstantOffsets with
639 /// \p AllowNonInbounds set to false, i.e. only in-bounds offsets are accumulated.
640 const Value *stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
641 APInt &Offset) const {
642 return stripAndAccumulateConstantOffsets(DL, Offset,
643 /* AllowNonInbounds */ false);
644 }
645 Value *stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
646 APInt &Offset) {
647 return stripAndAccumulateConstantOffsets(DL, Offset,
648 /* AllowNonInbounds */ false);
649 }
650
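A minimal sketch honoring the bit-width contract documented above; the helper name is hypothetical, and DataLayout::getIndexTypeSizeInBits supplies the required APInt width.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"

// Illustrative: split Ptr into a base pointer plus a constant byte offset,
// looking only through in-bounds GEPs.
static const llvm::Value *getBaseAndOffset(const llvm::Value *Ptr,
                                           const llvm::DataLayout &DL,
                                           llvm::APInt &OffsetOut) {
  OffsetOut = llvm::APInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  return Ptr->stripAndAccumulateConstantOffsets(DL, OffsetOut,
                                                /*AllowNonInbounds=*/false);
}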
651 /// Strip off pointer casts and inbounds GEPs.
652 ///
653 /// Returns the original pointer value. If this is called on a non-pointer
654 /// value, it returns 'this'.
655 const Value *stripInBoundsOffsets(function_ref<void(const Value *)> Func =
656 [](const Value *) {}) const;
657 inline Value *stripInBoundsOffsets(function_ref<void(const Value *)> Func =
658 [](const Value *) {}) {
659 return const_cast<Value *>(
660 static_cast<const Value *>(this)->stripInBoundsOffsets(Func));
661 }
662
663 /// Returns the number of bytes known to be dereferenceable for the
664 /// pointer value.
665 ///
666 /// If CanBeNull is set by this function the pointer can either be null or be
667 /// dereferenceable up to the returned number of bytes.
668 uint64_t getPointerDereferenceableBytes(const DataLayout &DL,
669 bool &CanBeNull) const;
670
671 /// Returns an alignment of the pointer value.
672 ///
673 /// Returns an alignment which is either specified explicitly, e.g. via
674 /// align attribute of a function argument, or guaranteed by DataLayout.
675 Align getPointerAlignment(const DataLayout &DL) const;
676
677 /// Translate PHI node to its predecessor from the given basic block.
678 ///
679 /// If this value is a PHI node with CurBB as its parent, return the value in
680 /// the PHI node corresponding to PredBB. If not, return ourself. This is
681 /// useful if you want to know the value something has in a predecessor
682 /// block.
683 const Value *DoPHITranslation(const BasicBlock *CurBB,
684 const BasicBlock *PredBB) const;
685 Value *DoPHITranslation(const BasicBlock *CurBB, const BasicBlock *PredBB) {
686 return const_cast<Value *>(
687 static_cast<const Value *>(this)->DoPHITranslation(CurBB, PredBB));
688 }
689
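A minimal sketch with a hypothetical helper: resolving the value an expression carries on a specific incoming edge.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Value.h"

// Illustrative: if V is a PHI in BB, return its incoming value for Pred;
// otherwise return V itself, exactly as documented above.
static const llvm::Value *valueOnEdge(const llvm::Value *V,
                                      const llvm::BasicBlock *BB,
                                      const llvm::BasicBlock *Pred) {
  return V->DoPHITranslation(BB, Pred);
}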
690 /// The maximum alignment for instructions.
691 ///
692 /// This is the greatest alignment value supported by load, store, and alloca
693 /// instructions, and global values.
694 static const unsigned MaxAlignmentExponent = 29;
695 static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
696
697 /// Mutate the type of this Value to be of the specified type.
698 ///
699 /// Note that this is an extremely dangerous operation which can create
700 /// completely invalid IR very easily. It is strongly recommended that you
701 /// recreate IR objects with the right types instead of mutating them in
702 /// place.
703 void mutateType(Type *Ty) {
704 VTy = Ty;
705 }
706
707 /// Sort the use-list.
708 ///
709 /// Sorts the Value's use-list by Cmp using a stable mergesort. Cmp is
710 /// expected to compare two \a Use references.
711 template <class Compare> void sortUseList(Compare Cmp);
712
713 /// Reverse the use-list.
714 void reverseUseList();
715
716private:
717 /// Merge two lists together.
718 ///
719 /// Merges \c L and \c R using \c Cmp. To enable stable sorts, always pushes
720 /// "equal" items from L before items from R.
721 ///
722 /// \return the first element in the list.
723 ///
724 /// \note Completely ignores \a Use::Prev (doesn't read, doesn't update).
725 template <class Compare>
726 static Use *mergeUseLists(Use *L, Use *R, Compare Cmp) {
727 Use *Merged;
728 Use **Next = &Merged;
729
730 while (true) {
731 if (!L) {
732 *Next = R;
733 break;
734 }
735 if (!R) {
736 *Next = L;
737 break;
738 }
739 if (Cmp(*R, *L)) {
740 *Next = R;
741 Next = &R->Next;
742 R = R->Next;
743 } else {
744 *Next = L;
745 Next = &L->Next;
746 L = L->Next;
747 }
748 }
749
750 return Merged;
751 }
752
753protected:
754 unsigned short getSubclassDataFromValue() const { return SubclassData; }
755 void setValueSubclassData(unsigned short D) { SubclassData = D; }
756};
757
758struct ValueDeleter { void operator()(Value *V) { V->deleteValue(); } };
759
760/// Use this instead of std::unique_ptr<Value> or std::unique_ptr<Instruction>.
761/// Those don't work because Value and Instruction's destructors are protected,
762/// aren't virtual, and won't destroy the complete object.
763using unique_value = std::unique_ptr<Value, ValueDeleter>;
764
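A minimal sketch of why this alias exists, using an illustrative scratch instruction (X and Y are assumed to be existing values of the same integer type):

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Value.h"

// Illustrative: own an instruction that was never inserted into a basic
// block. unique_value releases it via Value::deleteValue(), which a plain
// std::unique_ptr<Value> cannot do because ~Value is protected.
static void inspectScratchAdd(llvm::Value *X, llvm::Value *Y) {
  llvm::unique_value Tmp(llvm::BinaryOperator::CreateAdd(X, Y, "scratch"));
  // ... examine *Tmp ...
} // Tmp destroyed here through ValueDeleter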
765inline raw_ostream &operator<<(raw_ostream &OS, const Value &V) {
766 V.print(OS);
767 return OS;
768}
769
770void Use::set(Value *V) {
771 if (Val) removeFromList();
772 Val = V;
773 if (V) V->addUse(*this);
774}
775
776Value *Use::operator=(Value *RHS) {
777 set(RHS);
778 return RHS;
779}
780
781const Use &Use::operator=(const Use &RHS) {
782 set(RHS.Val);
783 return *this;
784}
785
786template <class Compare> void Value::sortUseList(Compare Cmp) {
787 if (!UseList || !UseList->Next)
788 // No need to sort 0 or 1 uses.
789 return;
790
791 // Note: this function completely ignores Prev pointers until the end when
792 // they're fixed en masse.
793
794 // Create a binomial vector of sorted lists, visiting uses one at a time and
795 // merging lists as necessary.
796 const unsigned MaxSlots = 32;
797 Use *Slots[MaxSlots];
798
799 // Collect the first use, turning it into a single-item list.
800 Use *Next = UseList->Next;
801 UseList->Next = nullptr;
802 unsigned NumSlots = 1;
803 Slots[0] = UseList;
804
805 // Collect all but the last use.
806 while (Next->Next) {
807 Use *Current = Next;
808 Next = Current->Next;
809
810 // Turn Current into a single-item list.
811 Current->Next = nullptr;
812
813 // Save Current in the first available slot, merging on collisions.
814 unsigned I;
815 for (I = 0; I < NumSlots; ++I) {
816 if (!Slots[I])
817 break;
818
819 // Merge two lists, doubling the size of Current and emptying slot I.
820 //
821 // Since the uses in Slots[I] originally preceded those in Current, send
822 // Slots[I] in as the left parameter to maintain a stable sort.
823 Current = mergeUseLists(Slots[I], Current, Cmp);
824 Slots[I] = nullptr;
825 }
826 // Check if this is a new slot.
827 if (I == NumSlots) {
828 ++NumSlots;
829 assert(NumSlots <= MaxSlots && "Use list bigger than 2^32");
830 }
831
832 // Found an open slot.
833 Slots[I] = Current;
834 }
835
836 // Merge all the lists together.
837 assert(Next && "Expected one more Use");
838 assert(!Next->Next && "Expected only one Use");
839 UseList = Next;
840 for (unsigned I = 0; I < NumSlots; ++I)
841 if (Slots[I])
842 // Since the uses in Slots[I] originally preceded those in UseList, send
843 // Slots[I] in as the left parameter to maintain a stable sort.
844 UseList = mergeUseLists(Slots[I], UseList, Cmp);
845
846 // Fix the Prev pointers.
847 for (Use *I = UseList, **Prev = &UseList; I; I = I->Next) {
848 I->Prev = Prev;
849 Prev = &I->Next;
850 }
851}
852
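A minimal sketch of invoking the template; the comparator choice is illustrative, and Compare must be a strict weak ordering over two Uses:

#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"

// Illustrative: stable-sort V's use-list so that uses occupying lower
// operand slots of their users come first.
static void sortUsesByOperandNo(llvm::Value &V) {
  V.sortUseList([](const llvm::Use &L, const llvm::Use &R) {
    return L.getOperandNo() < R.getOperandNo();
  });
}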
853// isa - Provide some specializations of isa so that we don't have to include
854// the subtype header files to test whether the value is a subclass...
855//
856template <> struct isa_impl<Constant, Value> {
857 static inline bool doit(const Value &Val) {
858 static_assert(Value::ConstantFirstVal == 0, "Val.getValueID() >= Value::ConstantFirstVal");
859 return Val.getValueID() <= Value::ConstantLastVal;
860 }
861};
862
863template <> struct isa_impl<ConstantData, Value> {
864 static inline bool doit(const Value &Val) {
865 return Val.getValueID() >= Value::ConstantDataFirstVal &&
866 Val.getValueID() <= Value::ConstantDataLastVal;
867 }
868};
869
870template <> struct isa_impl<ConstantAggregate, Value> {
871 static inline bool doit(const Value &Val) {
872 return Val.getValueID() >= Value::ConstantAggregateFirstVal &&
873 Val.getValueID() <= Value::ConstantAggregateLastVal;
874 }
875};
876
877template <> struct isa_impl<Argument, Value> {
878 static inline bool doit (const Value &Val) {
879 return Val.getValueID() == Value::ArgumentVal;
880 }
881};
882
883template <> struct isa_impl<InlineAsm, Value> {
884 static inline bool doit(const Value &Val) {
885 return Val.getValueID() == Value::InlineAsmVal;
886 }
887};
888
889template <> struct isa_impl<Instruction, Value> {
890 static inline bool doit(const Value &Val) {
891 return Val.getValueID() >= Value::InstructionVal;
892 }
893};
894
895template <> struct isa_impl<BasicBlock, Value> {
896 static inline bool doit(const Value &Val) {
897 return Val.getValueID() == Value::BasicBlockVal;
898 }
899};
900
901template <> struct isa_impl<Function, Value> {
902 static inline bool doit(const Value &Val) {
903 return Val.getValueID() == Value::FunctionVal;
904 }
905};
906
907template <> struct isa_impl<GlobalVariable, Value> {
908 static inline bool doit(const Value &Val) {
909 return Val.getValueID() == Value::GlobalVariableVal;
910 }
911};
912
913template <> struct isa_impl<GlobalAlias, Value> {
914 static inline bool doit(const Value &Val) {
915 return Val.getValueID() == Value::GlobalAliasVal;
916 }
917};
918
919template <> struct isa_impl<GlobalIFunc, Value> {
920 static inline bool doit(const Value &Val) {
921 return Val.getValueID() == Value::GlobalIFuncVal;
922 }
923};
924
925template <> struct isa_impl<GlobalIndirectSymbol, Value> {
926 static inline bool doit(const Value &Val) {
927 return isa<GlobalAlias>(Val) || isa<GlobalIFunc>(Val);
928 }
929};
930
931template <> struct isa_impl<GlobalValue, Value> {
932 static inline bool doit(const Value &Val) {
933 return isa<GlobalObject>(Val) || isa<GlobalIndirectSymbol>(Val);
934 }
935};
936
937template <> struct isa_impl<GlobalObject, Value> {
938 static inline bool doit(const Value &Val) {
939 return isa<GlobalVariable>(Val) || isa<Function>(Val);
940 }
941};
942
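A minimal sketch of what these specializations buy client code (the helper is hypothetical): isa<> and dyn_cast<> work on any Value using only the ValueID checks generated above.

#include "llvm/IR/Function.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"

// Illustrative: a checked down-cast driven by isa_impl<Function, Value>.
static bool isDefinedFunction(const llvm::Value *V) {
  if (const auto *F = llvm::dyn_cast<llvm::Function>(V))
    return !F->isDeclaration();
  return false;
}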
943// Create wrappers for C Binding types (see CBindingWrapping.h).
944DEFINE_ISA_CONVERSION_FUNCTIONS(Value, LLVMValueRef)
  // Expands to:
  //   inline Value *unwrap(LLVMValueRef P) { return reinterpret_cast<Value *>(P); }
  //   inline LLVMValueRef wrap(const Value *P) {
  //     return reinterpret_cast<LLVMValueRef>(const_cast<Value *>(P));
  //   }
  //   template <typename T> inline T *unwrap(LLVMValueRef P) { return cast<T>(unwrap(P)); }
945
946// Specialized opaque value conversions.
947inline Value **unwrap(LLVMValueRef *Vals) {
948 return reinterpret_cast<Value**>(Vals);
949}
950
951template<typename T>
952inline T **unwrap(LLVMValueRef *Vals, unsigned Length) {
953#ifndef NDEBUG
954 for (LLVMValueRef *I = Vals, *E = Vals + Length; I != E; ++I)
955 unwrap<T>(*I); // For side effect of calling assert on invalid usage.
956#endif
957 (void)Length;
958 return reinterpret_cast<T**>(Vals);
959}
960
961inline LLVMValueRef *wrap(const Value **Vals) {
962 return reinterpret_cast<LLVMValueRef*>(const_cast<Value**>(Vals));
963}
964
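A minimal sketch of the round trip between a C API handle and the C++ object (the helper is hypothetical; wrap and unwrap are generated by the macro above):

#include "llvm-c/Types.h"
#include "llvm/IR/Value.h"

// Illustrative: LLVMValueRef is the opaque pointer the C API hands out;
// wrap/unwrap are reinterpret_casts and transfer no ownership.
static llvm::Value *roundTrip(llvm::Value *V) {
  LLVMValueRef Ref = llvm::wrap(V);
  return llvm::unwrap(Ref);
}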
965} // end namespace llvm
966
967#endif // LLVM_IR_VALUE_H