Bug Summary

File: llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp
Warning: line 1369, column 18
Access to field 'TheKind' results in a dereference of a null pointer (loaded from variable 'Res')
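The flagged dereference sits in DevirtModule::tryICallBranchFunnel, whose path appears at the end of the annotated source below: the analyzer reports a path on which the resolution pointer Res arrives null (it is only materialized by the caller when an export summary is in play), yet the function eventually stores through it unconditionally. A minimal C++ sketch of that shape, with hypothetical names mirroring the report; this is an illustration of the flagged pattern, not the upstream fix:

    #include <cstdint>

    // Hypothetical reduction of the reported path: Res is only created by
    // the caller when exporting a summary, but the callee stores through it
    // unconditionally once it decides a branch funnel is needed.
    struct WholeProgramDevirtResolution {
      enum Kind : uint8_t { Indir, SingleImpl, BranchFunnel } TheKind = Indir;
    };

    void tryICallBranchFunnelSketch(WholeProgramDevirtResolution *Res,
                                    bool HasNonDevirt) {
      if (!HasNonDevirt)
        return;
      // The analyzer warns here when Res == nullptr; guarding the store
      // with `if (Res)` would remove the path.
      Res->TheKind = WholeProgramDevirtResolution::BranchFunnel;
    }

    int main() {
      WholeProgramDevirtResolution R;
      tryICallBranchFunnelSketch(&R, true); // fine: a resolution exists
      // tryICallBranchFunnelSketch(nullptr, true); // the reported path
    }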

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name WholeProgramDevirt.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Transforms/IPO -I /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Transforms/IPO -I include -I /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-01-16-232930-107970-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp

/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/lib/Transforms/IPO/WholeProgramDevirt.cpp

1//===- WholeProgramDevirt.cpp - Whole program virtual call optimization ---===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass implements whole program optimization of virtual calls in cases
10// where we know (via !type metadata) that the list of callees is fixed. This
11// includes the following:
12// - Single implementation devirtualization: if a virtual call has a single
13// possible callee, replace all calls with a direct call to that callee.
14// - Virtual constant propagation: if the virtual function's return type is an
15// integer <=64 bits and all possible callees are readnone, for each class and
16// each list of constant arguments: evaluate the function, store the return
17// value alongside the virtual table, and rewrite each virtual call as a load
18// from the virtual table.
19// - Uniform return value optimization: if the conditions for virtual constant
20// propagation hold and each function returns the same constant value, replace
21// each virtual call with that constant.
22// - Unique return value optimization for i1 return values: if the conditions
23// for virtual constant propagation hold and a single vtable's function
24// returns 0, or a single vtable's function returns 1, replace each virtual
25// call with a comparison of the vptr against that vtable's address.
26//
27// This pass is intended to be used during the regular and thin LTO pipelines:
28//
29// During regular LTO, the pass determines the best optimization for each
30// virtual call and applies the resolutions directly to virtual calls that are
31// eligible for virtual call optimization (i.e. calls that use either of the
32// llvm.assume(llvm.type.test) or llvm.type.checked.load intrinsics).
33//
34// During hybrid Regular/ThinLTO, the pass operates in two phases:
35// - Export phase: this is run during the thin link over a single merged module
36// that contains all vtables with !type metadata that participate in the link.
37// The pass computes a resolution for each virtual call and stores it in the
38// type identifier summary.
39// - Import phase: this is run during the thin backends over the individual
40// modules. The pass applies the resolutions previously computed during the
41// export phase to each eligible virtual call.
42//
43// During ThinLTO, the pass operates in two phases:
44// - Export phase: this is run during the thin link over the index which
45// contains a summary of all vtables with !type metadata that participate in
46// the link. It computes a resolution for each virtual call and stores it in
47// the type identifier summary. Only single implementation devirtualization
48// is supported.
49// - Import phase: (same as with hybrid case above).
50//
51//===----------------------------------------------------------------------===//
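The overview above compresses a lot; the single-implementation case is easiest to see at the C++ level. An illustrative sketch (not part of the analyzed file), assuming whole-program visibility of the hierarchy so that the pass can prove there is exactly one implementation:

    // If OnlyImpl is provably the sole implementation of Base::f in the
    // whole program, WPD rewrites the indirect vtable call into a direct
    // call to OnlyImpl::f.
    struct Base {
      virtual int f() = 0;
      virtual ~Base() = default;
    };
    struct OnlyImpl final : Base {
      int f() override { return 42; }
    };

    int callF(Base *B) {
      return B->f(); // before: indirect call through the vtable slot
                     // after WPD: effectively a direct call to OnlyImpl::f
    }

    int main() {
      OnlyImpl O;
      return callF(&O) == 42 ? 0 : 1;
    }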
52
53#include "llvm/Transforms/IPO/WholeProgramDevirt.h"
54#include "llvm/ADT/ArrayRef.h"
55#include "llvm/ADT/DenseMap.h"
56#include "llvm/ADT/DenseMapInfo.h"
57#include "llvm/ADT/DenseSet.h"
58#include "llvm/ADT/MapVector.h"
59#include "llvm/ADT/SmallVector.h"
60#include "llvm/ADT/Triple.h"
61#include "llvm/ADT/iterator_range.h"
62#include "llvm/Analysis/AssumptionCache.h"
63#include "llvm/Analysis/BasicAliasAnalysis.h"
64#include "llvm/Analysis/OptimizationRemarkEmitter.h"
65#include "llvm/Analysis/TypeMetadataUtils.h"
66#include "llvm/Bitcode/BitcodeReader.h"
67#include "llvm/Bitcode/BitcodeWriter.h"
68#include "llvm/IR/Constants.h"
69#include "llvm/IR/DataLayout.h"
70#include "llvm/IR/DebugLoc.h"
71#include "llvm/IR/DerivedTypes.h"
72#include "llvm/IR/Dominators.h"
73#include "llvm/IR/Function.h"
74#include "llvm/IR/GlobalAlias.h"
75#include "llvm/IR/GlobalVariable.h"
76#include "llvm/IR/IRBuilder.h"
77#include "llvm/IR/InstrTypes.h"
78#include "llvm/IR/Instruction.h"
79#include "llvm/IR/Instructions.h"
80#include "llvm/IR/Intrinsics.h"
81#include "llvm/IR/LLVMContext.h"
82#include "llvm/IR/Metadata.h"
83#include "llvm/IR/Module.h"
84#include "llvm/IR/ModuleSummaryIndexYAML.h"
85#include "llvm/InitializePasses.h"
86#include "llvm/Pass.h"
87#include "llvm/PassRegistry.h"
88#include "llvm/Support/Casting.h"
89#include "llvm/Support/CommandLine.h"
90#include "llvm/Support/Errc.h"
91#include "llvm/Support/Error.h"
92#include "llvm/Support/FileSystem.h"
93#include "llvm/Support/GlobPattern.h"
94#include "llvm/Support/MathExtras.h"
95#include "llvm/Transforms/IPO.h"
96#include "llvm/Transforms/IPO/FunctionAttrs.h"
97#include "llvm/Transforms/Utils/BasicBlockUtils.h"
98#include "llvm/Transforms/Utils/Evaluator.h"
99#include <algorithm>
100#include <cstddef>
101#include <map>
102#include <set>
103#include <string>
104
105using namespace llvm;
106using namespace wholeprogramdevirt;
107
108#define DEBUG_TYPE "wholeprogramdevirt"
109
110static cl::opt<PassSummaryAction> ClSummaryAction(
111 "wholeprogramdevirt-summary-action",
112 cl::desc("What to do with the summary when running this pass"),
113 cl::values(clEnumValN(PassSummaryAction::None, "none", "Do nothing"),
114 clEnumValN(PassSummaryAction::Import, "import",
115 "Import typeid resolutions from summary and globals"),
116 clEnumValN(PassSummaryAction::Export, "export",
117 "Export typeid resolutions to summary and globals")),
118 cl::Hidden);
119
120static cl::opt<std::string> ClReadSummary(
121 "wholeprogramdevirt-read-summary",
122 cl::desc(
123 "Read summary from given bitcode or YAML file before running pass"),
124 cl::Hidden);
125
126static cl::opt<std::string> ClWriteSummary(
127 "wholeprogramdevirt-write-summary",
128 cl::desc("Write summary to given bitcode or YAML file after running pass. "
129 "Output file format is deduced from extension: *.bc means writing "
130 "bitcode, otherwise YAML"),
131 cl::Hidden);
132
133static cl::opt<unsigned>
134 ClThreshold("wholeprogramdevirt-branch-funnel-threshold", cl::Hidden,
135 cl::init(10), cl::ZeroOrMore,
136 cl::desc("Maximum number of call targets per "
137 "call site to enable branch funnels"));
138
139static cl::opt<bool>
140 PrintSummaryDevirt("wholeprogramdevirt-print-index-based", cl::Hidden,
141 cl::init(false), cl::ZeroOrMore,
142 cl::desc("Print index-based devirtualization messages"));
143
144/// Provide a way to force enable whole program visibility in tests.
145/// This is needed to support legacy tests that don't contain
146/// !vcall_visibility metadata (the mere presence of type tests
147/// previously implied hidden visibility).
148static cl::opt<bool>
149 WholeProgramVisibility("whole-program-visibility", cl::init(false),
150 cl::Hidden, cl::ZeroOrMore,
151 cl::desc("Enable whole program visibility"));
152
153/// Provide a way to force disable whole program for debugging or workarounds,
154/// when enabled via the linker.
155static cl::opt<bool> DisableWholeProgramVisibility(
156 "disable-whole-program-visibility", cl::init(false), cl::Hidden,
157 cl::ZeroOrMore,
158 cl::desc("Disable whole program visibility (overrides enabling options)"));
159
160/// Provide a way to prevent certain functions from being devirtualized
161static cl::list<std::string>
162 SkipFunctionNames("wholeprogramdevirt-skip",
163 cl::desc("Prevent function(s) from being devirtualized"),
164 cl::Hidden, cl::ZeroOrMore, cl::CommaSeparated);
165
166/// Mechanism to add runtime checking of devirtualization decisions, trapping on
167/// any that are not correct. Useful for debugging undefined behavior leading to
168/// failures with WPD.
169static cl::opt<bool>
170 CheckDevirt("wholeprogramdevirt-check", cl::init(false), cl::Hidden,
171 cl::ZeroOrMore,
172 cl::desc("Add code to trap on incorrect devirtualizations"));
173
174namespace {
175struct PatternList {
176 std::vector<GlobPattern> Patterns;
177 template <class T> void init(const T &StringList) {
178 for (const auto &S : StringList)
179 if (Expected<GlobPattern> Pat = GlobPattern::create(S))
180 Patterns.push_back(std::move(*Pat));
181 }
182 bool match(StringRef S) {
183 for (const GlobPattern &P : Patterns)
184 if (P.match(S))
185 return true;
186 return false;
187 }
188};
189} // namespace
190
191// Find the minimum offset that we may store a value of size Size bits at. If
192// IsAfter is set, look for an offset after the object; otherwise look for an
193// offset before the object.
194uint64_t
195wholeprogramdevirt::findLowestOffset(ArrayRef<VirtualCallTarget> Targets,
196 bool IsAfter, uint64_t Size) {
197 // Find a minimum offset taking into account only vtable sizes.
198 uint64_t MinByte = 0;
199 for (const VirtualCallTarget &Target : Targets) {
200 if (IsAfter)
201 MinByte = std::max(MinByte, Target.minAfterBytes());
202 else
203 MinByte = std::max(MinByte, Target.minBeforeBytes());
204 }
205
206 // Build a vector of arrays of bytes covering, for each target, a slice of the
207 // used region (see AccumBitVector::BytesUsed in
208 // llvm/Transforms/IPO/WholeProgramDevirt.h) starting at MinByte. Effectively,
209 // this aligns the used regions to start at MinByte.
210 //
211 // In this example, A, B and C are vtables, # is a byte already allocated for
212 // a virtual function pointer, AAAA... (etc.) are the used regions for the
213 // vtables and Offset(X) is the value computed for the Offset variable below
214 // for X.
215 //
216 // Offset(A)
217 // | |
218 // |MinByte
219 // A: ################AAAAAAAA|AAAAAAAA
220 // B: ########BBBBBBBBBBBBBBBB|BBBB
221 // C: ########################|CCCCCCCCCCCCCCCC
222 // | Offset(B) |
223 //
224 // This code produces the slices of A, B and C that appear after the divider
225 // at MinByte.
226 std::vector<ArrayRef<uint8_t>> Used;
227 for (const VirtualCallTarget &Target : Targets) {
228 ArrayRef<uint8_t> VTUsed = IsAfter ? Target.TM->Bits->After.BytesUsed
229 : Target.TM->Bits->Before.BytesUsed;
230 uint64_t Offset = IsAfter ? MinByte - Target.minAfterBytes()
231 : MinByte - Target.minBeforeBytes();
232
233 // Disregard used regions that are smaller than Offset. These are
234 // effectively all-free regions that do not need to be checked.
235 if (VTUsed.size() > Offset)
236 Used.push_back(VTUsed.slice(Offset));
237 }
238
239 if (Size == 1) {
240 // Find a free bit in each member of Used.
241 for (unsigned I = 0;; ++I) {
242 uint8_t BitsUsed = 0;
243 for (auto &&B : Used)
244 if (I < B.size())
245 BitsUsed |= B[I];
246 if (BitsUsed != 0xff)
247 return (MinByte + I) * 8 +
248 countTrailingZeros(uint8_t(~BitsUsed), ZB_Undefined);
249 }
250 } else {
251 // Find a free (Size/8) byte region in each member of Used.
252 // FIXME: see if alignment helps.
253 for (unsigned I = 0;; ++I) {
254 for (auto &&B : Used) {
255 unsigned Byte = 0;
256 while ((I + Byte) < B.size() && Byte < (Size / 8)) {
257 if (B[I + Byte])
258 goto NextI;
259 ++Byte;
260 }
261 }
262 return (MinByte + I) * 8;
263 NextI:;
264 }
265 }
266}
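In the Size == 1 branch above, the loop ORs together byte I of every used region and, at the first byte whose bit columns are not all occupied, returns the index of the lowest clear bit, counted in bits from MinByte. A standalone sketch of that search, illustrative only, using std::vector in place of the pass's ArrayRef slices:

    #include <cstdint>
    #include <vector>

    // Returns the first free bit position, in bits from byte MinByte,
    // across all used regions; mirrors the Size == 1 loop above.
    uint64_t findFreeBit(const std::vector<std::vector<uint8_t>> &Used,
                         uint64_t MinByte) {
      for (unsigned I = 0;; ++I) {
        uint8_t BitsUsed = 0;
        for (const auto &B : Used)
          if (I < B.size())
            BitsUsed |= B[I];
        if (BitsUsed != 0xff) {
          // Lowest clear bit, i.e. countTrailingZeros(~BitsUsed).
          unsigned Bit = 0;
          while (BitsUsed & (1u << Bit))
            ++Bit;
          return (MinByte + I) * 8 + Bit;
        }
      }
    }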
267
268void wholeprogramdevirt::setBeforeReturnValues(
269 MutableArrayRef<VirtualCallTarget> Targets, uint64_t AllocBefore,
270 unsigned BitWidth, int64_t &OffsetByte, uint64_t &OffsetBit) {
271 if (BitWidth == 1)
272 OffsetByte = -(AllocBefore / 8 + 1);
273 else
274 OffsetByte = -((AllocBefore + 7) / 8 + (BitWidth + 7) / 8);
275 OffsetBit = AllocBefore % 8;
276
277 for (VirtualCallTarget &Target : Targets) {
278 if (BitWidth == 1)
279 Target.setBeforeBit(AllocBefore);
280 else
281 Target.setBeforeBytes(AllocBefore, (BitWidth + 7) / 8);
282 }
283}
284
285void wholeprogramdevirt::setAfterReturnValues(
286 MutableArrayRef<VirtualCallTarget> Targets, uint64_t AllocAfter,
287 unsigned BitWidth, int64_t &OffsetByte, uint64_t &OffsetBit) {
288 if (BitWidth == 1)
289 OffsetByte = AllocAfter / 8;
290 else
291 OffsetByte = (AllocAfter + 7) / 8;
292 OffsetBit = AllocAfter % 8;
293
294 for (VirtualCallTarget &Target : Targets) {
295 if (BitWidth == 1)
296 Target.setAfterBit(AllocAfter);
297 else
298 Target.setAfterBytes(AllocAfter, (BitWidth + 7) / 8);
299 }
300}
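A worked instance of the offset arithmetic in the two helpers above, with AllocBefore = AllocAfter = 9 bits chosen purely for illustration: a 1-bit value lands at byte -2, bit 1 before the address point (or byte 1 after it), while a 32-bit value starts at byte -6 before (or byte 2 after).

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t Alloc = 9; // bits already allocated on that side
      // Before the address point (see setBeforeReturnValues):
      assert(-(int64_t)(Alloc / 8 + 1) == -2);                  // BitWidth == 1
      assert(-(int64_t)((Alloc + 7) / 8 + (32 + 7) / 8) == -6); // BitWidth == 32
      // After the address point (see setAfterReturnValues):
      assert(Alloc / 8 == 1);                                   // BitWidth == 1
      assert((Alloc + 7) / 8 == 2);                             // BitWidth == 32
      assert(Alloc % 8 == 1);                                   // OffsetBit
    }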
301
302VirtualCallTarget::VirtualCallTarget(Function *Fn, const TypeMemberInfo *TM)
303 : Fn(Fn), TM(TM),
304 IsBigEndian(Fn->getParent()->getDataLayout().isBigEndian()), WasDevirt(false) {}
305
306namespace {
307
308// A slot in a set of virtual tables. The TypeID identifies the set of virtual
309// tables, and the ByteOffset is the offset in bytes from the address point to
310// the virtual function pointer.
311struct VTableSlot {
312 Metadata *TypeID;
313 uint64_t ByteOffset;
314};
315
316} // end anonymous namespace
317
318namespace llvm {
319
320template <> struct DenseMapInfo<VTableSlot> {
321 static VTableSlot getEmptyKey() {
322 return {DenseMapInfo<Metadata *>::getEmptyKey(),
323 DenseMapInfo<uint64_t>::getEmptyKey()};
324 }
325 static VTableSlot getTombstoneKey() {
326 return {DenseMapInfo<Metadata *>::getTombstoneKey(),
327 DenseMapInfo<uint64_t>::getTombstoneKey()};
328 }
329 static unsigned getHashValue(const VTableSlot &I) {
330 return DenseMapInfo<Metadata *>::getHashValue(I.TypeID) ^
331 DenseMapInfo<uint64_t>::getHashValue(I.ByteOffset);
332 }
333 static bool isEqual(const VTableSlot &LHS,
334 const VTableSlot &RHS) {
335 return LHS.TypeID == RHS.TypeID && LHS.ByteOffset == RHS.ByteOffset;
336 }
337};
338
339template <> struct DenseMapInfo<VTableSlotSummary> {
340 static VTableSlotSummary getEmptyKey() {
341 return {DenseMapInfo<StringRef>::getEmptyKey(),
342 DenseMapInfo<uint64_t>::getEmptyKey()};
343 }
344 static VTableSlotSummary getTombstoneKey() {
345 return {DenseMapInfo<StringRef>::getTombstoneKey(),
346 DenseMapInfo<uint64_t>::getTombstoneKey()};
347 }
348 static unsigned getHashValue(const VTableSlotSummary &I) {
349 return DenseMapInfo<StringRef>::getHashValue(I.TypeID) ^
350 DenseMapInfo<uint64_t>::getHashValue(I.ByteOffset);
351 }
352 static bool isEqual(const VTableSlotSummary &LHS,
353 const VTableSlotSummary &RHS) {
354 return LHS.TypeID == RHS.TypeID && LHS.ByteOffset == RHS.ByteOffset;
355 }
356};
357
358} // end namespace llvm
359
360namespace {
361
362// Returns true if the function must be unreachable based on ValueInfo.
363//
364// In particular, identifies a function as unreachable in the following
365// conditions:
366// 1) All summaries are live.
367// 2) All function summaries indicate it's unreachable.
368bool mustBeUnreachableFunction(ValueInfo TheFnVI) {
369 if ((!TheFnVI) || TheFnVI.getSummaryList().empty()) {
370 // Returns false if ValueInfo is absent, or the summary list is empty
371 // (e.g., function declarations).
372 return false;
373 }
374
375 for (auto &Summary : TheFnVI.getSummaryList()) {
376 // Conservatively returns false if any non-live functions are seen.
377 // In general either all summaries should be live or all should be dead.
378 if (!Summary->isLive())
379 return false;
380 if (auto *FS = dyn_cast<FunctionSummary>(Summary.get())) {
381 if (!FS->fflags().MustBeUnreachable)
382 return false;
383 }
384 // Do nothing if a non-function has the same GUID (which is rare).
385 // This is correct since non-function summaries are not relevant.
386 }
387 // All function summaries are live and all of them agree that the function is
388// unreachable.
389 return true;
390}
391
392// A virtual call site. VTable is the loaded virtual table pointer, and CS is
393// the indirect virtual call.
394struct VirtualCallSite {
395 Value *VTable = nullptr;
396 CallBase &CB;
397
398 // If non-null, this field points to the associated unsafe use count stored in
399 // the DevirtModule::NumUnsafeUsesForTypeTest map below. See the description
400 // of that field for details.
401 unsigned *NumUnsafeUses = nullptr;
402
403 void
404 emitRemark(const StringRef OptName, const StringRef TargetName,
405 function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter) {
406 Function *F = CB.getCaller();
407 DebugLoc DLoc = CB.getDebugLoc();
408 BasicBlock *Block = CB.getParent();
409
410 using namespace ore;
411 OREGetter(F).emit(OptimizationRemark(DEBUG_TYPE, OptName, DLoc, Block)
412 << NV("Optimization", OptName)
413 << ": devirtualized a call to "
414 << NV("FunctionName", TargetName));
415 }
416
417 void replaceAndErase(
418 const StringRef OptName, const StringRef TargetName, bool RemarksEnabled,
419 function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter,
420 Value *New) {
421 if (RemarksEnabled)
422 emitRemark(OptName, TargetName, OREGetter);
423 CB.replaceAllUsesWith(New);
424 if (auto *II = dyn_cast<InvokeInst>(&CB)) {
425 BranchInst::Create(II->getNormalDest(), &CB);
426 II->getUnwindDest()->removePredecessor(II->getParent());
427 }
428 CB.eraseFromParent();
429 // This use is no longer unsafe.
430 if (NumUnsafeUses)
431 --*NumUnsafeUses;
432 }
433};
434
435// Call site information collected for a specific VTableSlot and possibly a list
436// of constant integer arguments. The grouping by arguments is handled by the
437// VTableSlotInfo class.
438struct CallSiteInfo {
439 /// The set of call sites for this slot. Used during regular LTO and the
440 /// import phase of ThinLTO (as well as the export phase of ThinLTO for any
441 /// call sites that appear in the merged module itself); in each of these
442 /// cases we are directly operating on the call sites at the IR level.
443 std::vector<VirtualCallSite> CallSites;
444
445 /// Whether all call sites represented by this CallSiteInfo, including those
446 /// in summaries, have been devirtualized. This starts off as true because a
447 /// default constructed CallSiteInfo represents no call sites.
448 bool AllCallSitesDevirted = true;
449
450 // These fields are used during the export phase of ThinLTO and reflect
451 // information collected from function summaries.
452
453 /// Whether any function summary contains an llvm.assume(llvm.type.test) for
454 /// this slot.
455 bool SummaryHasTypeTestAssumeUsers = false;
456
457 /// CFI-specific: a vector containing the list of function summaries that use
458 /// the llvm.type.checked.load intrinsic and therefore will require
459 /// resolutions for llvm.type.test in order to implement CFI checks if
460 /// devirtualization was unsuccessful. If devirtualization was successful, the
461 /// pass will clear this vector by calling markDevirt(). If at the end of the
462 /// pass the vector is non-empty, we will need to add a use of llvm.type.test
463 /// to each of the function summaries in the vector.
464 std::vector<FunctionSummary *> SummaryTypeCheckedLoadUsers;
465 std::vector<FunctionSummary *> SummaryTypeTestAssumeUsers;
466
467 bool isExported() const {
468 return SummaryHasTypeTestAssumeUsers ||
[63] Assuming field 'SummaryHasTypeTestAssumeUsers' is true
[64] Returning the value 1, which participates in a condition later
469 !SummaryTypeCheckedLoadUsers.empty();
470 }
471
472 void addSummaryTypeCheckedLoadUser(FunctionSummary *FS) {
473 SummaryTypeCheckedLoadUsers.push_back(FS);
474 AllCallSitesDevirted = false;
475 }
476
477 void addSummaryTypeTestAssumeUser(FunctionSummary *FS) {
478 SummaryTypeTestAssumeUsers.push_back(FS);
479 SummaryHasTypeTestAssumeUsers = true;
480 AllCallSitesDevirted = false;
481 }
482
483 void markDevirt() {
484 AllCallSitesDevirted = true;
485
486 // As explained in the comment for SummaryTypeCheckedLoadUsers.
487 SummaryTypeCheckedLoadUsers.clear();
488 }
489};
490
491// Call site information collected for a specific VTableSlot.
492struct VTableSlotInfo {
493 // The set of call sites which do not have all constant integer arguments
494 // (excluding "this").
495 CallSiteInfo CSInfo;
496
497 // The set of call sites with all constant integer arguments (excluding
498 // "this"), grouped by argument list.
499 std::map<std::vector<uint64_t>, CallSiteInfo> ConstCSInfo;
500
501 void addCallSite(Value *VTable, CallBase &CB, unsigned *NumUnsafeUses);
502
503private:
504 CallSiteInfo &findCallSiteInfo(CallBase &CB);
505};
506
507CallSiteInfo &VTableSlotInfo::findCallSiteInfo(CallBase &CB) {
508 std::vector<uint64_t> Args;
509 auto *CBType = dyn_cast<IntegerType>(CB.getType());
510 if (!CBType || CBType->getBitWidth() > 64 || CB.arg_empty())
511 return CSInfo;
512 for (auto &&Arg : drop_begin(CB.args())) {
513 auto *CI = dyn_cast<ConstantInt>(Arg);
514 if (!CI || CI->getBitWidth() > 64)
515 return CSInfo;
516 Args.push_back(CI->getZExtValue());
517 }
518 return ConstCSInfo[Args];
519}
520
521void VTableSlotInfo::addCallSite(Value *VTable, CallBase &CB,
522 unsigned *NumUnsafeUses) {
523 auto &CSI = findCallSiteInfo(CB);
524 CSI.AllCallSitesDevirted = false;
525 CSI.CallSites.push_back({VTable, CB, NumUnsafeUses});
526}
527
528struct DevirtModule {
529 Module &M;
530 function_ref<AAResults &(Function &)> AARGetter;
531 function_ref<DominatorTree &(Function &)> LookupDomTree;
532
533 ModuleSummaryIndex *ExportSummary;
534 const ModuleSummaryIndex *ImportSummary;
535
536 IntegerType *Int8Ty;
537 PointerType *Int8PtrTy;
538 IntegerType *Int32Ty;
539 IntegerType *Int64Ty;
540 IntegerType *IntPtrTy;
541 /// Sizeless array type, used for imported vtables. This provides a signal
542 /// to analyzers that these imports may alias, as they do for example
543 /// when multiple unique return values occur in the same vtable.
544 ArrayType *Int8Arr0Ty;
545
546 bool RemarksEnabled;
547 function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter;
548
549 MapVector<VTableSlot, VTableSlotInfo> CallSlots;
550
551 // Calls that have already been optimized. We may add a call to multiple
552 // VTableSlotInfos if vtable loads are coalesced and need to make sure not to
553 // optimize a call more than once.
554 SmallPtrSet<CallBase *, 8> OptimizedCalls;
555
556 // This map keeps track of the number of "unsafe" uses of a loaded function
557 // pointer. The key is the associated llvm.type.test intrinsic call generated
558 // by this pass. An unsafe use is one that calls the loaded function pointer
559 // directly. Every time we eliminate an unsafe use (for example, by
560 // devirtualizing it or by applying virtual constant propagation), we
561 // decrement the value stored in this map. If a value reaches zero, we can
562 // eliminate the type check by RAUWing the associated llvm.type.test call with
563 // true.
564 std::map<CallInst *, unsigned> NumUnsafeUsesForTypeTest;
565 PatternList FunctionsToSkip;
566
567 DevirtModule(Module &M, function_ref<AAResults &(Function &)> AARGetter,
568 function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter,
569 function_ref<DominatorTree &(Function &)> LookupDomTree,
570 ModuleSummaryIndex *ExportSummary,
571 const ModuleSummaryIndex *ImportSummary)
572 : M(M), AARGetter(AARGetter), LookupDomTree(LookupDomTree),
573 ExportSummary(ExportSummary), ImportSummary(ImportSummary),
574 Int8Ty(Type::getInt8Ty(M.getContext())),
575 Int8PtrTy(Type::getInt8PtrTy(M.getContext())),
576 Int32Ty(Type::getInt32Ty(M.getContext())),
577 Int64Ty(Type::getInt64Ty(M.getContext())),
578 IntPtrTy(M.getDataLayout().getIntPtrType(M.getContext(), 0)),
579 Int8Arr0Ty(ArrayType::get(Type::getInt8Ty(M.getContext()), 0)),
580 RemarksEnabled(areRemarksEnabled()), OREGetter(OREGetter) {
581 assert(!(ExportSummary && ImportSummary));
582 FunctionsToSkip.init(SkipFunctionNames);
583 }
584
585 bool areRemarksEnabled();
586
587 void
588 scanTypeTestUsers(Function *TypeTestFunc,
589 DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap);
590 void scanTypeCheckedLoadUsers(Function *TypeCheckedLoadFunc);
591
592 void buildTypeIdentifierMap(
593 std::vector<VTableBits> &Bits,
594 DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap);
595
596 bool
597 tryFindVirtualCallTargets(std::vector<VirtualCallTarget> &TargetsForSlot,
598 const std::set<TypeMemberInfo> &TypeMemberInfos,
599 uint64_t ByteOffset,
600 ModuleSummaryIndex *ExportSummary);
601
602 void applySingleImplDevirt(VTableSlotInfo &SlotInfo, Constant *TheFn,
603 bool &IsExported);
604 bool trySingleImplDevirt(ModuleSummaryIndex *ExportSummary,
605 MutableArrayRef<VirtualCallTarget> TargetsForSlot,
606 VTableSlotInfo &SlotInfo,
607 WholeProgramDevirtResolution *Res);
608
609 void applyICallBranchFunnel(VTableSlotInfo &SlotInfo, Constant *JT,
610 bool &IsExported);
611 void tryICallBranchFunnel(MutableArrayRef<VirtualCallTarget> TargetsForSlot,
612 VTableSlotInfo &SlotInfo,
613 WholeProgramDevirtResolution *Res, VTableSlot Slot);
614
615 bool tryEvaluateFunctionsWithArgs(
616 MutableArrayRef<VirtualCallTarget> TargetsForSlot,
617 ArrayRef<uint64_t> Args);
618
619 void applyUniformRetValOpt(CallSiteInfo &CSInfo, StringRef FnName,
620 uint64_t TheRetVal);
621 bool tryUniformRetValOpt(MutableArrayRef<VirtualCallTarget> TargetsForSlot,
622 CallSiteInfo &CSInfo,
623 WholeProgramDevirtResolution::ByArg *Res);
624
625 // Returns the global symbol name that is used to export information about the
626 // given vtable slot and list of arguments.
627 std::string getGlobalName(VTableSlot Slot, ArrayRef<uint64_t> Args,
628 StringRef Name);
629
630 bool shouldExportConstantsAsAbsoluteSymbols();
631
632 // This function is called during the export phase to create a symbol
633 // definition containing information about the given vtable slot and list of
634 // arguments.
635 void exportGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args, StringRef Name,
636 Constant *C);
637 void exportConstant(VTableSlot Slot, ArrayRef<uint64_t> Args, StringRef Name,
638 uint32_t Const, uint32_t &Storage);
639
640 // This function is called during the import phase to create a reference to
641 // the symbol definition created during the export phase.
642 Constant *importGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args,
643 StringRef Name);
644 Constant *importConstant(VTableSlot Slot, ArrayRef<uint64_t> Args,
645 StringRef Name, IntegerType *IntTy,
646 uint32_t Storage);
647
648 Constant *getMemberAddr(const TypeMemberInfo *M);
649
650 void applyUniqueRetValOpt(CallSiteInfo &CSInfo, StringRef FnName, bool IsOne,
651 Constant *UniqueMemberAddr);
652 bool tryUniqueRetValOpt(unsigned BitWidth,
653 MutableArrayRef<VirtualCallTarget> TargetsForSlot,
654 CallSiteInfo &CSInfo,
655 WholeProgramDevirtResolution::ByArg *Res,
656 VTableSlot Slot, ArrayRef<uint64_t> Args);
657
658 void applyVirtualConstProp(CallSiteInfo &CSInfo, StringRef FnName,
659 Constant *Byte, Constant *Bit);
660 bool tryVirtualConstProp(MutableArrayRef<VirtualCallTarget> TargetsForSlot,
661 VTableSlotInfo &SlotInfo,
662 WholeProgramDevirtResolution *Res, VTableSlot Slot);
663
664 void rebuildGlobal(VTableBits &B);
665
666 // Apply the summary resolution for Slot to all virtual calls in SlotInfo.
667 void importResolution(VTableSlot Slot, VTableSlotInfo &SlotInfo);
668
669 // If we were able to eliminate all unsafe uses for a type checked load,
670 // eliminate the associated type tests by replacing them with true.
671 void removeRedundantTypeTests();
672
673 bool run();
674
675 // Look up the corresponding ValueInfo entry of `TheFn` in `ExportSummary`.
676 //
677 // Caller guarantees that `ExportSummary` is not nullptr.
678 static ValueInfo lookUpFunctionValueInfo(Function *TheFn,
679 ModuleSummaryIndex *ExportSummary);
680
681 // Returns true if the function definition must be unreachable.
682 //
683 // Note if this helper function returns true, `F` is guaranteed
684 // to be unreachable; if it returns false, `F` might still
685 // be unreachable but not covered by this helper function.
686 //
687 // Implementation-wise, if function definition is present, IR is analyzed; if
688 // not, look up function flags from ExportSummary as a fallback.
689 static bool mustBeUnreachableFunction(Function *const F,
690 ModuleSummaryIndex *ExportSummary);
691
692 // Lower the module using the action and summary passed as command line
693 // arguments. For testing purposes only.
694 static bool
695 runForTesting(Module &M, function_ref<AAResults &(Function &)> AARGetter,
696 function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter,
697 function_ref<DominatorTree &(Function &)> LookupDomTree);
698};
699
700struct DevirtIndex {
701 ModuleSummaryIndex &ExportSummary;
702 // The set in which to record GUIDs exported from their module by
703 // devirtualization, used by client to ensure they are not internalized.
704 std::set<GlobalValue::GUID> &ExportedGUIDs;
705 // A map in which to record the information necessary to locate the WPD
706 // resolution for local targets in case they are exported by cross module
707 // importing.
708 std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap;
709
710 MapVector<VTableSlotSummary, VTableSlotInfo> CallSlots;
711
712 PatternList FunctionsToSkip;
713
714 DevirtIndex(
715 ModuleSummaryIndex &ExportSummary,
716 std::set<GlobalValue::GUID> &ExportedGUIDs,
717 std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap)
718 : ExportSummary(ExportSummary), ExportedGUIDs(ExportedGUIDs),
719 LocalWPDTargetsMap(LocalWPDTargetsMap) {
720 FunctionsToSkip.init(SkipFunctionNames);
721 }
722
723 bool tryFindVirtualCallTargets(std::vector<ValueInfo> &TargetsForSlot,
724 const TypeIdCompatibleVtableInfo TIdInfo,
725 uint64_t ByteOffset);
726
727 bool trySingleImplDevirt(MutableArrayRef<ValueInfo> TargetsForSlot,
728 VTableSlotSummary &SlotSummary,
729 VTableSlotInfo &SlotInfo,
730 WholeProgramDevirtResolution *Res,
731 std::set<ValueInfo> &DevirtTargets);
732
733 void run();
734};
735
736struct WholeProgramDevirt : public ModulePass {
737 static char ID;
738
739 bool UseCommandLine = false;
740
741 ModuleSummaryIndex *ExportSummary = nullptr;
742 const ModuleSummaryIndex *ImportSummary = nullptr;
743
744 WholeProgramDevirt() : ModulePass(ID), UseCommandLine(true) {
745 initializeWholeProgramDevirtPass(*PassRegistry::getPassRegistry());
746 }
747
748 WholeProgramDevirt(ModuleSummaryIndex *ExportSummary,
749 const ModuleSummaryIndex *ImportSummary)
750 : ModulePass(ID), ExportSummary(ExportSummary),
751 ImportSummary(ImportSummary) {
752 initializeWholeProgramDevirtPass(*PassRegistry::getPassRegistry());
753 }
754
755 bool runOnModule(Module &M) override {
756 if (skipModule(M))
757 return false;
758
759 // In the new pass manager, we can request the optimization
760 // remark emitter pass on a per-function-basis, which the
761 // OREGetter will do for us.
762 // In the old pass manager, this is harder, so we just build
763 // an optimization remark emitter on the fly, when we need it.
764 std::unique_ptr<OptimizationRemarkEmitter> ORE;
765 auto OREGetter = [&](Function *F) -> OptimizationRemarkEmitter & {
766 ORE = std::make_unique<OptimizationRemarkEmitter>(F);
767 return *ORE;
768 };
769
770 auto LookupDomTree = [this](Function &F) -> DominatorTree & {
771 return this->getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();
772 };
773
774 if (UseCommandLine)
775 return DevirtModule::runForTesting(M, LegacyAARGetter(*this), OREGetter,
776 LookupDomTree);
777
778 return DevirtModule(M, LegacyAARGetter(*this), OREGetter, LookupDomTree,
779 ExportSummary, ImportSummary)
780 .run();
781 }
782
783 void getAnalysisUsage(AnalysisUsage &AU) const override {
784 AU.addRequired<AssumptionCacheTracker>();
785 AU.addRequired<TargetLibraryInfoWrapperPass>();
786 AU.addRequired<DominatorTreeWrapperPass>();
787 }
788};
789
790} // end anonymous namespace
791
792INITIALIZE_PASS_BEGIN(WholeProgramDevirt, "wholeprogramdevirt",
793 "Whole program devirtualization", false, false)
794INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
795INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
796INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
797INITIALIZE_PASS_END(WholeProgramDevirt, "wholeprogramdevirt",
798 "Whole program devirtualization", false, false)
799char WholeProgramDevirt::ID = 0;
800
801ModulePass *
802llvm::createWholeProgramDevirtPass(ModuleSummaryIndex *ExportSummary,
803 const ModuleSummaryIndex *ImportSummary) {
804 return new WholeProgramDevirt(ExportSummary, ImportSummary);
805}
806
807PreservedAnalyses WholeProgramDevirtPass::run(Module &M,
808 ModuleAnalysisManager &AM) {
809 auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
810 auto AARGetter = [&](Function &F) -> AAResults & {
811 return FAM.getResult<AAManager>(F);
812 };
813 auto OREGetter = [&](Function *F) -> OptimizationRemarkEmitter & {
814 return FAM.getResult<OptimizationRemarkEmitterAnalysis>(*F);
815 };
816 auto LookupDomTree = [&FAM](Function &F) -> DominatorTree & {
817 return FAM.getResult<DominatorTreeAnalysis>(F);
818 };
819 if (UseCommandLine) {
820 if (DevirtModule::runForTesting(M, AARGetter, OREGetter, LookupDomTree))
821 return PreservedAnalyses::all();
822 return PreservedAnalyses::none();
823 }
824 if (!DevirtModule(M, AARGetter, OREGetter, LookupDomTree, ExportSummary,
825 ImportSummary)
826 .run())
827 return PreservedAnalyses::all();
828 return PreservedAnalyses::none();
829}
830
831// Enable whole program visibility if enabled by client (e.g. linker) or
832// internal option, and not force disabled.
833static bool hasWholeProgramVisibility(bool WholeProgramVisibilityEnabledInLTO) {
834 return (WholeProgramVisibilityEnabledInLTO || WholeProgramVisibility) &&
835 !DisableWholeProgramVisibility;
836}
837
838namespace llvm {
839
840/// If whole program visibility asserted, then upgrade all public vcall
841/// visibility metadata on vtable definitions to linkage unit visibility in
842/// Module IR (for regular or hybrid LTO).
843void updateVCallVisibilityInModule(
844 Module &M, bool WholeProgramVisibilityEnabledInLTO,
845 const DenseSet<GlobalValue::GUID> &DynamicExportSymbols) {
846 if (!hasWholeProgramVisibility(WholeProgramVisibilityEnabledInLTO))
847 return;
848 for (GlobalVariable &GV : M.globals())
849 // Add linkage unit visibility to any variable with type metadata, which are
850 // the vtable definitions. We won't have an existing vcall_visibility
851 // metadata on vtable definitions with public visibility.
852 if (GV.hasMetadata(LLVMContext::MD_type) &&
853 GV.getVCallVisibility() == GlobalObject::VCallVisibilityPublic &&
854 // Don't upgrade the visibility for symbols exported to the dynamic
855 // linker, as we have no information on their eventual use.
856 !DynamicExportSymbols.count(GV.getGUID()))
857 GV.setVCallVisibilityMetadata(GlobalObject::VCallVisibilityLinkageUnit);
858}
859
860/// If whole program visibility asserted, then upgrade all public vcall
861/// visibility metadata on vtable definition summaries to linkage unit
862/// visibility in Module summary index (for ThinLTO).
863void updateVCallVisibilityInIndex(
864 ModuleSummaryIndex &Index, bool WholeProgramVisibilityEnabledInLTO,
865 const DenseSet<GlobalValue::GUID> &DynamicExportSymbols) {
866 if (!hasWholeProgramVisibility(WholeProgramVisibilityEnabledInLTO))
867 return;
868 for (auto &P : Index) {
869 for (auto &S : P.second.SummaryList) {
870 auto *GVar = dyn_cast<GlobalVarSummary>(S.get());
871 if (!GVar ||
872 GVar->getVCallVisibility() != GlobalObject::VCallVisibilityPublic ||
873 // Don't upgrade the visibility for symbols exported to the dynamic
874 // linker, as we have no information on their eventual use.
875 DynamicExportSymbols.count(P.first))
876 continue;
877 GVar->setVCallVisibility(GlobalObject::VCallVisibilityLinkageUnit);
878 }
879 }
880}
881
882void runWholeProgramDevirtOnIndex(
883 ModuleSummaryIndex &Summary, std::set<GlobalValue::GUID> &ExportedGUIDs,
884 std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap) {
885 DevirtIndex(Summary, ExportedGUIDs, LocalWPDTargetsMap).run();
886}
887
888void updateIndexWPDForExports(
889 ModuleSummaryIndex &Summary,
890 function_ref<bool(StringRef, ValueInfo)> isExported,
891 std::map<ValueInfo, std::vector<VTableSlotSummary>> &LocalWPDTargetsMap) {
892 for (auto &T : LocalWPDTargetsMap) {
893 auto &VI = T.first;
894 // This was enforced earlier during trySingleImplDevirt.
895 assert(VI.getSummaryList().size() == 1 &&
896 "Devirt of local target has more than one copy");
897 auto &S = VI.getSummaryList()[0];
898 if (!isExported(S->modulePath(), VI))
899 continue;
900
901 // It's been exported by a cross module import.
902 for (auto &SlotSummary : T.second) {
903 auto *TIdSum = Summary.getTypeIdSummary(SlotSummary.TypeID);
904 assert(TIdSum);
905 auto WPDRes = TIdSum->WPDRes.find(SlotSummary.ByteOffset);
906 assert(WPDRes != TIdSum->WPDRes.end());
907 WPDRes->second.SingleImplName = ModuleSummaryIndex::getGlobalNameForLocal(
908 WPDRes->second.SingleImplName,
909 Summary.getModuleHash(S->modulePath()));
910 }
911 }
912}
913
914} // end namespace llvm
915
916static Error checkCombinedSummaryForTesting(ModuleSummaryIndex *Summary) {
917 // Check that summary index contains regular LTO module when performing
918 // export to prevent occasional use of index from pure ThinLTO compilation
919 // (-fno-split-lto-module). This kind of summary index is passed to
920 // DevirtIndex::run, not to DevirtModule::run used by opt/runForTesting.
921 const auto &ModPaths = Summary->modulePaths();
922 if (ClSummaryAction != PassSummaryAction::Import &&
923 ModPaths.find(ModuleSummaryIndex::getRegularLTOModuleName()) ==
924 ModPaths.end())
925 return createStringError(
926 errc::invalid_argument,
927 "combined summary should contain Regular LTO module");
928 return ErrorSuccess();
929}
930
931bool DevirtModule::runForTesting(
932 Module &M, function_ref<AAResults &(Function &)> AARGetter,
933 function_ref<OptimizationRemarkEmitter &(Function *)> OREGetter,
934 function_ref<DominatorTree &(Function &)> LookupDomTree) {
935 std::unique_ptr<ModuleSummaryIndex> Summary =
936 std::make_unique<ModuleSummaryIndex>(/*HaveGVs=*/false);
937
938 // Handle the command-line summary arguments. This code is for testing
939 // purposes only, so we handle errors directly.
940 if (!ClReadSummary.empty()) {
941 ExitOnError ExitOnErr("-wholeprogramdevirt-read-summary: " + ClReadSummary +
942 ": ");
943 auto ReadSummaryFile =
944 ExitOnErr(errorOrToExpected(MemoryBuffer::getFile(ClReadSummary)));
945 if (Expected<std::unique_ptr<ModuleSummaryIndex>> SummaryOrErr =
946 getModuleSummaryIndex(*ReadSummaryFile)) {
947 Summary = std::move(*SummaryOrErr);
948 ExitOnErr(checkCombinedSummaryForTesting(Summary.get()));
949 } else {
950 // Try YAML if we've failed with bitcode.
951 consumeError(SummaryOrErr.takeError());
952 yaml::Input In(ReadSummaryFile->getBuffer());
953 In >> *Summary;
954 ExitOnErr(errorCodeToError(In.error()));
955 }
956 }
957
958 bool Changed =
959 DevirtModule(M, AARGetter, OREGetter, LookupDomTree,
960 ClSummaryAction == PassSummaryAction::Export ? Summary.get()
961 : nullptr,
962 ClSummaryAction == PassSummaryAction::Import ? Summary.get()
963 : nullptr)
964 .run();
965
966 if (!ClWriteSummary.empty()) {
967 ExitOnError ExitOnErr(
968 "-wholeprogramdevirt-write-summary: " + ClWriteSummary + ": ");
969 std::error_code EC;
970 if (StringRef(ClWriteSummary).endswith(".bc")) {
971 raw_fd_ostream OS(ClWriteSummary, EC, sys::fs::OF_None);
972 ExitOnErr(errorCodeToError(EC));
973 WriteIndexToFile(*Summary, OS);
974 } else {
975 raw_fd_ostream OS(ClWriteSummary, EC, sys::fs::OF_TextWithCRLF);
976 ExitOnErr(errorCodeToError(EC));
977 yaml::Output Out(OS);
978 Out << *Summary;
979 }
980 }
981
982 return Changed;
983}
984
985void DevirtModule::buildTypeIdentifierMap(
986 std::vector<VTableBits> &Bits,
987 DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap) {
988 DenseMap<GlobalVariable *, VTableBits *> GVToBits;
989 Bits.reserve(M.getGlobalList().size());
990 SmallVector<MDNode *, 2> Types;
991 for (GlobalVariable &GV : M.globals()) {
992 Types.clear();
993 GV.getMetadata(LLVMContext::MD_type, Types);
994 if (GV.isDeclaration() || Types.empty())
995 continue;
996
997 VTableBits *&BitsPtr = GVToBits[&GV];
998 if (!BitsPtr) {
999 Bits.emplace_back();
1000 Bits.back().GV = &GV;
1001 Bits.back().ObjectSize =
1002 M.getDataLayout().getTypeAllocSize(GV.getInitializer()->getType());
1003 BitsPtr = &Bits.back();
1004 }
1005
1006 for (MDNode *Type : Types) {
1007 auto TypeID = Type->getOperand(1).get();
1008
1009 uint64_t Offset =
1010 cast<ConstantInt>(
1011 cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
1012 ->getZExtValue();
1013
1014 TypeIdMap[TypeID].insert({BitsPtr, Offset});
1015 }
1016 }
1017}
1018
1019bool DevirtModule::tryFindVirtualCallTargets(
1020 std::vector<VirtualCallTarget> &TargetsForSlot,
1021 const std::set<TypeMemberInfo> &TypeMemberInfos, uint64_t ByteOffset,
1022 ModuleSummaryIndex *ExportSummary) {
1023 for (const TypeMemberInfo &TM : TypeMemberInfos) {
1024 if (!TM.Bits->GV->isConstant())
1025 return false;
1026
1027 // We cannot perform whole program devirtualization analysis on a vtable
1028 // with public LTO visibility.
1029 if (TM.Bits->GV->getVCallVisibility() ==
1030 GlobalObject::VCallVisibilityPublic)
1031 return false;
1032
1033 Constant *Ptr = getPointerAtOffset(TM.Bits->GV->getInitializer(),
1034 TM.Offset + ByteOffset, M);
1035 if (!Ptr)
1036 return false;
1037
1038 auto Fn = dyn_cast<Function>(Ptr->stripPointerCasts());
1039 if (!Fn)
1040 return false;
1041
1042 if (FunctionsToSkip.match(Fn->getName()))
1043 return false;
1044
1045 // We can disregard __cxa_pure_virtual as a possible call target, as
1046 // calls to pure virtuals are UB.
1047 if (Fn->getName() == "__cxa_pure_virtual")
1048 continue;
1049
1050 // We can disregard unreachable functions as possible call targets, as
1051 // unreachable functions shouldn't be called.
1052 if (mustBeUnreachableFunction(Fn, ExportSummary))
1053 continue;
1054
1055 TargetsForSlot.push_back({Fn, &TM});
1056 }
1057
1058 // Give up if we couldn't find any targets.
1059 return !TargetsForSlot.empty();
[23] Assuming the condition is true
[24] Returning the value 1, which participates in a condition later
1060}
1061
1062bool DevirtIndex::tryFindVirtualCallTargets(
1063 std::vector<ValueInfo> &TargetsForSlot, const TypeIdCompatibleVtableInfo TIdInfo,
1064 uint64_t ByteOffset) {
1065 for (const TypeIdOffsetVtableInfo &P : TIdInfo) {
1066 // Find a representative copy of the vtable initializer.
1067 // We can have multiple available_externally, linkonce_odr and weak_odr
1068 // vtable initializers. We can also have multiple external vtable
1069 // initializers in the case of comdats, which we cannot check here.
1070 // The linker should give an error in this case.
1071 //
1072 // Also, handle the case of same-named local Vtables with the same path
1073 // and therefore the same GUID. This can happen if there isn't enough
1074 // distinguishing path when compiling the source file. In that case we
1075 // conservatively return false early.
1076 const GlobalVarSummary *VS = nullptr;
1077 bool LocalFound = false;
1078 for (auto &S : P.VTableVI.getSummaryList()) {
1079 if (GlobalValue::isLocalLinkage(S->linkage())) {
1080 if (LocalFound)
1081 return false;
1082 LocalFound = true;
1083 }
1084 auto *CurVS = cast<GlobalVarSummary>(S->getBaseObject());
1085 if (!CurVS->vTableFuncs().empty() ||
1086 // Previously clang did not attach the necessary type metadata to
1087 // available_externally vtables, in which case there would not
1088 // be any vtable functions listed in the summary and we need
1089 // to treat this case conservatively (in case the bitcode is old).
1090 // However, we will also not have any vtable functions in the
1091 // case of a pure virtual base class. In that case we do want
1092 // to set VS to avoid treating it conservatively.
1093 !GlobalValue::isAvailableExternallyLinkage(S->linkage())) {
1094 VS = CurVS;
1095 // We cannot perform whole program devirtualization analysis on a vtable
1096 // with public LTO visibility.
1097 if (VS->getVCallVisibility() == GlobalObject::VCallVisibilityPublic)
1098 return false;
1099 }
1100 }
1101 // There will be no VS if all copies are available_externally having no
1102 // type metadata. In that case we can't safely perform WPD.
1103 if (!VS)
1104 return false;
1105 if (!VS->isLive())
1106 continue;
1107 for (auto VTP : VS->vTableFuncs()) {
1108 if (VTP.VTableOffset != P.AddressPointOffset + ByteOffset)
1109 continue;
1110
1111 if (mustBeUnreachableFunction(VTP.FuncVI))
1112 continue;
1113
1114 TargetsForSlot.push_back(VTP.FuncVI);
1115 }
1116 }
1117
1118 // Give up if we couldn't find any targets.
1119 return !TargetsForSlot.empty();
1120}
1121
1122void DevirtModule::applySingleImplDevirt(VTableSlotInfo &SlotInfo,
1123 Constant *TheFn, bool &IsExported) {
1124 // Don't devirtualize function if we're told to skip it
1125 // in -wholeprogramdevirt-skip.
1126 if (FunctionsToSkip.match(TheFn->stripPointerCasts()->getName()))
[32] Assuming the condition is false
[33] Taking false branch
1127 return;
1128 auto Apply = [&](CallSiteInfo &CSInfo) {
1129 for (auto &&VCallSite : CSInfo.CallSites) {
1130 if (!OptimizedCalls.insert(&VCallSite.CB).second)
1131 continue;
1132
1133 if (RemarksEnabled)
1134 VCallSite.emitRemark("single-impl",
1135 TheFn->stripPointerCasts()->getName(), OREGetter);
1136 auto &CB = VCallSite.CB;
1137 assert(!CB.getCalledFunction() && "devirtualizing direct call?");
1138 IRBuilder<> Builder(&CB);
1139 Value *Callee =
1140 Builder.CreateBitCast(TheFn, CB.getCalledOperand()->getType());
1141
1142 // If checking is enabled, add support to compare the virtual function
1143 // pointer to the devirtualized target. In case of a mismatch, perform a
1144 // debug trap.
1145 if (CheckDevirt) {
1146 auto *Cond = Builder.CreateICmpNE(CB.getCalledOperand(), Callee);
1147 Instruction *ThenTerm =
1148 SplitBlockAndInsertIfThen(Cond, &CB, /*Unreachable=*/false);
1149 Builder.SetInsertPoint(ThenTerm);
1150 Function *TrapFn = Intrinsic::getDeclaration(&M, Intrinsic::debugtrap);
1151 auto *CallTrap = Builder.CreateCall(TrapFn);
1152 CallTrap->setDebugLoc(CB.getDebugLoc());
1153 }
1154
1155 // Devirtualize.
1156 CB.setCalledOperand(Callee);
1157
1158 // This use is no longer unsafe.
1159 if (VCallSite.NumUnsafeUses)
1160 --*VCallSite.NumUnsafeUses;
1161 }
1162 if (CSInfo.isExported())
1163 IsExported = true;
1164 CSInfo.markDevirt();
1165 };
1166 Apply(SlotInfo.CSInfo);
1167 for (auto &P : SlotInfo.ConstCSInfo)
1168 Apply(P.second);
1169}
[34] Returning without writing to 'IsExported', which participates in a condition later
1170
1171static bool AddCalls(VTableSlotInfo &SlotInfo, const ValueInfo &Callee) {
1172 // We can't add calls if we haven't seen a definition
1173 if (Callee.getSummaryList().empty())
1174 return false;
1175
1176 // Insert calls into the summary index so that the devirtualized targets
1177 // are eligible for import.
1178 // FIXME: Annotate type tests with hotness. For now, mark these as hot
1179 // to better ensure we have the opportunity to inline them.
1180 bool IsExported = false;
1181 auto &S = Callee.getSummaryList()[0];
1182 CalleeInfo CI(CalleeInfo::HotnessType::Hot, /* RelBF = */ 0);
1183 auto AddCalls = [&](CallSiteInfo &CSInfo) {
1184 for (auto *FS : CSInfo.SummaryTypeCheckedLoadUsers) {
1185 FS->addCall({Callee, CI});
1186 IsExported |= S->modulePath() != FS->modulePath();
1187 }
1188 for (auto *FS : CSInfo.SummaryTypeTestAssumeUsers) {
1189 FS->addCall({Callee, CI});
1190 IsExported |= S->modulePath() != FS->modulePath();
1191 }
1192 };
1193 AddCalls(SlotInfo.CSInfo);
1194 for (auto &P : SlotInfo.ConstCSInfo)
1195 AddCalls(P.second);
1196 return IsExported;
1197}
1198
1199bool DevirtModule::trySingleImplDevirt(
1200 ModuleSummaryIndex *ExportSummary,
1201 MutableArrayRef<VirtualCallTarget> TargetsForSlot, VTableSlotInfo &SlotInfo,
1202 WholeProgramDevirtResolution *Res) {
1203 // See if the program contains a single implementation of this virtual
1204 // function.
1205 Function *TheFn = TargetsForSlot[0].Fn;
1206 for (auto &&Target : TargetsForSlot)
[28] Assuming '__begin1' is equal to '__end1'
1207 if (TheFn != Target.Fn)
1208 return false;
1209
1210 // If so, update each call site to call that implementation directly.
1211 if (RemarksEnabled)
[29] Assuming field 'RemarksEnabled' is false
[30] Taking false branch
1212 TargetsForSlot[0].WasDevirt = true;
1213
1214 bool IsExported = false;
1215 applySingleImplDevirt(SlotInfo, TheFn, IsExported);
[31] Calling 'DevirtModule::applySingleImplDevirt'
[35] Returning from 'DevirtModule::applySingleImplDevirt'
1216 if (!IsExported)
[35.1] 'IsExported' is false
[36] Taking true branch
1217 return false;
[37] Returning zero, which participates in a condition later
1218
1219 // If the only implementation has local linkage, we must promote to external
1220 // to make it visible to thin LTO objects. We can only get here during the
1221 // ThinLTO export phase.
1222 if (TheFn->hasLocalLinkage()) {
1223 std::string NewName = (TheFn->getName() + ".llvm.merged").str();
1224
1225 // Since we are renaming the function, any comdats with the same name must
1226 // also be renamed. This is required when targeting COFF, as the comdat name
1227 // must match one of the names of the symbols in the comdat.
1228 if (Comdat *C = TheFn->getComdat()) {
1229 if (C->getName() == TheFn->getName()) {
1230 Comdat *NewC = M.getOrInsertComdat(NewName);
1231 NewC->setSelectionKind(C->getSelectionKind());
1232 for (GlobalObject &GO : M.global_objects())
1233 if (GO.getComdat() == C)
1234 GO.setComdat(NewC);
1235 }
1236 }
1237
1238 TheFn->setLinkage(GlobalValue::ExternalLinkage);
1239 TheFn->setVisibility(GlobalValue::HiddenVisibility);
1240 TheFn->setName(NewName);
1241 }
1242 if (ValueInfo TheFnVI = ExportSummary->getValueInfo(TheFn->getGUID()))
1243 // Any needed promotion of 'TheFn' has already been done during
1244 // LTO unit split, so we can ignore return value of AddCalls.
1245 AddCalls(SlotInfo, TheFnVI);
1246
1247 Res->TheKind = WholeProgramDevirtResolution::SingleImpl;
1248 Res->SingleImplName = std::string(TheFn->getName());
1249
1250 return true;
1251}
1252
1253bool DevirtIndex::trySingleImplDevirt(MutableArrayRef<ValueInfo> TargetsForSlot,
1254 VTableSlotSummary &SlotSummary,
1255 VTableSlotInfo &SlotInfo,
1256 WholeProgramDevirtResolution *Res,
1257 std::set<ValueInfo> &DevirtTargets) {
1258 // See if the program contains a single implementation of this virtual
1259 // function.
1260 auto TheFn = TargetsForSlot[0];
1261 for (auto &&Target : TargetsForSlot)
1262 if (TheFn != Target)
1263 return false;
1264
1265 // Don't devirtualize if we don't have a target definition.
1266 auto Size = TheFn.getSummaryList().size();
1267 if (!Size)
1268 return false;
1269
1270 // Don't devirtualize the function if we're told to skip it
1271 // in -wholeprogramdevirt-skip.
1272 if (FunctionsToSkip.match(TheFn.name()))
1273 return false;
1274
1275 // If the summary list contains multiple summaries where at least one is
1276 // a local, give up, as we won't know which (possibly promoted) name to use.
1277 for (auto &S : TheFn.getSummaryList())
1278 if (GlobalValue::isLocalLinkage(S->linkage()) && Size > 1)
1279 return false;
1280
1281 // Collect functions devirtualized at least for one call site for stats.
1282 if (PrintSummaryDevirt)
1283 DevirtTargets.insert(TheFn);
1284
1285 auto &S = TheFn.getSummaryList()[0];
1286 bool IsExported = AddCalls(SlotInfo, TheFn);
1287 if (IsExported)
1288 ExportedGUIDs.insert(TheFn.getGUID());
1289
1290 // Record in summary for use in devirtualization during the ThinLTO import
1291 // step.
1292 Res->TheKind = WholeProgramDevirtResolution::SingleImpl;
1293 if (GlobalValue::isLocalLinkage(S->linkage())) {
1294 if (IsExported)
1295 // If target is a local function and we are exporting it by
1296 // devirtualizing a call in another module, we need to record the
1297 // promoted name.
1298 Res->SingleImplName = ModuleSummaryIndex::getGlobalNameForLocal(
1299 TheFn.name(), ExportSummary.getModuleHash(S->modulePath()));
1300 else {
1301 LocalWPDTargetsMap[TheFn].push_back(SlotSummary);
1302 Res->SingleImplName = std::string(TheFn.name());
1303 }
1304 } else
1305 Res->SingleImplName = std::string(TheFn.name());
1306
1307 // Name will be empty if this thin link is driven off of a serialized
1308 // combined index (e.g. llvm-lto). However, WPD is not supported/invoked
1309 // for the legacy LTO API anyway.
1310 assert(!Res->SingleImplName.empty());
1311
1312 return true;
1313}
1314
1315void DevirtModule::tryICallBranchFunnel(
1316 MutableArrayRef<VirtualCallTarget> TargetsForSlot, VTableSlotInfo &SlotInfo,
1317 WholeProgramDevirtResolution *Res, VTableSlot Slot) {
1318 Triple T(M.getTargetTriple());
1319 if (T.getArch() != Triple::x86_64)
47
Assuming the condition is false
48
Taking false branch
1320 return;
1321
1322 if (TargetsForSlot.size() > ClThreshold)
49
Assuming the condition is false
50
Taking false branch
1323 return;
1324
1325 bool HasNonDevirt = !SlotInfo.CSInfo.AllCallSitesDevirted;
51
Assuming field 'AllCallSitesDevirted' is true
1326 if (!HasNonDevirt
51.1
'HasNonDevirt' is false
)
52
Taking true branch
1327 for (auto &P : SlotInfo.ConstCSInfo)
1328 if (!P.second.AllCallSitesDevirted) {
53
Assuming field 'AllCallSitesDevirted' is false
54
Taking true branch
1329 HasNonDevirt = true;
1330 break;
1331 }
1332
1333 if (!HasNonDevirt
55.1
'HasNonDevirt' is true
)
55
Execution continues on line 1333
56
Taking false branch
1334 return;
1335
1336 FunctionType *FT =
1337 FunctionType::get(Type::getVoidTy(M.getContext()), {Int8PtrTy}, true);
1338 Function *JT;
1339 if (isa<MDString>(Slot.TypeID)) {
57
Assuming field 'TypeID' is a 'MDString'
58
Taking true branch
1340 JT = Function::Create(FT, Function::ExternalLinkage,
1341 M.getDataLayout().getProgramAddressSpace(),
1342 getGlobalName(Slot, {}, "branch_funnel"), &M);
1343 JT->setVisibility(GlobalValue::HiddenVisibility);
1344 } else {
1345 JT = Function::Create(FT, Function::InternalLinkage,
1346 M.getDataLayout().getProgramAddressSpace(),
1347 "branch_funnel", &M);
1348 }
1349 JT->addParamAttr(0, Attribute::Nest);
1350
1351 std::vector<Value *> JTArgs;
1352 JTArgs.push_back(JT->arg_begin());
1353 for (auto &T : TargetsForSlot) {
59
Assuming '__begin1' is equal to '__end1'
1354 JTArgs.push_back(getMemberAddr(T.TM));
1355 JTArgs.push_back(T.Fn);
1356 }
1357
1358 BasicBlock *BB = BasicBlock::Create(M.getContext(), "", JT, nullptr);
1359 Function *Intr =
1360 Intrinsic::getDeclaration(&M, llvm::Intrinsic::icall_branch_funnel, {});
1361
1362 auto *CI = CallInst::Create(Intr, JTArgs, "", BB);
1363 CI->setTailCallKind(CallInst::TCK_MustTail);
1364 ReturnInst::Create(M.getContext(), nullptr, BB);
1365
1366 bool IsExported = false;
1367 applyICallBranchFunnel(SlotInfo, JT, IsExported);
60
Calling 'DevirtModule::applyICallBranchFunnel'
71
Returning from 'DevirtModule::applyICallBranchFunnel'
1368 if (IsExported
71.1
'IsExported' is true
)
72
Taking true branch
1369 Res->TheKind = WholeProgramDevirtResolution::BranchFunnel;
73
Access to field 'TheKind' results in a dereference of a null pointer (loaded from variable 'Res')
1370}
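
The dereference flagged at line 1369 occurs because 'Res' can arrive null from DevirtModule::run() (see step 45 later in this path) yet is written unconditionally once IsExported is true; trySingleImplDevirt (line 1247) follows the same unchecked pattern. A hedged sketch of one possible guard, using simplified stand-in types rather than the real WholeProgramDevirtResolution:

enum class ResolutionKind { Indir, SingleImpl, BranchFunnel }; // simplified
struct Resolution { ResolutionKind TheKind = ResolutionKind::Indir; };

// Tolerate a null resolution object: callers without an export summary
// never create one, so there is nothing to record.
static void recordBranchFunnel(Resolution *Res, bool IsExported) {
  if (IsExported && Res)
    Res->TheKind = ResolutionKind::BranchFunnel;
}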
1371
1372void DevirtModule::applyICallBranchFunnel(VTableSlotInfo &SlotInfo,
1373 Constant *JT, bool &IsExported) {
1374 auto Apply = [&](CallSiteInfo &CSInfo) {
1375 if (CSInfo.isExported())
62
Calling 'CallSiteInfo::isExported'
65
Returning from 'CallSiteInfo::isExported'
66
Taking true branch
1376 IsExported = true;
67
The value 1 is assigned to 'IsExported', which participates in a condition later
1377 if (CSInfo.AllCallSitesDevirted)
68
Assuming field 'AllCallSitesDevirted' is true
69
Taking true branch
1378 return;
1379 for (auto &&VCallSite : CSInfo.CallSites) {
1380 CallBase &CB = VCallSite.CB;
1381
1382 // Jump tables are only profitable if the retpoline mitigation is enabled.
1383 Attribute FSAttr = CB.getCaller()->getFnAttribute("target-features");
1384 if (!FSAttr.isValid() ||
1385 !FSAttr.getValueAsString().contains("+retpoline"))
1386 continue;
1387
1388 if (RemarksEnabled)
1389 VCallSite.emitRemark("branch-funnel",
1390 JT->stripPointerCasts()->getName(), OREGetter);
1391
1392 // Pass the address of the vtable in the nest register, which is r10 on
1393 // x86_64.
1394 std::vector<Type *> NewArgs;
1395 NewArgs.push_back(Int8PtrTy);
1396 append_range(NewArgs, CB.getFunctionType()->params());
1397 FunctionType *NewFT =
1398 FunctionType::get(CB.getFunctionType()->getReturnType(), NewArgs,
1399 CB.getFunctionType()->isVarArg());
1400 PointerType *NewFTPtr = PointerType::getUnqual(NewFT);
1401
1402 IRBuilder<> IRB(&CB);
1403 std::vector<Value *> Args;
1404 Args.push_back(IRB.CreateBitCast(VCallSite.VTable, Int8PtrTy));
1405 llvm::append_range(Args, CB.args());
1406
1407 CallBase *NewCS = nullptr;
1408 if (isa<CallInst>(CB))
1409 NewCS = IRB.CreateCall(NewFT, IRB.CreateBitCast(JT, NewFTPtr), Args);
1410 else
1411 NewCS = IRB.CreateInvoke(NewFT, IRB.CreateBitCast(JT, NewFTPtr),
1412 cast<InvokeInst>(CB).getNormalDest(),
1413 cast<InvokeInst>(CB).getUnwindDest(), Args);
1414 NewCS->setCallingConv(CB.getCallingConv());
1415
1416 AttributeList Attrs = CB.getAttributes();
1417 std::vector<AttributeSet> NewArgAttrs;
1418 NewArgAttrs.push_back(AttributeSet::get(
1419 M.getContext(), ArrayRef<Attribute>{Attribute::get(
1420 M.getContext(), Attribute::Nest)}));
1421 for (unsigned I = 0; I + 2 < Attrs.getNumAttrSets(); ++I)
1422 NewArgAttrs.push_back(Attrs.getParamAttrs(I));
1423 NewCS->setAttributes(
1424 AttributeList::get(M.getContext(), Attrs.getFnAttrs(),
1425 Attrs.getRetAttrs(), NewArgAttrs));
1426
1427 CB.replaceAllUsesWith(NewCS);
1428 CB.eraseFromParent();
1429
1430 // This use is no longer unsafe.
1431 if (VCallSite.NumUnsafeUses)
1432 --*VCallSite.NumUnsafeUses;
1433 }
1434 // Don't mark as devirtualized because there may be callers compiled without
1435 // retpoline mitigation, which would mean that they are lowered to
1436 // llvm.type.test and therefore require an llvm.type.test resolution for the
1437 // type identifier.
1438 };
1439 Apply(SlotInfo.CSInfo);
61
Calling 'operator()'
70
Returning from 'operator()'
1440 for (auto &P : SlotInfo.ConstCSInfo)
1441 Apply(P.second);
1442}
1443
1444bool DevirtModule::tryEvaluateFunctionsWithArgs(
1445 MutableArrayRef<VirtualCallTarget> TargetsForSlot,
1446 ArrayRef<uint64_t> Args) {
1447 // Evaluate each function and store the result in each target's RetVal
1448 // field.
1449 for (VirtualCallTarget &Target : TargetsForSlot) {
1450 if (Target.Fn->arg_size() != Args.size() + 1)
1451 return false;
1452
1453 Evaluator Eval(M.getDataLayout(), nullptr);
1454 SmallVector<Constant *, 2> EvalArgs;
1455 EvalArgs.push_back(
1456 Constant::getNullValue(Target.Fn->getFunctionType()->getParamType(0)));
1457 for (unsigned I = 0; I != Args.size(); ++I) {
1458 auto *ArgTy = dyn_cast<IntegerType>(
1459 Target.Fn->getFunctionType()->getParamType(I + 1));
1460 if (!ArgTy)
1461 return false;
1462 EvalArgs.push_back(ConstantInt::get(ArgTy, Args[I]));
1463 }
1464
1465 Constant *RetVal;
1466 if (!Eval.EvaluateFunction(Target.Fn, RetVal, EvalArgs) ||
1467 !isa<ConstantInt>(RetVal))
1468 return false;
1469 Target.RetVal = cast<ConstantInt>(RetVal)->getZExtValue();
1470 }
1471 return true;
1472}
1473
1474void DevirtModule::applyUniformRetValOpt(CallSiteInfo &CSInfo, StringRef FnName,
1475 uint64_t TheRetVal) {
1476 for (auto Call : CSInfo.CallSites) {
1477 if (!OptimizedCalls.insert(&Call.CB).second)
1478 continue;
1479 Call.replaceAndErase(
1480 "uniform-ret-val", FnName, RemarksEnabled, OREGetter,
1481 ConstantInt::get(cast<IntegerType>(Call.CB.getType()), TheRetVal));
1482 }
1483 CSInfo.markDevirt();
1484}
1485
1486bool DevirtModule::tryUniformRetValOpt(
1487 MutableArrayRef<VirtualCallTarget> TargetsForSlot, CallSiteInfo &CSInfo,
1488 WholeProgramDevirtResolution::ByArg *Res) {
1489 // Uniform return value optimization. If all functions return the same
1490 // constant, replace all calls with that constant.
1491 uint64_t TheRetVal = TargetsForSlot[0].RetVal;
1492 for (const VirtualCallTarget &Target : TargetsForSlot)
1493 if (Target.RetVal != TheRetVal)
1494 return false;
1495
1496 if (CSInfo.isExported()) {
1497 Res->TheKind = WholeProgramDevirtResolution::ByArg::UniformRetVal;
1498 Res->Info = TheRetVal;
1499 }
1500
1501 applyUniformRetValOpt(CSInfo, TargetsForSlot[0].Fn->getName(), TheRetVal);
1502 if (RemarksEnabled)
1503 for (auto &&Target : TargetsForSlot)
1504 Target.WasDevirt = true;
1505 return true;
1506}
1507
1508std::string DevirtModule::getGlobalName(VTableSlot Slot,
1509 ArrayRef<uint64_t> Args,
1510 StringRef Name) {
1511 std::string FullName = "__typeid_";
1512 raw_string_ostream OS(FullName);
1513 OS << cast<MDString>(Slot.TypeID)->getString() << '_' << Slot.ByteOffset;
1514 for (uint64_t Arg : Args)
1515 OS << '_' << Arg;
1516 OS << '_' << Name;
1517 return OS.str();
1518}
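
For illustration, a standalone re-implementation of the mangling performed by getGlobalName, with the type id passed as a plain string (the real code reads it from an MDString):

#include <cstdint>
#include <sstream>
#include <string>
#include <vector>

static std::string mangleTypeIdName(const std::string &TypeID,
                                    uint64_t ByteOffset,
                                    const std::vector<uint64_t> &Args,
                                    const std::string &Name) {
  std::ostringstream OS;
  OS << "__typeid_" << TypeID << '_' << ByteOffset;
  for (uint64_t Arg : Args)
    OS << '_' << Arg;
  OS << '_' << Name;
  return OS.str();
}
// e.g. mangleTypeIdName("_ZTS1A", 0, {}, "branch_funnel") yields
// "__typeid__ZTS1A_0_branch_funnel".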
1519
1520bool DevirtModule::shouldExportConstantsAsAbsoluteSymbols() {
1521 Triple T(M.getTargetTriple());
1522 return T.isX86() && T.getObjectFormat() == Triple::ELF;
1523}
1524
1525void DevirtModule::exportGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args,
1526 StringRef Name, Constant *C) {
1527 GlobalAlias *GA = GlobalAlias::create(Int8Ty, 0, GlobalValue::ExternalLinkage,
1528 getGlobalName(Slot, Args, Name), C, &M);
1529 GA->setVisibility(GlobalValue::HiddenVisibility);
1530}
1531
1532void DevirtModule::exportConstant(VTableSlot Slot, ArrayRef<uint64_t> Args,
1533 StringRef Name, uint32_t Const,
1534 uint32_t &Storage) {
1535 if (shouldExportConstantsAsAbsoluteSymbols()) {
1536 exportGlobal(
1537 Slot, Args, Name,
1538 ConstantExpr::getIntToPtr(ConstantInt::get(Int32Ty, Const), Int8PtrTy));
1539 return;
1540 }
1541
1542 Storage = Const;
1543}
1544
1545Constant *DevirtModule::importGlobal(VTableSlot Slot, ArrayRef<uint64_t> Args,
1546 StringRef Name) {
1547 Constant *C =
1548 M.getOrInsertGlobal(getGlobalName(Slot, Args, Name), Int8Arr0Ty);
1549 auto *GV = dyn_cast<GlobalVariable>(C);
1550 if (GV)
1551 GV->setVisibility(GlobalValue::HiddenVisibility);
1552 return C;
1553}
1554
1555Constant *DevirtModule::importConstant(VTableSlot Slot, ArrayRef<uint64_t> Args,
1556 StringRef Name, IntegerType *IntTy,
1557 uint32_t Storage) {
1558 if (!shouldExportConstantsAsAbsoluteSymbols())
1559 return ConstantInt::get(IntTy, Storage);
1560
1561 Constant *C = importGlobal(Slot, Args, Name);
1562 auto *GV = cast<GlobalVariable>(C->stripPointerCasts());
1563 C = ConstantExpr::getPtrToInt(C, IntTy);
1564
1565 // We only need to set metadata if the global is newly created, in which
1566 // case it would not have hidden visibility.
1567 if (GV->hasMetadata(LLVMContext::MD_absolute_symbol))
1568 return C;
1569
1570 auto SetAbsRange = [&](uint64_t Min, uint64_t Max) {
1571 auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Min));
1572 auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Max));
1573 GV->setMetadata(LLVMContext::MD_absolute_symbol,
1574 MDNode::get(M.getContext(), {MinC, MaxC}));
1575 };
1576 unsigned AbsWidth = IntTy->getBitWidth();
1577 if (AbsWidth == IntPtrTy->getBitWidth())
1578 SetAbsRange(~0ull, ~0ull); // Full set.
1579 else
1580 SetAbsRange(0, 1ull << AbsWidth);
1581 return C;
1582}
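
The !absolute_symbol range attached above can be summarized with plain integers; a sketch (SetAbsRange in the real code stores the pair as metadata):

#include <cstdint>
#include <utility>

// Half-open [Min, Max) range advertised for an absolute symbol of width
// AbsWidth bits; the sentinel pair (~0, ~0) means "full set".
static std::pair<uint64_t, uint64_t> absSymbolRange(unsigned AbsWidth,
                                                    unsigned PtrWidthBits) {
  if (AbsWidth == PtrWidthBits)
    return {~0ull, ~0ull}; // full set
  return {0, 1ull << AbsWidth}; // e.g. a 1-bit constant gets [0, 2)
}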
1583
1584void DevirtModule::applyUniqueRetValOpt(CallSiteInfo &CSInfo, StringRef FnName,
1585 bool IsOne,
1586 Constant *UniqueMemberAddr) {
1587 for (auto &&Call : CSInfo.CallSites) {
1588 if (!OptimizedCalls.insert(&Call.CB).second)
1589 continue;
1590 IRBuilder<> B(&Call.CB);
1591 Value *Cmp =
1592 B.CreateICmp(IsOne ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE, Call.VTable,
1593 B.CreateBitCast(UniqueMemberAddr, Call.VTable->getType()));
1594 Cmp = B.CreateZExt(Cmp, Call.CB.getType());
1595 Call.replaceAndErase("unique-ret-val", FnName, RemarksEnabled, OREGetter,
1596 Cmp);
1597 }
1598 CSInfo.markDevirt();
1599}
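
In source-level terms, the rewrite above turns a virtual call whose result is known per-vtable into a pointer comparison; a simplified sketch:

// Unique-ret-val: when exactly one implementation returns IsOne, asking
// "does this object's implementation return IsOne?" reduces to comparing
// the object's vtable against the unique member's address.
static bool uniqueRetVal(const void *VTable, const void *UniqueMemberAddr,
                         bool IsOne) {
  return IsOne ? VTable == UniqueMemberAddr : VTable != UniqueMemberAddr;
}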
1600
1601Constant *DevirtModule::getMemberAddr(const TypeMemberInfo *M) {
1602 Constant *C = ConstantExpr::getBitCast(M->Bits->GV, Int8PtrTy);
1603 return ConstantExpr::getGetElementPtr(Int8Ty, C,
1604 ConstantInt::get(Int64Ty, M->Offset));
1605}
1606
1607bool DevirtModule::tryUniqueRetValOpt(
1608 unsigned BitWidth, MutableArrayRef<VirtualCallTarget> TargetsForSlot,
1609 CallSiteInfo &CSInfo, WholeProgramDevirtResolution::ByArg *Res,
1610 VTableSlot Slot, ArrayRef<uint64_t> Args) {
1611 // IsOne controls whether we look for a 0 or a 1.
1612 auto tryUniqueRetValOptFor = [&](bool IsOne) {
1613 const TypeMemberInfo *UniqueMember = nullptr;
1614 for (const VirtualCallTarget &Target : TargetsForSlot) {
1615 if (Target.RetVal == (IsOne ? 1 : 0)) {
1616 if (UniqueMember)
1617 return false;
1618 UniqueMember = Target.TM;
1619 }
1620 }
1621
1622 // We should have found a unique member or bailed out by now. We already
1623 // checked for a uniform return value in tryUniformRetValOpt.
1624 assert(UniqueMember);
1625
1626 Constant *UniqueMemberAddr = getMemberAddr(UniqueMember);
1627 if (CSInfo.isExported()) {
1628 Res->TheKind = WholeProgramDevirtResolution::ByArg::UniqueRetVal;
1629 Res->Info = IsOne;
1630
1631 exportGlobal(Slot, Args, "unique_member", UniqueMemberAddr);
1632 }
1633
1634 // Replace each call with the comparison.
1635 applyUniqueRetValOpt(CSInfo, TargetsForSlot[0].Fn->getName(), IsOne,
1636 UniqueMemberAddr);
1637
1638 // Update devirtualization statistics for targets.
1639 if (RemarksEnabled)
1640 for (auto &&Target : TargetsForSlot)
1641 Target.WasDevirt = true;
1642
1643 return true;
1644 };
1645
1646 if (BitWidth == 1) {
1647 if (tryUniqueRetValOptFor(true))
1648 return true;
1649 if (tryUniqueRetValOptFor(false))
1650 return true;
1651 }
1652 return false;
1653}
1654
1655void DevirtModule::applyVirtualConstProp(CallSiteInfo &CSInfo, StringRef FnName,
1656 Constant *Byte, Constant *Bit) {
1657 for (auto Call : CSInfo.CallSites) {
1658 if (!OptimizedCalls.insert(&Call.CB).second)
1659 continue;
1660 auto *RetType = cast<IntegerType>(Call.CB.getType());
1661 IRBuilder<> B(&Call.CB);
1662 Value *Addr =
1663 B.CreateGEP(Int8Ty, B.CreateBitCast(Call.VTable, Int8PtrTy), Byte);
1664 if (RetType->getBitWidth() == 1) {
1665 Value *Bits = B.CreateLoad(Int8Ty, Addr);
1666 Value *BitsAndBit = B.CreateAnd(Bits, Bit);
1667 auto IsBitSet = B.CreateICmpNE(BitsAndBit, ConstantInt::get(Int8Ty, 0));
1668 Call.replaceAndErase("virtual-const-prop-1-bit", FnName, RemarksEnabled,
1669 OREGetter, IsBitSet);
1670 } else {
1671 Value *ValAddr = B.CreateBitCast(Addr, RetType->getPointerTo());
1672 Value *Val = B.CreateLoad(RetType, ValAddr);
1673 Call.replaceAndErase("virtual-const-prop", FnName, RemarksEnabled,
1674 OREGetter, Val);
1675 }
1676 }
1677 CSInfo.markDevirt();
1678}
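
In C terms, both rewrites above replace the virtual call with a load relative to the vtable pointer; a simplified sketch of the 1-bit and multi-byte cases:

#include <cstdint>
#include <cstring>

// 1-bit case: load the byte at the precomputed offset and test the bit.
static bool loadConstBit(const unsigned char *VTable, int64_t ByteOffset,
                         unsigned char BitMask) {
  return (VTable[ByteOffset] & BitMask) != 0;
}

// Wider case: load the stored return value itself from the vtable padding.
static uint32_t loadConst32(const unsigned char *VTable, int64_t ByteOffset) {
  uint32_t Val;
  std::memcpy(&Val, VTable + ByteOffset, sizeof(Val));
  return Val;
}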
1679
1680bool DevirtModule::tryVirtualConstProp(
1681 MutableArrayRef<VirtualCallTarget> TargetsForSlot, VTableSlotInfo &SlotInfo,
1682 WholeProgramDevirtResolution *Res, VTableSlot Slot) {
1683 // This only works if the function returns an integer.
1684 auto RetType = dyn_cast<IntegerType>(TargetsForSlot[0].Fn->getReturnType());
41
Assuming the object is not a 'IntegerType'
1685 if (!RetType
41.1
'RetType' is null
)
42
Taking true branch
1686 return false;
43
Returning without writing to 'SlotInfo.CSInfo.AllCallSitesDevirted', which participates in a condition later
1687 unsigned BitWidth = RetType->getBitWidth();
1688 if (BitWidth > 64)
1689 return false;
1690
1691 // Make sure that each function is defined, does not access memory, takes at
1692 // least one argument, does not use its first argument (which we assume is
1693 // 'this'), and has the same return type.
1694 //
1695 // Note that we test whether this copy of the function is readnone, rather
1696 // than testing function attributes, which must hold for any copy of the
1697 // function, even a less optimized version substituted at link time. This is
1698 // sound because the virtual constant propagation optimizations effectively
1699 // inline all implementations of the virtual function into each call site,
1700 // rather than using function attributes to perform local optimization.
1701 for (VirtualCallTarget &Target : TargetsForSlot) {
1702 if (Target.Fn->isDeclaration() ||
1703 computeFunctionBodyMemoryAccess(*Target.Fn, AARGetter(*Target.Fn)) !=
1704 MAK_ReadNone ||
1705 Target.Fn->arg_empty() || !Target.Fn->arg_begin()->use_empty() ||
1706 Target.Fn->getReturnType() != RetType)
1707 return false;
1708 }
1709
1710 for (auto &&CSByConstantArg : SlotInfo.ConstCSInfo) {
1711 if (!tryEvaluateFunctionsWithArgs(TargetsForSlot, CSByConstantArg.first))
1712 continue;
1713
1714 WholeProgramDevirtResolution::ByArg *ResByArg = nullptr;
1715 if (Res)
1716 ResByArg = &Res->ResByArg[CSByConstantArg.first];
1717
1718 if (tryUniformRetValOpt(TargetsForSlot, CSByConstantArg.second, ResByArg))
1719 continue;
1720
1721 if (tryUniqueRetValOpt(BitWidth, TargetsForSlot, CSByConstantArg.second,
1722 ResByArg, Slot, CSByConstantArg.first))
1723 continue;
1724
1725 // Find an allocation offset in bits in all vtables associated with the
1726 // type.
1727 uint64_t AllocBefore =
1728 findLowestOffset(TargetsForSlot, /*IsAfter=*/false, BitWidth);
1729 uint64_t AllocAfter =
1730 findLowestOffset(TargetsForSlot, /*IsAfter=*/true, BitWidth);
1731
1732 // Calculate the total amount of padding needed to store a value at both
1733 // ends of the object.
1734 uint64_t TotalPaddingBefore = 0, TotalPaddingAfter = 0;
1735 for (auto &&Target : TargetsForSlot) {
1736 TotalPaddingBefore += std::max<int64_t>(
1737 (AllocBefore + 7) / 8 - Target.allocatedBeforeBytes() - 1, 0);
1738 TotalPaddingAfter += std::max<int64_t>(
1739 (AllocAfter + 7) / 8 - Target.allocatedAfterBytes() - 1, 0);
1740 }
1741
1742 // If the amount of padding is too large, give up.
1743 // FIXME: do something smarter here.
1744 if (std::min(TotalPaddingBefore, TotalPaddingAfter) > 128)
1745 continue;
1746
1747 // Calculate the offset to the value as a (possibly negative) byte offset
1748 // and (if applicable) a bit offset, and store the values in the targets.
1749 int64_t OffsetByte;
1750 uint64_t OffsetBit;
1751 if (TotalPaddingBefore <= TotalPaddingAfter)
1752 setBeforeReturnValues(TargetsForSlot, AllocBefore, BitWidth, OffsetByte,
1753 OffsetBit);
1754 else
1755 setAfterReturnValues(TargetsForSlot, AllocAfter, BitWidth, OffsetByte,
1756 OffsetBit);
1757
1758 if (RemarksEnabled)
1759 for (auto &&Target : TargetsForSlot)
1760 Target.WasDevirt = true;
1761
1762
1763 if (CSByConstantArg.second.isExported()) {
1764 ResByArg->TheKind = WholeProgramDevirtResolution::ByArg::VirtualConstProp;
1765 exportConstant(Slot, CSByConstantArg.first, "byte", OffsetByte,
1766 ResByArg->Byte);
1767 exportConstant(Slot, CSByConstantArg.first, "bit", 1ULL << OffsetBit,
1768 ResByArg->Bit);
1769 }
1770
1771 // Rewrite each call to a load from OffsetByte/OffsetBit.
1772 Constant *ByteConst = ConstantInt::get(Int32Ty, OffsetByte);
1773 Constant *BitConst = ConstantInt::get(Int8Ty, 1ULL << OffsetBit);
1774 applyVirtualConstProp(CSByConstantArg.second,
1775 TargetsForSlot[0].Fn->getName(), ByteConst, BitConst);
1776 }
1777 return true;
1778}
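
A worked example for the padding arithmetic above: with AllocBefore == 9 bits, (9 + 7) / 8 == 2 bytes are needed in front of the vtable, so a target that has allocated 0 bytes there contributes max(2 - 0 - 1, 0) == 1 byte of padding:

#include <algorithm>
#include <cstdint>

static int64_t paddingNeeded(uint64_t AllocBits, uint64_t AllocatedBytes) {
  return std::max<int64_t>((AllocBits + 7) / 8 - AllocatedBytes - 1, 0);
}
// paddingNeeded(9, 0) == 1, matching one term of TotalPaddingBefore.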
1779
1780void DevirtModule::rebuildGlobal(VTableBits &B) {
1781 if (B.Before.Bytes.empty() && B.After.Bytes.empty())
1782 return;
1783
1784 // Align the before byte array to the global's minimum alignment so that we
1785 // don't break any alignment requirements on the global.
1786 Align Alignment = M.getDataLayout().getValueOrABITypeAlignment(
1787 B.GV->getAlign(), B.GV->getValueType());
1788 B.Before.Bytes.resize(alignTo(B.Before.Bytes.size(), Alignment));
1789
1790 // Before was stored in reverse order; flip it now.
1791 for (size_t I = 0, Size = B.Before.Bytes.size(); I != Size / 2; ++I)
1792 std::swap(B.Before.Bytes[I], B.Before.Bytes[Size - 1 - I]);
1793
1794 // Build an anonymous global containing the before bytes, followed by the
1795 // original initializer, followed by the after bytes.
1796 auto NewInit = ConstantStruct::getAnon(
1797 {ConstantDataArray::get(M.getContext(), B.Before.Bytes),
1798 B.GV->getInitializer(),
1799 ConstantDataArray::get(M.getContext(), B.After.Bytes)});
1800 auto NewGV =
1801 new GlobalVariable(M, NewInit->getType(), B.GV->isConstant(),
1802 GlobalVariable::PrivateLinkage, NewInit, "", B.GV);
1803 NewGV->setSection(B.GV->getSection());
1804 NewGV->setComdat(B.GV->getComdat());
1805 NewGV->setAlignment(B.GV->getAlign());
1806
1807 // Copy the original vtable's metadata to the anonymous global, adjusting
1808 // offsets as required.
1809 NewGV->copyMetadata(B.GV, B.Before.Bytes.size());
1810
1811 // Build an alias named after the original global, pointing at the second
1812 // element (the original initializer).
1813 auto Alias = GlobalAlias::create(
1814 B.GV->getInitializer()->getType(), 0, B.GV->getLinkage(), "",
1815 ConstantExpr::getGetElementPtr(
1816 NewInit->getType(), NewGV,
1817 ArrayRef<Constant *>{ConstantInt::get(Int32Ty, 0),
1818 ConstantInt::get(Int32Ty, 1)}),
1819 &M);
1820 Alias->setVisibility(B.GV->getVisibility());
1821 Alias->takeName(B.GV);
1822
1823 B.GV->replaceAllUsesWith(Alias);
1824 B.GV->eraseFromParent();
1825}
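
Conceptually, the rebuilt global has the layout sketched below (sizes here are arbitrary placeholders; the real ones come from B.Before, the original initializer, and B.After):

// Anonymous replacement global: { before bytes, original vtable, after
// bytes }. The alias created above targets field 1, so existing
// references to the vtable keep resolving to the original initializer.
struct RebuiltVTable {
  unsigned char Before[8]; // virtual-const-prop values stored before
  void *Original[4];       // original initializer; the alias points here
  unsigned char After[8];  // virtual-const-prop values stored after
};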
1826
1827bool DevirtModule::areRemarksEnabled() {
1828 const auto &FL = M.getFunctionList();
1829 for (const Function &Fn : FL) {
1830 const auto &BBL = Fn.getBasicBlockList();
1831 if (BBL.empty())
1832 continue;
1833 auto DI = OptimizationRemark(DEBUG_TYPE, "", DebugLoc(), &BBL.front());
1834 return DI.isEnabled();
1835 }
1836 return false;
1837}
1838
1839void DevirtModule::scanTypeTestUsers(
1840 Function *TypeTestFunc,
1841 DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap) {
1842 // Find all virtual calls via a virtual table pointer %p under an assumption
1843 // of the form llvm.assume(llvm.type.test(%p, %md)). This indicates that %p
1844 // points to a member of the type identifier %md. Group calls by (type ID,
1845 // offset) pair (effectively the identity of the virtual function) and store
1846 // to CallSlots.
1847 for (Use &U : llvm::make_early_inc_range(TypeTestFunc->uses())) {
1848 auto *CI = dyn_cast<CallInst>(U.getUser());
1849 if (!CI)
1850 continue;
1851
1852 // Search for virtual calls based on %p and add them to DevirtCalls.
1853 SmallVector<DevirtCallSite, 1> DevirtCalls;
1854 SmallVector<CallInst *, 1> Assumes;
1855 auto &DT = LookupDomTree(*CI->getFunction());
1856 findDevirtualizableCallsForTypeTest(DevirtCalls, Assumes, CI, DT);
1857
1858 Metadata *TypeId =
1859 cast<MetadataAsValue>(CI->getArgOperand(1))->getMetadata();
1860 // If we found any, add them to CallSlots.
1861 if (!Assumes.empty()) {
1862 Value *Ptr = CI->getArgOperand(0)->stripPointerCasts();
1863 for (DevirtCallSite Call : DevirtCalls)
1864 CallSlots[{TypeId, Call.Offset}].addCallSite(Ptr, Call.CB, nullptr);
1865 }
1866
1867 auto RemoveTypeTestAssumes = [&]() {
1868 // We no longer need the assumes or the type test.
1869 for (auto Assume : Assumes)
1870 Assume->eraseFromParent();
1871 // We can't use RecursivelyDeleteTriviallyDeadInstructions here because we
1872 // may use the vtable argument later.
1873 if (CI->use_empty())
1874 CI->eraseFromParent();
1875 };
1876
1877 // At this point we could remove all type test assume sequences, as they
1878 // were originally inserted for WPD. However, we can keep these in the
1879 // code stream for later analysis (e.g. to help drive more efficient ICP
1880 // sequences). They will eventually be removed by a second LowerTypeTests
1881 // invocation that cleans them up. In order to do this correctly, the first
1882 // LowerTypeTests invocation needs to know that they have "Unknown" type
1883 // test resolution, so that they aren't treated as Unsat and lowered to
1884 // False, which will break any uses on assumes. Below we remove any type
1885 // test assumes that will not be treated as Unknown by LTT.
1886
1887 // The type test assumes will be treated by LTT as Unsat if the type id is
1888 // not used on a global (in which case it has no entry in the TypeIdMap).
1889 if (!TypeIdMap.count(TypeId))
1890 RemoveTypeTestAssumes();
1891
1892 // For ThinLTO importing, we need to remove the type test assumes if this is
1893 // an MDString type id without a corresponding TypeIdSummary. Any
1894 // non-MDString type ids are ignored and treated as Unknown by LTT, so their
1895 // type test assumes can be kept. If the MDString type id is missing a
1896 // TypeIdSummary (e.g. because there was no use on a vcall, preventing the
1897 // exporting phase of WPD from analyzing it), then it would be treated as
1898 // Unsat by LTT and we need to remove its type test assumes here. If not
1899 // used on a vcall we don't need them for later optimization use in any
1900 // case.
1901 else if (ImportSummary && isa<MDString>(TypeId)) {
1902 const TypeIdSummary *TidSummary =
1903 ImportSummary->getTypeIdSummary(cast<MDString>(TypeId)->getString());
1904 if (!TidSummary)
1905 RemoveTypeTestAssumes();
1906 else
1907 // If one was created it should not be Unsat, because if we reached here
1908 // the type id was used on a global.
1909 assert(TidSummary->TTRes.TheKind != TypeTestResolution::Unsat);
1910 }
1911 }
1912}
1913
1914void DevirtModule::scanTypeCheckedLoadUsers(Function *TypeCheckedLoadFunc) {
1915 Function *TypeTestFunc = Intrinsic::getDeclaration(&M, Intrinsic::type_test);
1916
1917 for (Use &U : llvm::make_early_inc_range(TypeCheckedLoadFunc->uses())) {
1918 auto *CI = dyn_cast<CallInst>(U.getUser());
1919 if (!CI)
1920 continue;
1921
1922 Value *Ptr = CI->getArgOperand(0);
1923 Value *Offset = CI->getArgOperand(1);
1924 Value *TypeIdValue = CI->getArgOperand(2);
1925 Metadata *TypeId = cast<MetadataAsValue>(TypeIdValue)->getMetadata();
1926
1927 SmallVector<DevirtCallSite, 1> DevirtCalls;
1928 SmallVector<Instruction *, 1> LoadedPtrs;
1929 SmallVector<Instruction *, 1> Preds;
1930 bool HasNonCallUses = false;
1931 auto &DT = LookupDomTree(*CI->getFunction());
1932 findDevirtualizableCallsForTypeCheckedLoad(DevirtCalls, LoadedPtrs, Preds,
1933 HasNonCallUses, CI, DT);
1934
1935 // Start by generating "pessimistic" code that explicitly loads the function
1936 // pointer from the vtable and performs the type check. If possible, we will
1937 // eliminate the load and the type check later.
1938
1939 // If possible, only generate the load at the point where it is used.
1940 // This helps avoid unnecessary spills.
1941 IRBuilder<> LoadB(
1942 (LoadedPtrs.size() == 1 && !HasNonCallUses) ? LoadedPtrs[0] : CI);
1943 Value *GEP = LoadB.CreateGEP(Int8Ty, Ptr, Offset);
1944 Value *GEPPtr = LoadB.CreateBitCast(GEP, PointerType::getUnqual(Int8PtrTy));
1945 Value *LoadedValue = LoadB.CreateLoad(Int8PtrTy, GEPPtr);
1946
1947 for (Instruction *LoadedPtr : LoadedPtrs) {
1948 LoadedPtr->replaceAllUsesWith(LoadedValue);
1949 LoadedPtr->eraseFromParent();
1950 }
1951
1952 // Likewise for the type test.
1953 IRBuilder<> CallB((Preds.size() == 1 && !HasNonCallUses) ? Preds[0] : CI);
1954 CallInst *TypeTestCall = CallB.CreateCall(TypeTestFunc, {Ptr, TypeIdValue});
1955
1956 for (Instruction *Pred : Preds) {
1957 Pred->replaceAllUsesWith(TypeTestCall);
1958 Pred->eraseFromParent();
1959 }
1960
1961 // We have already erased any extractvalue instructions that refer to the
1962 // intrinsic call, but the intrinsic may have other non-extractvalue uses
1963 // (although this is unlikely). In that case, explicitly build a pair and
1964 // RAUW it.
1965 if (!CI->use_empty()) {
1966 Value *Pair = UndefValue::get(CI->getType());
1967 IRBuilder<> B(CI);
1968 Pair = B.CreateInsertValue(Pair, LoadedValue, {0});
1969 Pair = B.CreateInsertValue(Pair, TypeTestCall, {1});
1970 CI->replaceAllUsesWith(Pair);
1971 }
1972
1973 // The number of unsafe uses is initially the number of uses.
1974 auto &NumUnsafeUses = NumUnsafeUsesForTypeTest[TypeTestCall];
1975 NumUnsafeUses = DevirtCalls.size();
1976
1977 // If the function pointer has a non-call user, we cannot eliminate the type
1978 // check, as one of those users may eventually call the pointer. Increment
1979 // the unsafe use count to make sure it cannot reach zero.
1980 if (HasNonCallUses)
1981 ++NumUnsafeUses;
1982 for (DevirtCallSite Call : DevirtCalls) {
1983 CallSlots[{TypeId, Call.Offset}].addCallSite(Ptr, Call.CB,
1984 &NumUnsafeUses);
1985 }
1986
1987 CI->eraseFromParent();
1988 }
1989}
1990
1991void DevirtModule::importResolution(VTableSlot Slot, VTableSlotInfo &SlotInfo) {
1992 auto *TypeId = dyn_cast<MDString>(Slot.TypeID);
1993 if (!TypeId)
1994 return;
1995 const TypeIdSummary *TidSummary =
1996 ImportSummary->getTypeIdSummary(TypeId->getString());
1997 if (!TidSummary)
1998 return;
1999 auto ResI = TidSummary->WPDRes.find(Slot.ByteOffset);
2000 if (ResI == TidSummary->WPDRes.end())
2001 return;
2002 const WholeProgramDevirtResolution &Res = ResI->second;
2003
2004 if (Res.TheKind == WholeProgramDevirtResolution::SingleImpl) {
2005 assert(!Res.SingleImplName.empty());
2006 // The type of the function in the declaration is irrelevant because every
2007 // call site will cast it to the correct type.
2008 Constant *SingleImpl =
2009 cast<Constant>(M.getOrInsertFunction(Res.SingleImplName,
2010 Type::getVoidTy(M.getContext()))
2011 .getCallee());
2012
2013 // This is the import phase so we should not be exporting anything.
2014 bool IsExported = false;
2015 applySingleImplDevirt(SlotInfo, SingleImpl, IsExported);
2016 assert(!IsExported);
2017 }
2018
2019 for (auto &CSByConstantArg : SlotInfo.ConstCSInfo) {
2020 auto I = Res.ResByArg.find(CSByConstantArg.first);
2021 if (I == Res.ResByArg.end())
2022 continue;
2023 auto &ResByArg = I->second;
2024 // FIXME: We should figure out what to do about the "function name" argument
2025 // to the apply* functions, as the function names are unavailable during the
2026 // importing phase. For now we just pass the empty string. This does not
2027 // impact correctness because the function names are just used for remarks.
2028 switch (ResByArg.TheKind) {
2029 case WholeProgramDevirtResolution::ByArg::UniformRetVal:
2030 applyUniformRetValOpt(CSByConstantArg.second, "", ResByArg.Info);
2031 break;
2032 case WholeProgramDevirtResolution::ByArg::UniqueRetVal: {
2033 Constant *UniqueMemberAddr =
2034 importGlobal(Slot, CSByConstantArg.first, "unique_member");
2035 applyUniqueRetValOpt(CSByConstantArg.second, "", ResByArg.Info,
2036 UniqueMemberAddr);
2037 break;
2038 }
2039 case WholeProgramDevirtResolution::ByArg::VirtualConstProp: {
2040 Constant *Byte = importConstant(Slot, CSByConstantArg.first, "byte",
2041 Int32Ty, ResByArg.Byte);
2042 Constant *Bit = importConstant(Slot, CSByConstantArg.first, "bit", Int8Ty,
2043 ResByArg.Bit);
2044 applyVirtualConstProp(CSByConstantArg.second, "", Byte, Bit);
2045 break;
2046 }
2047 default:
2048 break;
2049 }
2050 }
2051
2052 if (Res.TheKind == WholeProgramDevirtResolution::BranchFunnel) {
2053 // The type of the function is irrelevant, because it's bitcast at calls
2054 // anyhow.
2055 Constant *JT = cast<Constant>(
2056 M.getOrInsertFunction(getGlobalName(Slot, {}, "branch_funnel"),
2057 Type::getVoidTy(M.getContext()))
2058 .getCallee());
2059 bool IsExported = false;
2060 applyICallBranchFunnel(SlotInfo, JT, IsExported);
2061 assert(!IsExported);
2062 }
2063}
2064
2065void DevirtModule::removeRedundantTypeTests() {
2066 auto True = ConstantInt::getTrue(M.getContext());
2067 for (auto &&U : NumUnsafeUsesForTypeTest) {
2068 if (U.second == 0) {
2069 U.first->replaceAllUsesWith(True);
2070 U.first->eraseFromParent();
2071 }
2072 }
2073}
2074
2075ValueInfo
2076DevirtModule::lookUpFunctionValueInfo(Function *TheFn,
2077 ModuleSummaryIndex *ExportSummary) {
2078 assert((ExportSummary != nullptr) &&
2079 "Caller guarantees ExportSummary is not nullptr");
2080
2081 const auto TheFnGUID = TheFn->getGUID();
2082 const auto TheFnGUIDWithExportedName = GlobalValue::getGUID(TheFn->getName());
2083 // Look up ValueInfo with the GUID in the current linkage.
2084 ValueInfo TheFnVI = ExportSummary->getValueInfo(TheFnGUID);
2085 // If no entry is found and the GUID differs from the GUID computed using
2086 // the exported name, look up ValueInfo with the exported name unconditionally.
2087 // This is a fallback.
2088 //
2089 // The reason to have a fallback:
2090 // 1. LTO could enable global value internalization via
2091 // `enable-lto-internalization`.
2092 // 2. The GUID in ExportSummary is computed using the exported name.
2093 if ((!TheFnVI) && (TheFnGUID != TheFnGUIDWithExportedName)) {
2094 TheFnVI = ExportSummary->getValueInfo(TheFnGUIDWithExportedName);
2095 }
2096 return TheFnVI;
2097}
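
A standalone sketch of the two-step lookup above, with a std::map standing in for the summary index (hypothetical stand-in types):

#include <cstdint>
#include <map>

using GUID = uint64_t;
struct ValueInfoLite { int Dummy; }; // stand-in for llvm::ValueInfo

static const ValueInfoLite *
lookupWithFallback(const std::map<GUID, ValueInfoLite> &Index,
                   GUID CurrentGUID, GUID ExportedNameGUID) {
  auto It = Index.find(CurrentGUID);
  if (It != Index.end())
    return &It->second;
  // Fallback: internalization may have renamed the function after the
  // summary index was keyed by the exported name's GUID.
  if (CurrentGUID != ExportedNameGUID) {
    It = Index.find(ExportedNameGUID);
    if (It != Index.end())
      return &It->second;
  }
  return nullptr;
}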
2098
2099bool DevirtModule::mustBeUnreachableFunction(
2100 Function *const F, ModuleSummaryIndex *ExportSummary) {
2101 // First, learn unreachability by analyzing function IR.
2102 if (!F->isDeclaration()) {
2103 // A function must be unreachable if its entry block ends with an
2104 // 'unreachable'.
2105 return isa<UnreachableInst>(F->getEntryBlock().getTerminator());
2106 }
2107 // Learn unreachability from ExportSummary if ExportSummary is present.
2108 return ExportSummary &&
2109 ::mustBeUnreachableFunction(
2110 DevirtModule::lookUpFunctionValueInfo(F, ExportSummary));
2111}
2112
2113bool DevirtModule::run() {
2114 // If only some of the modules were split, we cannot correctly perform
2115 // this transformation. We already checked for the presence of type tests
2116 // with partially split modules during the thin link, and would have emitted
2117 // an error if any were found, so here we can simply return.
2118 if ((ExportSummary && ExportSummary->partiallySplitLTOUnits()) ||
1
Assuming field 'ExportSummary' is null
2119 (ImportSummary && ImportSummary->partiallySplitLTOUnits()))
2
Assuming field 'ImportSummary' is null
2120 return false;
2121
2122 Function *TypeTestFunc =
2123 M.getFunction(Intrinsic::getName(Intrinsic::type_test));
2124 Function *TypeCheckedLoadFunc =
2125 M.getFunction(Intrinsic::getName(Intrinsic::type_checked_load));
2126 Function *AssumeFunc = M.getFunction(Intrinsic::getName(Intrinsic::assume));
2127
2128 // Normally if there are no users of the devirtualization intrinsics in the
2129 // module, this pass has nothing to do. But if we are exporting, we also need
2130 // to handle any users that appear only in the function summaries.
2131 if (!ExportSummary
2.1
Field 'ExportSummary' is null
&&
2132 (!TypeTestFunc || TypeTestFunc->use_empty() || !AssumeFunc ||
3
Assuming 'TypeTestFunc' is non-null
4
Calling 'Value::use_empty'
7
Returning from 'Value::use_empty'
8
Assuming 'AssumeFunc' is non-null
2133 AssumeFunc->use_empty()) &&
9
Calling 'Value::use_empty'
12
Returning from 'Value::use_empty'
2134 (!TypeCheckedLoadFunc || TypeCheckedLoadFunc->use_empty()))
2135 return false;
2136
2137 // Rebuild type metadata into a map for easy lookup.
2138 std::vector<VTableBits> Bits;
2139 DenseMap<Metadata *, std::set<TypeMemberInfo>> TypeIdMap;
2140 buildTypeIdentifierMap(Bits, TypeIdMap);
2141
2142 if (TypeTestFunc
12.1
'TypeTestFunc' is non-null
&& AssumeFunc
12.2
'AssumeFunc' is non-null
)
13
Taking true branch
2143 scanTypeTestUsers(TypeTestFunc, TypeIdMap);
2144
2145 if (TypeCheckedLoadFunc)
14
Assuming 'TypeCheckedLoadFunc' is null
15
Taking false branch
2146 scanTypeCheckedLoadUsers(TypeCheckedLoadFunc);
2147
2148 if (ImportSummary
15.1
Field 'ImportSummary' is null
) {
16
Taking false branch
2149 for (auto &S : CallSlots)
2150 importResolution(S.first, S.second);
2151
2152 removeRedundantTypeTests();
2153
2154 // We have lowered or deleted the type intrinsics, so we will no
2155 // longer have enough information to reason about the liveness of virtual
2156 // function pointers in GlobalDCE.
2157 for (GlobalVariable &GV : M.globals())
2158 GV.eraseMetadata(LLVMContext::MD_vcall_visibility);
2159
2160 // The rest of the code is only necessary when exporting or during regular
2161 // LTO, so we are done.
2162 return true;
2163 }
2164
2165 if (TypeIdMap.empty())
17
Assuming the condition is false
18
Taking false branch
2166 return true;
2167
2168 // Collect information from summary about which calls to try to devirtualize.
2169 if (ExportSummary
18.1
Field 'ExportSummary' is null
) {
19
Taking false branch
2170 DenseMap<GlobalValue::GUID, TinyPtrVector<Metadata *>> MetadataByGUID;
2171 for (auto &P : TypeIdMap) {
2172 if (auto *TypeId = dyn_cast<MDString>(P.first))
2173 MetadataByGUID[GlobalValue::getGUID(TypeId->getString())].push_back(
2174 TypeId);
2175 }
2176
2177 for (auto &P : *ExportSummary) {
2178 for (auto &S : P.second.SummaryList) {
2179 auto *FS = dyn_cast<FunctionSummary>(S.get());
2180 if (!FS)
2181 continue;
2182 // FIXME: Only add live functions.
2183 for (FunctionSummary::VFuncId VF : FS->type_test_assume_vcalls()) {
2184 for (Metadata *MD : MetadataByGUID[VF.GUID]) {
2185 CallSlots[{MD, VF.Offset}].CSInfo.addSummaryTypeTestAssumeUser(FS);
2186 }
2187 }
2188 for (FunctionSummary::VFuncId VF : FS->type_checked_load_vcalls()) {
2189 for (Metadata *MD : MetadataByGUID[VF.GUID]) {
2190 CallSlots[{MD, VF.Offset}].CSInfo.addSummaryTypeCheckedLoadUser(FS);
2191 }
2192 }
2193 for (const FunctionSummary::ConstVCall &VC :
2194 FS->type_test_assume_const_vcalls()) {
2195 for (Metadata *MD : MetadataByGUID[VC.VFunc.GUID]) {
2196 CallSlots[{MD, VC.VFunc.Offset}]
2197 .ConstCSInfo[VC.Args]
2198 .addSummaryTypeTestAssumeUser(FS);
2199 }
2200 }
2201 for (const FunctionSummary::ConstVCall &VC :
2202 FS->type_checked_load_const_vcalls()) {
2203 for (Metadata *MD : MetadataByGUID[VC.VFunc.GUID]) {
2204 CallSlots[{MD, VC.VFunc.Offset}]
2205 .ConstCSInfo[VC.Args]
2206 .addSummaryTypeCheckedLoadUser(FS);
2207 }
2208 }
2209 }
2210 }
2211 }
2212
2213 // For each (type, offset) pair:
2214 bool DidVirtualConstProp = false;
2215 std::map<std::string, Function*> DevirtTargets;
2216 for (auto &S : CallSlots) {
2217 // Search each of the members of the type identifier for the virtual
2218 // function implementation at offset S.first.ByteOffset, and add to
2219 // TargetsForSlot.
2220 std::vector<VirtualCallTarget> TargetsForSlot;
2221 WholeProgramDevirtResolution *Res = nullptr;
20
'Res' initialized to a null pointer value
2222 const std::set<TypeMemberInfo> &TypeMemberInfos = TypeIdMap[S.first.TypeID];
2223 if (ExportSummary && isa<MDString>(S.first.TypeID) &&
21
Assuming field 'ExportSummary' is null
2224 TypeMemberInfos.size())
2225 // For any type id used on a global's type metadata, create the type id
2226 // summary resolution regardless of whether we can devirtualize, so that
2227 // LowerTypeTests knows the type id is not Unsat. If it was not used on
2228 // a global's type metadata, the TypeIdMap entry set will be empty, and
2229 // we don't want to create an entry (with the default Unknown type
2230 // resolution), which can prevent detection of the Unsat.
2231 Res = &ExportSummary
2232 ->getOrInsertTypeIdSummary(
2233 cast<MDString>(S.first.TypeID)->getString())
2234 .WPDRes[S.first.ByteOffset];
2235 if (tryFindVirtualCallTargets(TargetsForSlot, TypeMemberInfos,
22
Calling 'DevirtModule::tryFindVirtualCallTargets'
25
Returning from 'DevirtModule::tryFindVirtualCallTargets'
26
Taking true branch
2236 S.first.ByteOffset, ExportSummary)) {
2237
2238 if (!trySingleImplDevirt(ExportSummary, TargetsForSlot, S.second, Res)) {
27
Calling 'DevirtModule::trySingleImplDevirt'
38
Returning from 'DevirtModule::trySingleImplDevirt'
39
Taking true branch
2239 DidVirtualConstProp |=
2240 tryVirtualConstProp(TargetsForSlot, S.second, Res, S.first);
40
Calling 'DevirtModule::tryVirtualConstProp'
44
Returning from 'DevirtModule::tryVirtualConstProp'
2241
2242 tryICallBranchFunnel(TargetsForSlot, S.second, Res, S.first);
45
Passing null pointer value via 3rd parameter 'Res'
46
Calling 'DevirtModule::tryICallBranchFunnel'
2243 }
2244
2245 // Collect functions devirtualized at least for one call site for stats.
2246 if (RemarksEnabled)
2247 for (const auto &T : TargetsForSlot)
2248 if (T.WasDevirt)
2249 DevirtTargets[std::string(T.Fn->getName())] = T.Fn;
2250 }
2251
2252 // CFI-specific: if we are exporting and any llvm.type.checked.load
2253 // intrinsics were *not* devirtualized, we need to add the resulting
2254 // llvm.type.test intrinsics to the function summaries so that the
2255 // LowerTypeTests pass will export them.
2256 if (ExportSummary && isa<MDString>(S.first.TypeID)) {
2257 auto GUID =
2258 GlobalValue::getGUID(cast<MDString>(S.first.TypeID)->getString());
2259 for (auto FS : S.second.CSInfo.SummaryTypeCheckedLoadUsers)
2260 FS->addTypeTest(GUID);
2261 for (auto &CCS : S.second.ConstCSInfo)
2262 for (auto FS : CCS.second.SummaryTypeCheckedLoadUsers)
2263 FS->addTypeTest(GUID);
2264 }
2265 }
2266
2267 if (RemarksEnabled) {
2268 // Generate remarks for each devirtualized function.
2269 for (const auto &DT : DevirtTargets) {
2270 Function *F = DT.second;
2271
2272 using namespace ore;
2273 OREGetter(F).emit(OptimizationRemark(DEBUG_TYPE, "Devirtualized", F)
2274 << "devirtualized "
2275 << NV("FunctionName", DT.first));
2276 }
2277 }
2278
2279 removeRedundantTypeTests();
2280
2281 // Rebuild each global we touched as part of virtual constant propagation to
2282 // include the before and after bytes.
2283 if (DidVirtualConstProp)
2284 for (VTableBits &B : Bits)
2285 rebuildGlobal(B);
2286
2287 // We have lowered or deleted the type intrinsics, so we will no
2288 // longer have enough information to reason about the liveness of virtual
2289 // function pointers in GlobalDCE.
2290 for (GlobalVariable &GV : M.globals())
2291 GV.eraseMetadata(LLVMContext::MD_vcall_visibility);
2292
2293 return true;
2294}
2295
2296void DevirtIndex::run() {
2297 if (ExportSummary.typeIdCompatibleVtableMap().empty())
2298 return;
2299
2300 DenseMap<GlobalValue::GUID, std::vector<StringRef>> NameByGUID;
2301 for (auto &P : ExportSummary.typeIdCompatibleVtableMap()) {
2302 NameByGUID[GlobalValue::getGUID(P.first)].push_back(P.first);
2303 }
2304
2305 // Collect information from summary about which calls to try to devirtualize.
2306 for (auto &P : ExportSummary) {
2307 for (auto &S : P.second.SummaryList) {
2308 auto *FS = dyn_cast<FunctionSummary>(S.get());
2309 if (!FS)
2310 continue;
2311 // FIXME: Only add live functions.
2312 for (FunctionSummary::VFuncId VF : FS->type_test_assume_vcalls()) {
2313 for (StringRef Name : NameByGUID[VF.GUID]) {
2314 CallSlots[{Name, VF.Offset}].CSInfo.addSummaryTypeTestAssumeUser(FS);
2315 }
2316 }
2317 for (FunctionSummary::VFuncId VF : FS->type_checked_load_vcalls()) {
2318 for (StringRef Name : NameByGUID[VF.GUID]) {
2319 CallSlots[{Name, VF.Offset}].CSInfo.addSummaryTypeCheckedLoadUser(FS);
2320 }
2321 }
2322 for (const FunctionSummary::ConstVCall &VC :
2323 FS->type_test_assume_const_vcalls()) {
2324 for (StringRef Name : NameByGUID[VC.VFunc.GUID]) {
2325 CallSlots[{Name, VC.VFunc.Offset}]
2326 .ConstCSInfo[VC.Args]
2327 .addSummaryTypeTestAssumeUser(FS);
2328 }
2329 }
2330 for (const FunctionSummary::ConstVCall &VC :
2331 FS->type_checked_load_const_vcalls()) {
2332 for (StringRef Name : NameByGUID[VC.VFunc.GUID]) {
2333 CallSlots[{Name, VC.VFunc.Offset}]
2334 .ConstCSInfo[VC.Args]
2335 .addSummaryTypeCheckedLoadUser(FS);
2336 }
2337 }
2338 }
2339 }
2340
2341 std::set<ValueInfo> DevirtTargets;
2342 // For each (type, offset) pair:
2343 for (auto &S : CallSlots) {
2344 // Search each of the members of the type identifier for the virtual
2345 // function implementation at offset S.first.ByteOffset, and add to
2346 // TargetsForSlot.
2347 std::vector<ValueInfo> TargetsForSlot;
2348 auto TidSummary = ExportSummary.getTypeIdCompatibleVtableSummary(S.first.TypeID);
2349 assert(TidSummary);
2350 // Create the type id summary resolution regardless of whether we can
2351 // devirtualize, so that LowerTypeTests knows the type id is used on
2352 // a global and not Unsat.
2353 WholeProgramDevirtResolution *Res =
2354 &ExportSummary.getOrInsertTypeIdSummary(S.first.TypeID)
2355 .WPDRes[S.first.ByteOffset];
2356 if (tryFindVirtualCallTargets(TargetsForSlot, *TidSummary,
2357 S.first.ByteOffset)) {
2358
2359 if (!trySingleImplDevirt(TargetsForSlot, S.first, S.second, Res,
2360 DevirtTargets))
2361 continue;
2362 }
2363 }
2364
2365 // Optionally have the thin link print a message for each devirtualized
2366 // function.
2367 if (PrintSummaryDevirt)
2368 for (const auto &DT : DevirtTargets)
2369 errs() << "Devirtualized call to " << DT << "\n";
2370}

/build/llvm-toolchain-snapshot-14~++20220116100644+5f782d25a742/llvm/include/llvm/IR/Value.h

1//===- llvm/Value.h - Definition of the Value class -------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the Value class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_IR_VALUE_H
14#define LLVM_IR_VALUE_H
15
16#include "llvm-c/Types.h"
17#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/StringRef.h"
19#include "llvm/ADT/iterator_range.h"
20#include "llvm/IR/Use.h"
21#include "llvm/Support/Alignment.h"
22#include "llvm/Support/CBindingWrapping.h"
23#include "llvm/Support/Casting.h"
24#include <cassert>
25#include <iterator>
26#include <memory>
27
28namespace llvm {
29
30class APInt;
31class Argument;
32class BasicBlock;
33class Constant;
34class ConstantData;
35class ConstantAggregate;
36class DataLayout;
37class Function;
38class GlobalAlias;
39class GlobalIFunc;
40class GlobalObject;
41class GlobalValue;
42class GlobalVariable;
43class InlineAsm;
44class Instruction;
45class LLVMContext;
46class MDNode;
47class Module;
48class ModuleSlotTracker;
49class raw_ostream;
50template<typename ValueTy> class StringMapEntry;
51class Twine;
52class Type;
53class User;
54
55using ValueName = StringMapEntry<Value *>;
56
57//===----------------------------------------------------------------------===//
58// Value Class
59//===----------------------------------------------------------------------===//
60
61/// LLVM Value Representation
62///
63/// This is a very important LLVM class. It is the base class of all values
64/// computed by a program that may be used as operands to other values. Value is
65/// the super class of other important classes such as Instruction and Function.
66/// All Values have a Type. Type is not a subclass of Value. Some values can
67/// have a name and they belong to some Module. Setting the name on the Value
68/// automatically updates the module's symbol table.
69///
70/// Every value has a "use list" that keeps track of which other Values are
71/// using this Value. A Value can also have an arbitrary number of ValueHandle
72/// objects that watch it and listen to RAUW and Destroy events. See
73/// llvm/IR/ValueHandle.h for details.
74class Value {
75 Type *VTy;
76 Use *UseList;
77
78 friend class ValueAsMetadata; // Allow access to IsUsedByMD.
79 friend class ValueHandleBase;
80
81 const unsigned char SubclassID; // Subclass identifier (for isa/dyn_cast)
82 unsigned char HasValueHandle : 1; // Has a ValueHandle pointing to this?
83
84protected:
85 /// Hold subclass data that can be dropped.
86 ///
87 /// This member is similar to SubclassData, however it is for holding
88 /// information which may be used to aid optimization, but which may be
89 /// cleared to zero without affecting conservative interpretation.
90 unsigned char SubclassOptionalData : 7;
91
92private:
93 /// Hold arbitrary subclass data.
94 ///
95 /// This member is defined by this class, but is not used for anything.
96 /// Subclasses can use it to hold whatever state they find useful. This
97 /// field is initialized to zero by the ctor.
98 unsigned short SubclassData;
99
100protected:
101 /// The number of operands in the subclass.
102 ///
103 /// This member is defined by this class, but not used for anything.
104 /// Subclasses can use it to store their number of operands, if they have
105 /// any.
106 ///
107 /// This is stored here to save space in User on 64-bit hosts. Since most
108 /// instances of Value have operands, 32-bit hosts aren't significantly
109 /// affected.
110 ///
111 /// Note, this should *NOT* be used directly by any class other than User.
112 /// User uses this value to find the Use list.
113 enum : unsigned { NumUserOperandsBits = 27 };
114 unsigned NumUserOperands : NumUserOperandsBits;
115
116 // Use the same type as the bitfield above so that MSVC will pack them.
117 unsigned IsUsedByMD : 1;
118 unsigned HasName : 1;
119 unsigned HasMetadata : 1; // Has metadata attached to this?
120 unsigned HasHungOffUses : 1;
121 unsigned HasDescriptor : 1;
122
123private:
124 template <typename UseT> // UseT == 'Use' or 'const Use'
125 class use_iterator_impl {
126 friend class Value;
127
128 UseT *U;
129
130 explicit use_iterator_impl(UseT *u) : U(u) {}
131
132 public:
133 using iterator_category = std::forward_iterator_tag;
134 using value_type = UseT *;
135 using difference_type = std::ptrdiff_t;
136 using pointer = value_type *;
137 using reference = value_type &;
138
139 use_iterator_impl() : U() {}
140
141 bool operator==(const use_iterator_impl &x) const { return U == x.U; }
142 bool operator!=(const use_iterator_impl &x) const { return !operator==(x); }
143
144 use_iterator_impl &operator++() { // Preincrement
145 assert(U && "Cannot increment end iterator!")(static_cast <bool> (U && "Cannot increment end iterator!"
) ? void (0) : __assert_fail ("U && \"Cannot increment end iterator!\""
, "llvm/include/llvm/IR/Value.h", 145, __extension__ __PRETTY_FUNCTION__
))
;
146 U = U->getNext();
147 return *this;
148 }
149
150 use_iterator_impl operator++(int) { // Postincrement
151 auto tmp = *this;
152 ++*this;
153 return tmp;
154 }
155
156 UseT &operator*() const {
157 assert(U && "Cannot dereference end iterator!")(static_cast <bool> (U && "Cannot dereference end iterator!"
) ? void (0) : __assert_fail ("U && \"Cannot dereference end iterator!\""
, "llvm/include/llvm/IR/Value.h", 157, __extension__ __PRETTY_FUNCTION__
))
;
158 return *U;
159 }
160
161 UseT *operator->() const { return &operator*(); }
162
163 operator use_iterator_impl<const UseT>() const {
164 return use_iterator_impl<const UseT>(U);
165 }
166 };
167
168 template <typename UserTy> // UserTy == 'User' or 'const User'
169 class user_iterator_impl {
170 use_iterator_impl<Use> UI;
171 explicit user_iterator_impl(Use *U) : UI(U) {}
172 friend class Value;
173
174 public:
175 using iterator_category = std::forward_iterator_tag;
176 using value_type = UserTy *;
177 using difference_type = std::ptrdiff_t;
178 using pointer = value_type *;
179 using reference = value_type &;
180
181 user_iterator_impl() = default;
182
183 bool operator==(const user_iterator_impl &x) const { return UI == x.UI; }
184 bool operator!=(const user_iterator_impl &x) const { return !operator==(x); }
185
186 /// Returns true if this iterator is equal to user_end() on the value.
187 bool atEnd() const { return *this == user_iterator_impl(); }
188
189 user_iterator_impl &operator++() { // Preincrement
190 ++UI;
191 return *this;
192 }
193
194 user_iterator_impl operator++(int) { // Postincrement
195 auto tmp = *this;
196 ++*this;
197 return tmp;
198 }
199
200 // Retrieve a pointer to the current User.
201 UserTy *operator*() const {
202 return UI->getUser();
203 }
204
205 UserTy *operator->() const { return operator*(); }
206
207 operator user_iterator_impl<const UserTy>() const {
208 return user_iterator_impl<const UserTy>(*UI);
209 }
210
211 Use &getUse() const { return *UI; }
212 };
213
214protected:
215 Value(Type *Ty, unsigned scid);
216
217 /// Value's destructor should be virtual by design, but that would require
218 /// that Value and all of its subclasses have a vtable that effectively
219 /// duplicates the information in the value ID. As a size optimization, the
220 /// destructor has been protected, and the caller should manually call
221 /// deleteValue.
222 ~Value(); // Use deleteValue() to delete a generic Value.
223
224public:
225 Value(const Value &) = delete;
226 Value &operator=(const Value &) = delete;
227
228 /// Delete a pointer to a generic Value.
229 void deleteValue();
230
231 /// Support for debugging, callable in GDB: V->dump()
232 void dump() const;
233
234 /// Implement operator<< on Value.
235 /// @{
236 void print(raw_ostream &O, bool IsForDebug = false) const;
237 void print(raw_ostream &O, ModuleSlotTracker &MST,
238 bool IsForDebug = false) const;
239 /// @}
240
241 /// Print the name of this Value out to the specified raw_ostream.
242 ///
243 /// This is useful when you just want to print 'int %reg126', not the
244 /// instruction that generated it. If you specify a Module for context, then
245 /// even constants get pretty-printed; for example, the type of a null
246 /// pointer is printed symbolically.
247 /// @{
248 void printAsOperand(raw_ostream &O, bool PrintType = true,
249 const Module *M = nullptr) const;
250 void printAsOperand(raw_ostream &O, bool PrintType,
251 ModuleSlotTracker &MST) const;
252 /// @}
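
A hedged usage sketch (the target stream llvm::errs() and a value V are assumptions, not part of this header):

    V->print(llvm::errs());              // full form, e.g. "%reg126 = add i32 %a, %b"
    V->printAsOperand(llvm::errs());     // operand form, e.g. "i32 %reg126"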
253
254 /// All values are typed, get the type of this value.
255 Type *getType() const { return VTy; }
256
257 /// All values hold a context through their type.
258 LLVMContext &getContext() const;
259
260 // All values can potentially be named.
261 bool hasName() const { return HasName; }
262 ValueName *getValueName() const;
263 void setValueName(ValueName *VN);
264
265private:
266 void destroyValueName();
267 enum class ReplaceMetadataUses { No, Yes };
268 void doRAUW(Value *New, ReplaceMetadataUses);
269 void setNameImpl(const Twine &Name);
270
271public:
272 /// Return a constant reference to the value's name.
273 ///
274 /// This is guaranteed to return the same reference as long as the value is not
275 /// modified. If the value has a name, this does a hashtable lookup, so it's
276 /// not free.
277 StringRef getName() const;
278
279 /// Change the name of the value.
280 ///
281 /// Choose a new unique name if the provided name is taken.
282 ///
283 /// \param Name The new name; or "" if the value's name should be removed.
284 void setName(const Twine &Name);
285
286 /// Transfer the name from V to this value.
287 ///
288 /// After taking V's name, sets V's name to empty.
289 ///
290 /// \note It is an error to call V->takeName(V).
291 void takeName(Value *V);
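
A minimal sketch of the naming API (values A and B are hypothetical):

    A->setName("x");            // may pick a unique variant such as "x1" if "x" is taken
    B->takeName(A);             // B now holds A's name; A's name becomes empty
    StringRef N = B->getName(); // hashtable lookup when a name is present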
292
293#ifndef NDEBUG
294 std::string getNameOrAsOperand() const;
295#endif
296
297 /// Change all uses of this to point to a new Value.
298 ///
299 /// Go through the uses list for this definition and make each use point to
300 /// "V" instead of "this". After this completes, 'this's use list is
301 /// guaranteed to be empty.
302 void replaceAllUsesWith(Value *V);
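
A hedged sketch (OldV and NewV are assumed to have the same type):

    OldV->replaceAllUsesWith(NewV);
    assert(OldV->use_empty() && "every use now points at NewV");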
303
304 /// Change non-metadata uses of this to point to a new Value.
305 ///
306 /// Go through the uses list for this definition and make each use point to
307 /// "V" instead of "this". This function skips metadata entries in the list.
308 void replaceNonMetadataUsesWith(Value *V);
309
310 /// Go through the uses list for this definition and make each use point
311 /// to "V" if the callback ShouldReplace returns true for the given Use.
312 /// Unlike replaceAllUsesWith() this function does not support basic block
313 /// values.
314 void replaceUsesWithIf(Value *New,
315 llvm::function_ref<bool(Use &U)> ShouldReplace);
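
For example, a sketch that rewrites only the uses sitting in PHI nodes (PHINode comes from llvm/IR/Instructions.h, which this header does not include):

    OldV->replaceUsesWithIf(NewV, [](Use &U) {
      return isa<PHINode>(U.getUser());
    });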
316
317 /// replaceUsesOutsideBlock - Go through the uses list for this definition and
318 /// make each use point to "V" instead of "this" when the use is outside the
319 /// block. 'This's use list is expected to have at least one element.
320 /// Unlike replaceAllUsesWith() this function does not support basic block
321 /// values.
322 void replaceUsesOutsideBlock(Value *V, BasicBlock *BB);
323
324 //----------------------------------------------------------------------
325 // Methods for handling the chain of uses of this Value.
326 //
327 // Materializing a function can introduce new uses, so these methods come in
328 // two variants:
329 // The methods that start with materialized_ check the uses that are
330 // currently known given which functions are materialized. Be very careful
331 // when using them since you might not get all uses.
332 // The methods that don't start with materialized_ assert that the module is
333 // fully materialized.
334 void assertModuleIsMaterializedImpl() const;
335 // This indirection exists so we can keep assertModuleIsMaterializedImpl()
336 // around in release builds of Value.cpp to be linked with other code built
337 // in debug mode. But this avoids calling it in any of the release built code.
338 void assertModuleIsMaterialized() const {
339#ifndef NDEBUG
340 assertModuleIsMaterializedImpl();
341#endif
342 }
343
344 bool use_empty() const {
345 assertModuleIsMaterialized();
346 return UseList == nullptr;
[Event 5] Assuming the condition is false
[Event 6] Returning zero, which participates in a condition later
[Event 10] Assuming the condition is false
[Event 11] Returning zero, which participates in a condition later
347 }
348
349 bool materialized_use_empty() const {
350 return UseList == nullptr;
351 }
352
353 using use_iterator = use_iterator_impl<Use>;
354 using const_use_iterator = use_iterator_impl<const Use>;
355
356 use_iterator materialized_use_begin() { return use_iterator(UseList); }
357 const_use_iterator materialized_use_begin() const {
358 return const_use_iterator(UseList);
359 }
360 use_iterator use_begin() {
361 assertModuleIsMaterialized();
362 return materialized_use_begin();
363 }
364 const_use_iterator use_begin() const {
365 assertModuleIsMaterialized();
366 return materialized_use_begin();
367 }
368 use_iterator use_end() { return use_iterator(); }
369 const_use_iterator use_end() const { return const_use_iterator(); }
370 iterator_range<use_iterator> materialized_uses() {
371 return make_range(materialized_use_begin(), use_end());
372 }
373 iterator_range<const_use_iterator> materialized_uses() const {
374 return make_range(materialized_use_begin(), use_end());
375 }
376 iterator_range<use_iterator> uses() {
377 assertModuleIsMaterialized();
378 return materialized_uses();
379 }
380 iterator_range<const_use_iterator> uses() const {
381 assertModuleIsMaterialized();
382 return materialized_uses();
383 }
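
A typical traversal sketch over the use list (V is assumed to be fully materialized):

    for (Use &U : V->uses()) {
      User *Usr = U.getUser();   // the instruction or constant holding this use
      (void)Usr;
    }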
384
385 bool user_empty() const {
386 assertModuleIsMaterialized();
387 return UseList == nullptr;
388 }
389
390 using user_iterator = user_iterator_impl<User>;
391 using const_user_iterator = user_iterator_impl<const User>;
392
393 user_iterator materialized_user_begin() { return user_iterator(UseList); }
394 const_user_iterator materialized_user_begin() const {
395 return const_user_iterator(UseList);
396 }
397 user_iterator user_begin() {
398 assertModuleIsMaterialized();
399 return materialized_user_begin();
400 }
401 const_user_iterator user_begin() const {
402 assertModuleIsMaterialized();
403 return materialized_user_begin();
404 }
405 user_iterator user_end() { return user_iterator(); }
406 const_user_iterator user_end() const { return const_user_iterator(); }
407 User *user_back() {
408 assertModuleIsMaterialized();
409 return *materialized_user_begin();
410 }
411 const User *user_back() const {
412 assertModuleIsMaterialized();
413 return *materialized_user_begin();
414 }
415 iterator_range<user_iterator> materialized_users() {
416 return make_range(materialized_user_begin(), user_end());
417 }
418 iterator_range<const_user_iterator> materialized_users() const {
419 return make_range(materialized_user_begin(), user_end());
420 }
421 iterator_range<user_iterator> users() {
422 assertModuleIsMaterialized();
423 return materialized_users();
424 }
425 iterator_range<const_user_iterator> users() const {
426 assertModuleIsMaterialized();
427 return materialized_users();
428 }
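
And the user-oriented counterpart, which visits a user once per use (dyn_cast comes from llvm/Support/Casting.h):

    for (User *Usr : V->users())
      if (auto *I = dyn_cast<Instruction>(Usr))
        (void)I;                 // e.g. inspect instructions that use V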
429
430 /// Return true if there is exactly one use of this value.
431 ///
432 /// This is specialized because it is a common request and does not require
433 /// traversing the whole use list.
434 bool hasOneUse() const { return hasSingleElement(uses()); }
435
436 /// Return true if this Value has exactly N uses.
437 bool hasNUses(unsigned N) const;
438
439 /// Return true if this value has N uses or more.
440 ///
441 /// This is logically equivalent to getNumUses() >= N.
442 bool hasNUsesOrMore(unsigned N) const;
443
444 /// Return true if there is exactly one user of this value.
445 ///
446 /// Note that this is not the same as "has one use". If a value has one use,
447 /// then there certainly is a single user. But if a value has several uses,
448 /// it is possible that they all belong to a single user, or to several.
449 ///
450 /// This check is potentially costly, since it requires traversing,
451 /// in the worst case, the whole use list of a value.
452 bool hasOneUser() const;
453
454 /// Return the single use of this value that cannot be dropped, or nullptr
455 /// if there is not exactly one such use.
456 Use *getSingleUndroppableUse();
457 const Use *getSingleUndroppableUse() const {
458 return const_cast<Value *>(this)->getSingleUndroppableUse();
459 }
460
461 /// Return the unique undroppable user of this value, or nullptr if there is
462 /// none (that user can have multiple uses of this value).
463 User *getUniqueUndroppableUser();
464 const User *getUniqueUndroppableUser() const {
465 return const_cast<Value *>(this)->getUniqueUndroppableUser();
466 }
467
468 /// Return true if this value has exactly N undroppable uses.
469 ///
470 /// This is specialized because it is a common request and does not require
471 /// traversing the whole use list.
472 bool hasNUndroppableUses(unsigned N) const;
473
474 /// Return true if this value has N undroppable uses or more.
475 ///
476 /// This is logically equivalent to the number of undroppable uses being >= N.
477 bool hasNUndroppableUsesOrMore(unsigned N) const;
478
479 /// Remove every use that can safely be removed.
480 ///
481 /// This will remove, for example, uses in llvm.assume.
482 /// This should be used when one wants to perform a transformation but
483 /// some droppable uses prevent it.
484 /// This function optionally takes a filter to only remove some droppable
485 /// uses.
486 void dropDroppableUses(llvm::function_ref<bool(const Use *)> ShouldDrop =
487 [](const Use *) { return true; });
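
A hedged sketch: drop every droppable use, or pass a filter to keep some (KeepThisUse is a hypothetical predicate):

    V->dropDroppableUses();                      // drop all droppable uses
    V->dropDroppableUses([&](const Use *U) {     // or drop selectively
      return !KeepThisUse(U);
    });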
488
489 /// Remove every use of this value in \p User that can safely be removed.
490 void dropDroppableUsesIn(User &Usr);
491
492 /// Remove the droppable use \p U.
493 static void dropDroppableUse(Use &U);
494
495 /// Check if this value is used in the specified basic block.
496 bool isUsedInBasicBlock(const BasicBlock *BB) const;
497
498 /// This method computes the number of uses of this Value.
499 ///
500 /// This is a linear time operation. Use hasOneUse, hasNUses, or
501 /// hasNUsesOrMore to check for specific values.
502 unsigned getNumUses() const;
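
Since getNumUses() walks the whole list, the cheaper predicates are preferred for fixed thresholds, e.g.:

    if (V->hasNUses(2)) { /* exactly two uses; stops walking early */ }
    if (V->hasNUsesOrMore(2)) { /* at least two uses */ }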
503
504 /// This method should only be used by the Use class.
505 void addUse(Use &U) { U.addToList(&UseList); }
506
507 /// Concrete subclass of this.
508 ///
509 /// An enumeration for keeping track of the concrete subclass of Value that
510 /// is actually instantiated. Values of this enumeration are kept in the
511 /// Value classes SubclassID field. They are used for concrete type
512 /// identification.
513 enum ValueTy {
514#define HANDLE_VALUE(Name) Name##Val,
515#include "llvm/IR/Value.def"
516
517 // Markers:
518#define HANDLE_CONSTANT_MARKER(Marker, Constant) Marker = Constant##Val,
519#include "llvm/IR/Value.def"
520 };
521
522 /// Return an ID for the concrete type of this object.
523 ///
524 /// This is used to implement the classof checks. This should not be used
525 /// for any other purpose, as the values may change as LLVM evolves. Also,
526 /// note that for instructions, the Instruction's opcode is added to
527 /// InstructionVal. So this means three things:
528 /// # there is no value with code InstructionVal (no opcode==0).
529 /// # there are more possible values for the value type than in ValueTy enum.
530 /// # the InstructionVal enumerator must be the highest valued enumerator in
531 /// the ValueTy enum.
532 unsigned getValueID() const {
533 return SubclassID;
534 }
535
536 /// Return the raw optional flags value contained in this value.
537 ///
538 /// This should only be used when testing two Values for equivalence.
539 unsigned getRawSubclassOptionalData() const {
540 return SubclassOptionalData;
541 }
542
543 /// Clear the optional flags contained in this value.
544 void clearSubclassOptionalData() {
545 SubclassOptionalData = 0;
546 }
547
548 /// Check the optional flags for equality.
549 bool hasSameSubclassOptionalData(const Value *V) const {
550 return SubclassOptionalData == V->SubclassOptionalData;
551 }
552
553 /// Return true if there is a value handle associated with this value.
554 bool hasValueHandle() const { return HasValueHandle; }
555
556 /// Return true if there is metadata referencing this value.
557 bool isUsedByMetadata() const { return IsUsedByMD; }
558
559 // Return true if this value is only transitively referenced by metadata.
560 bool isTransitiveUsedByMetadataOnly() const;
561
562protected:
563 /// Get the current metadata attachments for the given kind, if any.
564 ///
565 /// These functions require that the value have at most a single attachment
566 /// of the given kind, and return \c nullptr if such an attachment is missing.
567 /// @{
568 MDNode *getMetadata(unsigned KindID) const;
569 MDNode *getMetadata(StringRef Kind) const;
570 /// @}
571
572 /// Appends all attachments with the given ID to \c MDs in insertion order.
573 /// If the Value has no attachments with the given ID, or if ID is invalid,
574 /// leaves MDs unchanged.
575 /// @{
576 void getMetadata(unsigned KindID, SmallVectorImpl<MDNode *> &MDs) const;
577 void getMetadata(StringRef Kind, SmallVectorImpl<MDNode *> &MDs) const;
578 /// @}
579
580 /// Appends all metadata attached to this value to \c MDs, sorting by
581 /// KindID. The first element of each pair returned is the KindID, the second
582 /// element is the metadata value. Attachments with the same ID appear in
583 /// insertion order.
584 void
585 getAllMetadata(SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const;
586
587 /// Return true if this value has any metadata attached to it.
588 bool hasMetadata() const { return (bool)HasMetadata; }
589
590 /// Return true if this value has the given type of metadata attached.
591 /// @{
592 bool hasMetadata(unsigned KindID) const {
593 return getMetadata(KindID) != nullptr;
594 }
595 bool hasMetadata(StringRef Kind) const {
596 return getMetadata(Kind) != nullptr;
597 }
598 /// @}
599
600 /// Set a particular kind of metadata attachment.
601 ///
602 /// Sets the given attachment to \c MD, erasing it if \c MD is \c nullptr or
603 /// replacing it if it already exists.
604 /// @{
605 void setMetadata(unsigned KindID, MDNode *Node);
606 void setMetadata(StringRef Kind, MDNode *Node);
607 /// @}
608
609 /// Add a metadata attachment.
610 /// @{
611 void addMetadata(unsigned KindID, MDNode &MD);
612 void addMetadata(StringRef Kind, MDNode &MD);
613 /// @}
614
615 /// Erase all metadata attachments with the given kind.
616 ///
617 /// \returns true if any metadata was removed.
618 bool eraseMetadata(unsigned KindID);
619
620 /// Erase all metadata attached to this Value.
621 void clearMetadata();
622
623public:
624 /// Return true if this value is a swifterror value.
625 ///
626 /// swifterror values can be either a function argument or an alloca with a
627 /// swifterror attribute.
628 bool isSwiftError() const;
629
630 /// Strip off pointer casts, all-zero GEPs and address space casts.
631 ///
632 /// Returns the original uncasted value. If this is called on a non-pointer
633 /// value, it returns 'this'.
634 const Value *stripPointerCasts() const;
635 Value *stripPointerCasts() {
636 return const_cast<Value *>(
637 static_cast<const Value *>(this)->stripPointerCasts());
638 }
639
640 /// Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
641 ///
642 /// Returns the original uncasted value. If this is called on a non-pointer
643 /// value, it returns 'this'.
644 const Value *stripPointerCastsAndAliases() const;
645 Value *stripPointerCastsAndAliases() {
646 return const_cast<Value *>(
647 static_cast<const Value *>(this)->stripPointerCastsAndAliases());
648 }
649
650 /// Strip off pointer casts, all-zero GEPs and address space casts
651 /// while ensuring the representation of the result stays the same.
652 ///
653 /// Returns the original uncasted value with the same representation. If this
654 /// is called on a non-pointer value, it returns 'this'.
655 const Value *stripPointerCastsSameRepresentation() const;
656 Value *stripPointerCastsSameRepresentation() {
657 return const_cast<Value *>(static_cast<const Value *>(this)
658 ->stripPointerCastsSameRepresentation());
659 }
660
661 /// Strip off pointer casts, all-zero GEPs, single-argument phi nodes and
662 /// invariant group info.
663 ///
664 /// Returns the original uncasted value. If this is called on a non-pointer
665 /// value, it returns 'this'. This function should be used only in
666 /// Alias analysis.
667 const Value *stripPointerCastsForAliasAnalysis() const;
668 Value *stripPointerCastsForAliasAnalysis() {
669 return const_cast<Value *>(static_cast<const Value *>(this)
670 ->stripPointerCastsForAliasAnalysis());
671 }
672
673 /// Strip off pointer casts and all-constant inbounds GEPs.
674 ///
675 /// Returns the original pointer value. If this is called on a non-pointer
676 /// value, it returns 'this'.
677 const Value *stripInBoundsConstantOffsets() const;
678 Value *stripInBoundsConstantOffsets() {
679 return const_cast<Value *>(
680 static_cast<const Value *>(this)->stripInBoundsConstantOffsets());
681 }
682
683 /// Accumulate the constant offset this value has compared to a base pointer.
684 /// Only 'getelementptr' instructions (GEPs) are accumulated but other
685 /// instructions, e.g., casts, are stripped away as well.
686 /// The accumulated constant offset is added to \p Offset and the base
687 /// pointer is returned.
688 ///
689 /// The APInt \p Offset has to have a bit-width equal to the IntPtr type for
690 /// the address space of 'this' pointer value, e.g., use
691 /// DataLayout::getIndexTypeSizeInBits(Ty).
692 ///
693 /// If \p AllowNonInbounds is true, offsets in GEPs are stripped and
694 /// accumulated even if the GEP is not "inbounds".
695 ///
696 /// If \p AllowInvariantGroup is true then this method also looks through
697 /// strip.invariant.group and launder.invariant.group intrinsics.
698 ///
699 /// If \p ExternalAnalysis is provided, it will be used to calculate an offset
700 /// when an operand of the GEP is not constant.
701 /// For example, for a value \p ExternalAnalysis might try to calculate a
702 /// lower bound. If \p ExternalAnalysis is successful, it should return true.
703 ///
704 /// If this is called on a non-pointer value, it returns 'this' and the
705 /// \p Offset is not modified.
706 ///
707 /// Note that this function will never return a nullptr. It will also never
708 /// manipulate the \p Offset in a way that would not match the difference
709 /// between the underlying value and the returned one. Thus, if no constant
710 /// offset was found, the returned value is the underlying one and \p Offset
711 /// is unchanged.
712 const Value *stripAndAccumulateConstantOffsets(
713 const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
714 bool AllowInvariantGroup = false,
715 function_ref<bool(Value &Value, APInt &Offset)> ExternalAnalysis =
716 nullptr) const;
717 Value *stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset,
718 bool AllowNonInbounds,
719 bool AllowInvariantGroup = false) {
720 return const_cast<Value *>(
721 static_cast<const Value *>(this)->stripAndAccumulateConstantOffsets(
722 DL, Offset, AllowNonInbounds, AllowInvariantGroup));
723 }
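
A sketch of the intended calling convention (DL and a pointer-typed value Ptr are assumed; the APInt width must match the index type, as required above):

    APInt Offset(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
    const Value *Base = Ptr->stripAndAccumulateConstantOffsets(
        DL, Offset, /*AllowNonInbounds=*/true);
    // Base plus Offset now equals Ptr, with Offset in index-type bits.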
724
725 /// This is a wrapper around stripAndAccumulateConstantOffsets with the
726 /// in-bounds requirement set to false.
727 const Value *stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
728 APInt &Offset) const {
729 return stripAndAccumulateConstantOffsets(DL, Offset,
730 /* AllowNonInbounds */ false);
731 }
732 Value *stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL,
733 APInt &Offset) {
734 return stripAndAccumulateConstantOffsets(DL, Offset,
735 /* AllowNonInbounds */ false);
736 }
737
738 /// Strip off pointer casts and inbounds GEPs.
739 ///
740 /// Returns the original pointer value. If this is called on a non-pointer
741 /// value, it returns 'this'.
742 const Value *stripInBoundsOffsets(function_ref<void(const Value *)> Func =
743 [](const Value *) {}) const;
744 inline Value *stripInBoundsOffsets(function_ref<void(const Value *)> Func =
745 [](const Value *) {}) {
746 return const_cast<Value *>(
747 static_cast<const Value *>(this)->stripInBoundsOffsets(Func));
748 }
749
750 /// Return true if the memory object referred to by V can be freed in the
751 /// scope for which the SSA value defining the allocation is statically
752 /// defined. E.g. deallocation after the static scope of a value does not
753 /// count, but a deallocation before that does.
754 bool canBeFreed() const;
755
756 /// Returns the number of bytes known to be dereferenceable for the
757 /// pointer value.
758 ///
759 /// If CanBeNull is set by this function the pointer can either be null or be
760 /// dereferenceable up to the returned number of bytes.
761 ///
762 /// If CanBeFreed is true, the pointer is known to be dereferenceable at the
763 /// point of definition only. The caller must prove that the allocation is not
764 /// deallocated between the point of definition and the use.
765 uint64_t getPointerDereferenceableBytes(const DataLayout &DL,
766 bool &CanBeNull,
767 bool &CanBeFreed) const;
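
A minimal call sketch (DL and a pointer-typed value Ptr are assumptions):

    bool CanBeNull, CanBeFreed;
    uint64_t Bytes =
        Ptr->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
    // Bytes == 0 means nothing is known; otherwise honor CanBeNull/CanBeFreed.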
768
769 /// Returns an alignment of the pointer value.
770 ///
771 /// Returns an alignment which is either specified explicitly, e.g. via
772 /// align attribute of a function argument, or guaranteed by DataLayout.
773 Align getPointerAlignment(const DataLayout &DL) const;
774
775 /// Translate PHI node to its predecessor from the given basic block.
776 ///
777 /// If this value is a PHI node with CurBB as its parent, return the value in
778 /// the PHI node corresponding to PredBB. If not, return ourself. This is
779 /// useful if you want to know the value something has in a predecessor
780 /// block.
781 const Value *DoPHITranslation(const BasicBlock *CurBB,
782 const BasicBlock *PredBB) const;
783 Value *DoPHITranslation(const BasicBlock *CurBB, const BasicBlock *PredBB) {
784 return const_cast<Value *>(
785 static_cast<const Value *>(this)->DoPHITranslation(CurBB, PredBB));
786 }
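
For instance, if V is "%p = phi i32 [ %a, %pred1 ], [ %b, %pred2 ]" in block BB (names illustrative):

    const Value *InPred = V->DoPHITranslation(BB, Pred1);  // yields %a
    // For non-PHI values, or a different parent block, it returns V itself.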
787
788 /// The maximum alignment for instructions.
789 ///
790 /// This is the greatest alignment value supported by load, store, and alloca
791 /// instructions, and global values.
792 static constexpr unsigned MaxAlignmentExponent = 32;
793 static constexpr uint64_t MaximumAlignment = 1ULL << MaxAlignmentExponent;
794
795 /// Mutate the type of this Value to be of the specified type.
796 ///
797 /// Note that this is an extremely dangerous operation which can create
798 /// completely invalid IR very easily. It is strongly recommended that you
799 /// recreate IR objects with the right types instead of mutating them in
800 /// place.
801 void mutateType(Type *Ty) {
802 VTy = Ty;
803 }
804
805 /// Sort the use-list.
806 ///
807 /// Sorts the Value's use-list by Cmp using a stable mergesort. Cmp is
808 /// expected to compare two \a Use references.
809 template <class Compare> void sortUseList(Compare Cmp);
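
A sketch of a comparator over two Use references, e.g. ordering uses by their operand index within their user:

    V->sortUseList([](const Use &L, const Use &R) {
      return L.getOperandNo() < R.getOperandNo();
    });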
810
811 /// Reverse the use-list.
812 void reverseUseList();
813
814private:
815 /// Merge two lists together.
816 ///
817 /// Merges \c L and \c R using \c Cmp. To enable stable sorts, always pushes
818 /// "equal" items from L before items from R.
819 ///
820 /// \return the first element in the list.
821 ///
822 /// \note Completely ignores \a Use::Prev (doesn't read, doesn't update).
823 template <class Compare>
824 static Use *mergeUseLists(Use *L, Use *R, Compare Cmp) {
825 Use *Merged;
826 Use **Next = &Merged;
827
828 while (true) {
829 if (!L) {
830 *Next = R;
831 break;
832 }
833 if (!R) {
834 *Next = L;
835 break;
836 }
837 if (Cmp(*R, *L)) {
838 *Next = R;
839 Next = &R->Next;
840 R = R->Next;
841 } else {
842 *Next = L;
843 Next = &L->Next;
844 L = L->Next;
845 }
846 }
847
848 return Merged;
849 }
850
851protected:
852 unsigned short getSubclassDataFromValue() const { return SubclassData; }
853 void setValueSubclassData(unsigned short D) { SubclassData = D; }
854};
855
856struct ValueDeleter { void operator()(Value *V) { V->deleteValue(); } };
857
858/// Use this instead of std::unique_ptr<Value> or std::unique_ptr<Instruction>.
859/// Those don't work because Value and Instruction's destructors are protected,
860/// aren't virtual, and won't destroy the complete object.
861using unique_value = std::unique_ptr<Value, ValueDeleter>;
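
A minimal ownership sketch (makeSomeValue() is hypothetical):

    unique_value UV(makeSomeValue());
    // On scope exit, ValueDeleter calls deleteValue() on the owned pointer
    // instead of the protected destructor.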
862
863inline raw_ostream &operator<<(raw_ostream &OS, const Value &V) {
864 V.print(OS);
865 return OS;
866}
867
868void Use::set(Value *V) {
869 if (Val) removeFromList();
870 Val = V;
871 if (V) V->addUse(*this);
872}
873
874Value *Use::operator=(Value *RHS) {
875 set(RHS);
876 return RHS;
877}
878
879const Use &Use::operator=(const Use &RHS) {
880 set(RHS.Val);
881 return *this;
882}
883
884template <class Compare> void Value::sortUseList(Compare Cmp) {
885 if (!UseList || !UseList->Next)
886 // No need to sort 0 or 1 uses.
887 return;
888
889 // Note: this function completely ignores Prev pointers until the end when
890 // they're fixed en masse.
891
892 // Create a binomial vector of sorted lists, visiting uses one at a time and
893 // merging lists as necessary.
894 const unsigned MaxSlots = 32;
895 Use *Slots[MaxSlots];
896
897 // Collect the first use, turning it into a single-item list.
898 Use *Next = UseList->Next;
899 UseList->Next = nullptr;
900 unsigned NumSlots = 1;
901 Slots[0] = UseList;
902
903 // Collect all but the last use.
904 while (Next->Next) {
905 Use *Current = Next;
906 Next = Current->Next;
907
908 // Turn Current into a single-item list.
909 Current->Next = nullptr;
910
911 // Save Current in the first available slot, merging on collisions.
912 unsigned I;
913 for (I = 0; I < NumSlots; ++I) {
914 if (!Slots[I])
915 break;
916
917 // Merge two lists, doubling the size of Current and emptying slot I.
918 //
919 // Since the uses in Slots[I] originally preceded those in Current, send
920 // Slots[I] in as the left parameter to maintain a stable sort.
921 Current = mergeUseLists(Slots[I], Current, Cmp);
922 Slots[I] = nullptr;
923 }
924 // Check if this is a new slot.
925 if (I == NumSlots) {
926 ++NumSlots;
927 assert(NumSlots <= MaxSlots && "Use list bigger than 2^32");
928 }
929
930 // Found an open slot.
931 Slots[I] = Current;
932 }
933
934 // Merge all the lists together.
935 assert(Next && "Expected one more Use");
936 assert(!Next->Next && "Expected only one Use");
937 UseList = Next;
938 for (unsigned I = 0; I < NumSlots; ++I)
939 if (Slots[I])
940 // Since the uses in Slots[I] originally preceded those in UseList, send
941 // Slots[I] in as the left parameter to maintain a stable sort.
942 UseList = mergeUseLists(Slots[I], UseList, Cmp);
943
944 // Fix the Prev pointers.
945 for (Use *I = UseList, **Prev = &UseList; I; I = I->Next) {
946 I->Prev = Prev;
947 Prev = &I->Next;
948 }
949}
950
951// isa - Provide some specializations of isa so that we don't have to include
952// the subtype header files to test to see if the value is a subclass...
953//
954template <> struct isa_impl<Constant, Value> {
955 static inline bool doit(const Value &Val) {
956 static_assert(Value::ConstantFirstVal == 0, "Val.getValueID() >= Value::ConstantFirstVal");
957 return Val.getValueID() <= Value::ConstantLastVal;
958 }
959};
960
961template <> struct isa_impl<ConstantData, Value> {
962 static inline bool doit(const Value &Val) {
963 return Val.getValueID() >= Value::ConstantDataFirstVal &&
964 Val.getValueID() <= Value::ConstantDataLastVal;
965 }
966};
967
968template <> struct isa_impl<ConstantAggregate, Value> {
969 static inline bool doit(const Value &Val) {
970 return Val.getValueID() >= Value::ConstantAggregateFirstVal &&
971 Val.getValueID() <= Value::ConstantAggregateLastVal;
972 }
973};
974
975template <> struct isa_impl<Argument, Value> {
976 static inline bool doit (const Value &Val) {
977 return Val.getValueID() == Value::ArgumentVal;
978 }
979};
980
981template <> struct isa_impl<InlineAsm, Value> {
982 static inline bool doit(const Value &Val) {
983 return Val.getValueID() == Value::InlineAsmVal;
984 }
985};
986
987template <> struct isa_impl<Instruction, Value> {
988 static inline bool doit(const Value &Val) {
989 return Val.getValueID() >= Value::InstructionVal;
990 }
991};
992
993template <> struct isa_impl<BasicBlock, Value> {
994 static inline bool doit(const Value &Val) {
995 return Val.getValueID() == Value::BasicBlockVal;
996 }
997};
998
999template <> struct isa_impl<Function, Value> {
1000 static inline bool doit(const Value &Val) {
1001 return Val.getValueID() == Value::FunctionVal;
1002 }
1003};
1004
1005template <> struct isa_impl<GlobalVariable, Value> {
1006 static inline bool doit(const Value &Val) {
1007 return Val.getValueID() == Value::GlobalVariableVal;
1008 }
1009};
1010
1011template <> struct isa_impl<GlobalAlias, Value> {
1012 static inline bool doit(const Value &Val) {
1013 return Val.getValueID() == Value::GlobalAliasVal;
1014 }
1015};
1016
1017template <> struct isa_impl<GlobalIFunc, Value> {
1018 static inline bool doit(const Value &Val) {
1019 return Val.getValueID() == Value::GlobalIFuncVal;
1020 }
1021};
1022
1023template <> struct isa_impl<GlobalValue, Value> {
1024 static inline bool doit(const Value &Val) {
1025 return isa<GlobalObject>(Val) || isa<GlobalAlias>(Val);
1026 }
1027};
1028
1029template <> struct isa_impl<GlobalObject, Value> {
1030 static inline bool doit(const Value &Val) {
1031 return isa<GlobalVariable>(Val) || isa<Function>(Val) ||
1032 isa<GlobalIFunc>(Val);
1033 }
1034};
1035
1036// Create wrappers for C Binding types (see CBindingWrapping.h).
1037 DEFINE_ISA_CONVERSION_FUNCTIONS(Value, LLVMValueRef)
     // The macro (from llvm/Support/CBindingWrapping.h) expands to:
     //   inline Value *unwrap(LLVMValueRef P) { return reinterpret_cast<Value *>(P); }
     //   inline LLVMValueRef wrap(const Value *P) {
     //     return reinterpret_cast<LLVMValueRef>(const_cast<Value *>(P));
     //   }
     //   template <typename T> inline T *unwrap(LLVMValueRef P) { return cast<T>(unwrap(P)); }
1038
1039// Specialized opaque value conversions.
1040inline Value **unwrap(LLVMValueRef *Vals) {
1041 return reinterpret_cast<Value**>(Vals);
1042}
1043
1044template<typename T>
1045inline T **unwrap(LLVMValueRef *Vals, unsigned Length) {
1046#ifndef NDEBUG
1047 for (LLVMValueRef *I = Vals, *E = Vals + Length; I != E; ++I)
1048 unwrap<T>(*I); // For side effect of calling assert on invalid usage.
1049#endif
1050 (void)Length;
1051 return reinterpret_cast<T**>(Vals);
1052}
1053
1054inline LLVMValueRef *wrap(const Value **Vals) {
1055 return reinterpret_cast<LLVMValueRef*>(const_cast<Value**>(Vals));
1056}
1057
1058} // end namespace llvm
1059
1060#endif // LLVM_IR_VALUE_H