Bug Summary

File: lib/Transforms/IPO/LowerTypeTests.cpp
Warning: line 1455, column 9
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name LowerTypeTests.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-9/lib/clang/9.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-9~svn362543/build-llvm/lib/Transforms/IPO -I /build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/IPO -I /build/llvm-toolchain-snapshot-9~svn362543/build-llvm/include -I /build/llvm-toolchain-snapshot-9~svn362543/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/include/clang/9.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-9/lib/clang/9.0.0/include 
-internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-9~svn362543/build-llvm/lib/Transforms/IPO -fdebug-prefix-map=/build/llvm-toolchain-snapshot-9~svn362543=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2019-06-05-060531-1271-1 -x c++ /build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/IPO/LowerTypeTests.cpp -faddrsig
1//===- LowerTypeTests.cpp - type metadata lowering pass -------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass lowers type metadata and calls to the llvm.type.test intrinsic.
10// It also ensures that globals are properly laid out for the
11// llvm.icall.branch.funnel intrinsic.
12// See http://llvm.org/docs/TypeMetadata.html for more information.
13//
14//===----------------------------------------------------------------------===//
15
16#include "llvm/Transforms/IPO/LowerTypeTests.h"
17#include "llvm/ADT/APInt.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/DenseMap.h"
20#include "llvm/ADT/EquivalenceClasses.h"
21#include "llvm/ADT/PointerUnion.h"
22#include "llvm/ADT/SetVector.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/Statistic.h"
25#include "llvm/ADT/StringRef.h"
26#include "llvm/ADT/TinyPtrVector.h"
27#include "llvm/ADT/Triple.h"
28#include "llvm/Analysis/TypeMetadataUtils.h"
29#include "llvm/Analysis/ValueTracking.h"
30#include "llvm/IR/Attributes.h"
31#include "llvm/IR/BasicBlock.h"
32#include "llvm/IR/Constant.h"
33#include "llvm/IR/Constants.h"
34#include "llvm/IR/DataLayout.h"
35#include "llvm/IR/DerivedTypes.h"
36#include "llvm/IR/Function.h"
37#include "llvm/IR/GlobalAlias.h"
38#include "llvm/IR/GlobalObject.h"
39#include "llvm/IR/GlobalValue.h"
40#include "llvm/IR/GlobalVariable.h"
41#include "llvm/IR/IRBuilder.h"
42#include "llvm/IR/InlineAsm.h"
43#include "llvm/IR/Instruction.h"
44#include "llvm/IR/Instructions.h"
45#include "llvm/IR/Intrinsics.h"
46#include "llvm/IR/LLVMContext.h"
47#include "llvm/IR/Metadata.h"
48#include "llvm/IR/Module.h"
49#include "llvm/IR/ModuleSummaryIndex.h"
50#include "llvm/IR/ModuleSummaryIndexYAML.h"
51#include "llvm/IR/Operator.h"
52#include "llvm/IR/PassManager.h"
53#include "llvm/IR/Type.h"
54#include "llvm/IR/Use.h"
55#include "llvm/IR/User.h"
56#include "llvm/IR/Value.h"
57#include "llvm/Pass.h"
58#include "llvm/Support/Allocator.h"
59#include "llvm/Support/Casting.h"
60#include "llvm/Support/CommandLine.h"
61#include "llvm/Support/Debug.h"
62#include "llvm/Support/Error.h"
63#include "llvm/Support/ErrorHandling.h"
64#include "llvm/Support/FileSystem.h"
65#include "llvm/Support/MathExtras.h"
66#include "llvm/Support/MemoryBuffer.h"
67#include "llvm/Support/TrailingObjects.h"
68#include "llvm/Support/YAMLTraits.h"
69#include "llvm/Support/raw_ostream.h"
70#include "llvm/Transforms/IPO.h"
71#include "llvm/Transforms/Utils/BasicBlockUtils.h"
72#include "llvm/Transforms/Utils/ModuleUtils.h"
73#include <algorithm>
74#include <cassert>
75#include <cstdint>
76#include <memory>
77#include <set>
78#include <string>
79#include <system_error>
80#include <utility>
81#include <vector>
82
83using namespace llvm;
84using namespace lowertypetests;
85
86#define DEBUG_TYPE"lowertypetests" "lowertypetests"
87
88STATISTIC(ByteArraySizeBits, "Byte array size in bits")static llvm::Statistic ByteArraySizeBits = {"lowertypetests",
"ByteArraySizeBits", "Byte array size in bits", {0}, {false}
}
;
89STATISTIC(ByteArraySizeBytes, "Byte array size in bytes")static llvm::Statistic ByteArraySizeBytes = {"lowertypetests"
, "ByteArraySizeBytes", "Byte array size in bytes", {0}, {false
}}
;
90STATISTIC(NumByteArraysCreated, "Number of byte arrays created")static llvm::Statistic NumByteArraysCreated = {"lowertypetests"
, "NumByteArraysCreated", "Number of byte arrays created", {0
}, {false}}
;
91STATISTIC(NumTypeTestCallsLowered, "Number of type test calls lowered")static llvm::Statistic NumTypeTestCallsLowered = {"lowertypetests"
, "NumTypeTestCallsLowered", "Number of type test calls lowered"
, {0}, {false}}
;
92STATISTIC(NumTypeIdDisjointSets, "Number of disjoint sets of type identifiers")static llvm::Statistic NumTypeIdDisjointSets = {"lowertypetests"
, "NumTypeIdDisjointSets", "Number of disjoint sets of type identifiers"
, {0}, {false}}
;
93
94static cl::opt<bool> AvoidReuse(
95 "lowertypetests-avoid-reuse",
96 cl::desc("Try to avoid reuse of byte array addresses using aliases"),
97 cl::Hidden, cl::init(true));
98
99static cl::opt<PassSummaryAction> ClSummaryAction(
100 "lowertypetests-summary-action",
101 cl::desc("What to do with the summary when running this pass"),
102 cl::values(clEnumValN(PassSummaryAction::None, "none", "Do nothing")llvm::cl::OptionEnumValue { "none", int(PassSummaryAction::None
), "Do nothing" }
,
103 clEnumValN(PassSummaryAction::Import, "import",llvm::cl::OptionEnumValue { "import", int(PassSummaryAction::
Import), "Import typeid resolutions from summary and globals"
}
104 "Import typeid resolutions from summary and globals")llvm::cl::OptionEnumValue { "import", int(PassSummaryAction::
Import), "Import typeid resolutions from summary and globals"
}
,
105 clEnumValN(PassSummaryAction::Export, "export",llvm::cl::OptionEnumValue { "export", int(PassSummaryAction::
Export), "Export typeid resolutions to summary and globals" }
106 "Export typeid resolutions to summary and globals")llvm::cl::OptionEnumValue { "export", int(PassSummaryAction::
Export), "Export typeid resolutions to summary and globals" }
),
107 cl::Hidden);
108
109static cl::opt<std::string> ClReadSummary(
110 "lowertypetests-read-summary",
111 cl::desc("Read summary from given YAML file before running pass"),
112 cl::Hidden);
113
114static cl::opt<std::string> ClWriteSummary(
115 "lowertypetests-write-summary",
116 cl::desc("Write summary to given YAML file after running pass"),
117 cl::Hidden);
118
119bool BitSetInfo::containsGlobalOffset(uint64_t Offset) const {
120 if (Offset < ByteOffset)
121 return false;
122
123 if ((Offset - ByteOffset) % (uint64_t(1) << AlignLog2) != 0)
124 return false;
125
126 uint64_t BitOffset = (Offset - ByteOffset) >> AlignLog2;
127 if (BitOffset >= BitSize)
128 return false;
129
130 return Bits.count(BitOffset);
131}
132
133void BitSetInfo::print(raw_ostream &OS) const {
134 OS << "offset " << ByteOffset << " size " << BitSize << " align "
135 << (1 << AlignLog2);
136
137 if (isAllOnes()) {
138 OS << " all-ones\n";
139 return;
140 }
141
142 OS << " { ";
143 for (uint64_t B : Bits)
144 OS << B << ' ';
145 OS << "}\n";
146}
147
148BitSetInfo BitSetBuilder::build() {
149 if (Min > Max)
150 Min = 0;
151
152 // Normalize each offset against the minimum observed offset, and compute
153 // the bitwise OR of each of the offsets. The number of trailing zeros
154 // in the mask gives us the log2 of the alignment of all offsets, which
155 // allows us to compress the bitset by only storing one bit per aligned
156 // address.
157 uint64_t Mask = 0;
158 for (uint64_t &Offset : Offsets) {
159 Offset -= Min;
160 Mask |= Offset;
161 }
162
163 BitSetInfo BSI;
164 BSI.ByteOffset = Min;
165
166 BSI.AlignLog2 = 0;
167 if (Mask != 0)
168 BSI.AlignLog2 = countTrailingZeros(Mask, ZB_Undefined);
169
170 // Build the compressed bitset while normalizing the offsets against the
171 // computed alignment.
172 BSI.BitSize = ((Max - Min) >> BSI.AlignLog2) + 1;
173 for (uint64_t Offset : Offsets) {
174 Offset >>= BSI.AlignLog2;
175 BSI.Bits.insert(Offset);
176 }
177
178 return BSI;
179}
180
/// Add the set of object indices F as a layout constraint: all members of F
/// should be laid out contiguously. Any existing fragments that share a
/// member with F are merged into the new fragment.
void GlobalLayoutBuilder::addFragment(const std::set<uint64_t> &F) {
  // Create a new fragment to hold the layout for F.
  Fragments.emplace_back();
  std::vector<uint64_t> &Fragment = Fragments.back();
  uint64_t FragmentIndex = Fragments.size() - 1;

  for (auto ObjIndex : F) {
    // FragmentMap[ObjIndex] default-constructs to 0 for unseen indices, so
    // fragment index 0 acts as the "not yet assigned" sentinel here.
    uint64_t OldFragmentIndex = FragmentMap[ObjIndex];
    if (OldFragmentIndex == 0) {
      // We haven't seen this object index before, so just add it to the current
      // fragment.
      Fragment.push_back(ObjIndex);
    } else {
      // This index belongs to an existing fragment. Copy the elements of the
      // old fragment into this one and clear the old fragment. We don't update
      // the fragment map just yet, this ensures that any further references to
      // indices from the old fragment in this fragment do not insert any more
      // indices.
      std::vector<uint64_t> &OldFragment = Fragments[OldFragmentIndex];
      Fragment.insert(Fragment.end(), OldFragment.begin(), OldFragment.end());
      OldFragment.clear();
    }
  }

  // Update the fragment map to point our object indices to this fragment.
  for (uint64_t ObjIndex : Fragment)
    FragmentMap[ObjIndex] = FragmentIndex;
}
209
210void ByteArrayBuilder::allocate(const std::set<uint64_t> &Bits,
211 uint64_t BitSize, uint64_t &AllocByteOffset,
212 uint8_t &AllocMask) {
213 // Find the smallest current allocation.
214 unsigned Bit = 0;
215 for (unsigned I = 1; I != BitsPerByte; ++I)
216 if (BitAllocs[I] < BitAllocs[Bit])
217 Bit = I;
218
219 AllocByteOffset = BitAllocs[Bit];
220
221 // Add our size to it.
222 unsigned ReqSize = AllocByteOffset + BitSize;
223 BitAllocs[Bit] = ReqSize;
224 if (Bytes.size() < ReqSize)
225 Bytes.resize(ReqSize);
226
227 // Set our bits.
228 AllocMask = 1 << Bit;
229 for (uint64_t B : Bits)
230 Bytes[AllocByteOffset + B] |= AllocMask;
231}
232
233namespace {
234
235struct ByteArrayInfo {
236 std::set<uint64_t> Bits;
237 uint64_t BitSize;
238 GlobalVariable *ByteArray;
239 GlobalVariable *MaskGlobal;
240 uint8_t *MaskPtr = nullptr;
241};
242
/// A POD-like structure that we use to store a global reference together with
/// its metadata types. In this pass we frequently need to query the set of
/// metadata types referenced by a global, which at the IR level is an expensive
/// operation involving a map lookup; this data structure helps to reduce the
/// number of times we need to do this lookup.
///
/// The MDNode pointers are stored inline after the object (TrailingObjects),
/// so instances must be created through create() on a BumpPtrAllocator and
/// are never destroyed individually.
class GlobalTypeMember final : TrailingObjects<GlobalTypeMember, MDNode *> {
  friend TrailingObjects;

  // The global (function or variable) this member describes.
  GlobalObject *GO;
  // Number of MDNode pointers in the trailing array.
  size_t NTypes;

  // For functions: true if this is a definition (either in the merged module or
  // in one of the thinlto modules).
  bool IsDefinition;

  // For functions: true if this function is either defined or used in a thinlto
  // module and its jumptable entry needs to be exported to thinlto backends.
  bool IsExported;

  // Required by TrailingObjects to locate the trailing MDNode* array.
  size_t numTrailingObjects(OverloadToken<MDNode *>) const { return NTypes; }

public:
  /// Allocate a GlobalTypeMember (plus inline storage for Types) from Alloc
  /// and initialize all fields. No constructor runs; fields are assigned
  /// directly into the raw allocation.
  static GlobalTypeMember *create(BumpPtrAllocator &Alloc, GlobalObject *GO,
                                  bool IsDefinition, bool IsExported,
                                  ArrayRef<MDNode *> Types) {
    auto *GTM = static_cast<GlobalTypeMember *>(Alloc.Allocate(
        totalSizeToAlloc<MDNode *>(Types.size()), alignof(GlobalTypeMember)));
    GTM->GO = GO;
    GTM->NTypes = Types.size();
    GTM->IsDefinition = IsDefinition;
    GTM->IsExported = IsExported;
    std::uninitialized_copy(Types.begin(), Types.end(),
                            GTM->getTrailingObjects<MDNode *>());
    return GTM;
  }

  GlobalObject *getGlobal() const {
    return GO;
  }

  bool isDefinition() const {
    return IsDefinition;
  }

  bool isExported() const {
    return IsExported;
  }

  /// The type metadata nodes attached to this global.
  ArrayRef<MDNode *> types() const {
    return makeArrayRef(getTrailingObjects<MDNode *>(), NTypes);
  }
};
295
/// A call to llvm.icall.branch.funnel together with the jump-table targets
/// it funnels to. Targets are stored inline after the object
/// (TrailingObjects); instances are bump-allocated through create() and
/// never destroyed individually.
struct ICallBranchFunnel final
    : TrailingObjects<ICallBranchFunnel, GlobalTypeMember *> {
  /// Allocate an ICallBranchFunnel (plus inline storage for Targets) from
  /// Alloc and initialize all fields directly into the raw allocation.
  static ICallBranchFunnel *create(BumpPtrAllocator &Alloc, CallInst *CI,
                                   ArrayRef<GlobalTypeMember *> Targets,
                                   unsigned UniqueId) {
    auto *Call = static_cast<ICallBranchFunnel *>(
        Alloc.Allocate(totalSizeToAlloc<GlobalTypeMember *>(Targets.size()),
                       alignof(ICallBranchFunnel)));
    Call->CI = CI;
    Call->UniqueId = UniqueId;
    Call->NTargets = Targets.size();
    std::uninitialized_copy(Targets.begin(), Targets.end(),
                            Call->getTrailingObjects<GlobalTypeMember *>());
    return Call;
  }

  // The llvm.icall.branch.funnel call site.
  CallInst *CI;
  /// The candidate targets of this funnel.
  ArrayRef<GlobalTypeMember *> targets() const {
    return makeArrayRef(getTrailingObjects<GlobalTypeMember *>(), NTargets);
  }

  // Identifier assigned by the caller of create(); presumably used to name
  // or pair the generated dispatch function — confirm at the use sites,
  // which are outside this view.
  unsigned UniqueId;

private:
  size_t NTargets;
};
322
/// Implementation of the lowering described in the file header: gathers the
/// type metadata in a module, lays out the referenced globals, and lowers
/// llvm.type.test calls (and branch funnels) against that layout.
class LowerTypeTestsModule {
  Module &M;

  // Summaries for the ThinLTO export (regular LTO phase) and import
  // (backend) directions; either may be null depending on the mode.
  ModuleSummaryIndex *ExportSummary;
  const ModuleSummaryIndex *ImportSummary;

  // Target properties derived from the module triple.
  Triple::ArchType Arch;
  Triple::OSType OS;
  Triple::ObjectFormatType ObjectFormat;

  // Frequently used IR types, cached once per module.
  IntegerType *Int1Ty = Type::getInt1Ty(M.getContext());
  IntegerType *Int8Ty = Type::getInt8Ty(M.getContext());
  PointerType *Int8PtrTy = Type::getInt8PtrTy(M.getContext());
  ArrayType *Int8Arr0Ty = ArrayType::get(Type::getInt8Ty(M.getContext()), 0);
  IntegerType *Int32Ty = Type::getInt32Ty(M.getContext());
  PointerType *Int32PtrTy = PointerType::getUnqual(Int32Ty);
  IntegerType *Int64Ty = Type::getInt64Ty(M.getContext());
  IntegerType *IntPtrTy = M.getDataLayout().getIntPtrType(M.getContext(), 0);

  // Indirect function call index assignment counter for WebAssembly
  uint64_t IndirectIndex = 1;

  // Mapping from type identifiers to the call sites that test them, as well as
  // whether the type identifier needs to be exported to ThinLTO backends as
  // part of the regular LTO phase of the ThinLTO pipeline (see exportTypeId).
  struct TypeIdUserInfo {
    std::vector<CallInst *> CallSites;
    bool IsExported = false;
  };
  DenseMap<Metadata *, TypeIdUserInfo> TypeIdUsers;

  /// This structure describes how to lower type tests for a particular type
  /// identifier. It is either built directly from the global analysis (during
  /// regular LTO or the regular LTO phase of ThinLTO), or indirectly using type
  /// identifier summaries and external symbol references (in ThinLTO backends).
  struct TypeIdLowering {
    TypeTestResolution::Kind TheKind = TypeTestResolution::Unsat;

    /// All except Unsat: the start address within the combined global.
    Constant *OffsetedGlobal;

    /// ByteArray, Inline, AllOnes: log2 of the required global alignment
    /// relative to the start address.
    Constant *AlignLog2;

    /// ByteArray, Inline, AllOnes: one less than the size of the memory region
    /// covering members of this type identifier as a multiple of 2^AlignLog2.
    Constant *SizeM1;

    /// ByteArray: the byte array to test the address against.
    Constant *TheByteArray;

    /// ByteArray: the bit mask to apply to bytes loaded from the byte array.
    Constant *BitMask;

    /// Inline: the bit mask to test the address against.
    Constant *InlineBits;
  };

  // All byte arrays created so far; packed into one global by
  // allocateByteArrays().
  std::vector<ByteArrayInfo> ByteArrayInfos;

  // Lazily created module constructor used when globals' initializers must
  // be moved (see moveInitializerToModuleConstructor).
  Function *WeakInitializerFn = nullptr;

  // --- ThinLTO import/export helpers ---
  bool shouldExportConstantsAsAbsoluteSymbols();
  uint8_t *exportTypeId(StringRef TypeId, const TypeIdLowering &TIL);
  TypeIdLowering importTypeId(StringRef TypeId);
  void importTypeTest(CallInst *CI);
  void importFunction(Function *F, bool isDefinition);

  // --- Bitset construction and type test lowering ---
  BitSetInfo
  buildBitSet(Metadata *TypeId,
              const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout);
  ByteArrayInfo *createByteArray(BitSetInfo &BSI);
  void allocateByteArrays();
  Value *createBitSetTest(IRBuilder<> &B, const TypeIdLowering &TIL,
                          Value *BitOffset);
  void lowerTypeTestCalls(
      ArrayRef<Metadata *> TypeIds, Constant *CombinedGlobalAddr,
      const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout);
  Value *lowerTypeTestCall(Metadata *TypeId, CallInst *CI,
                           const TypeIdLowering &TIL);

  // --- Global/function layout and jump tables ---
  void buildBitSetsFromGlobalVariables(ArrayRef<Metadata *> TypeIds,
                                       ArrayRef<GlobalTypeMember *> Globals);
  unsigned getJumpTableEntrySize();
  Type *getJumpTableEntryType();
  void createJumpTableEntry(raw_ostream &AsmOS, raw_ostream &ConstraintOS,
                            Triple::ArchType JumpTableArch,
                            SmallVectorImpl<Value *> &AsmArgs, Function *Dest);
  void verifyTypeMDNode(GlobalObject *GO, MDNode *Type);
  void buildBitSetsFromFunctions(ArrayRef<Metadata *> TypeIds,
                                 ArrayRef<GlobalTypeMember *> Functions);
  void buildBitSetsFromFunctionsNative(ArrayRef<Metadata *> TypeIds,
                                       ArrayRef<GlobalTypeMember *> Functions);
  void buildBitSetsFromFunctionsWASM(ArrayRef<Metadata *> TypeIds,
                                     ArrayRef<GlobalTypeMember *> Functions);
  void
  buildBitSetsFromDisjointSet(ArrayRef<Metadata *> TypeIds,
                              ArrayRef<GlobalTypeMember *> Globals,
                              ArrayRef<ICallBranchFunnel *> ICallBranchFunnels);

  void replaceWeakDeclarationWithJumpTablePtr(Function *F, Constant *JT, bool IsDefinition);
  void moveInitializerToModuleConstructor(GlobalVariable *GV);
  void findGlobalVariableUsersOf(Constant *C,
                                 SmallSetVector<GlobalVariable *, 8> &Out);

  void createJumpTable(Function *F, ArrayRef<GlobalTypeMember *> Functions);

  /// replaceCfiUses - Go through the uses list for this definition
  /// and make each use point to "V" instead of "this" when the use is outside
  /// the block. 'This's use list is expected to have at least one element.
  /// Unlike replaceAllUsesWith this function skips blockaddr and direct call
  /// uses.
  void replaceCfiUses(Function *Old, Value *New, bool IsDefinition);

  /// replaceDirectCalls - Go through the uses list for this definition and
  /// replace each use, which is a direct function call.
  void replaceDirectCalls(Value *Old, Value *New);

public:
  LowerTypeTestsModule(Module &M, ModuleSummaryIndex *ExportSummary,
                       const ModuleSummaryIndex *ImportSummary);

  /// Run the lowering over the whole module; returns true if it changed.
  bool lower();

  // Lower the module using the action and summary passed as command line
  // arguments. For testing purposes only.
  static bool runForTesting(Module &M);
};
452
453struct LowerTypeTests : public ModulePass {
454 static char ID;
455
456 bool UseCommandLine = false;
457
458 ModuleSummaryIndex *ExportSummary;
459 const ModuleSummaryIndex *ImportSummary;
460
461 LowerTypeTests() : ModulePass(ID), UseCommandLine(true) {
462 initializeLowerTypeTestsPass(*PassRegistry::getPassRegistry());
463 }
464
465 LowerTypeTests(ModuleSummaryIndex *ExportSummary,
466 const ModuleSummaryIndex *ImportSummary)
467 : ModulePass(ID), ExportSummary(ExportSummary),
468 ImportSummary(ImportSummary) {
469 initializeLowerTypeTestsPass(*PassRegistry::getPassRegistry());
470 }
471
472 bool runOnModule(Module &M) override {
473 if (UseCommandLine)
474 return LowerTypeTestsModule::runForTesting(M);
475 return LowerTypeTestsModule(M, ExportSummary, ImportSummary).lower();
476 }
477};
478
479} // end anonymous namespace
480
// Pass identification: the address of ID uniquely identifies the pass.
char LowerTypeTests::ID = 0;
482
483INITIALIZE_PASS(LowerTypeTests, "lowertypetests", "Lower type metadata", false,static void *initializeLowerTypeTestsPassOnce(PassRegistry &
Registry) { PassInfo *PI = new PassInfo( "Lower type metadata"
, "lowertypetests", &LowerTypeTests::ID, PassInfo::NormalCtor_t
(callDefaultCtor<LowerTypeTests>), false, false); Registry
.registerPass(*PI, true); return PI; } static llvm::once_flag
InitializeLowerTypeTestsPassFlag; void llvm::initializeLowerTypeTestsPass
(PassRegistry &Registry) { llvm::call_once(InitializeLowerTypeTestsPassFlag
, initializeLowerTypeTestsPassOnce, std::ref(Registry)); }
484 false)static void *initializeLowerTypeTestsPassOnce(PassRegistry &
Registry) { PassInfo *PI = new PassInfo( "Lower type metadata"
, "lowertypetests", &LowerTypeTests::ID, PassInfo::NormalCtor_t
(callDefaultCtor<LowerTypeTests>), false, false); Registry
.registerPass(*PI, true); return PI; } static llvm::once_flag
InitializeLowerTypeTestsPassFlag; void llvm::initializeLowerTypeTestsPass
(PassRegistry &Registry) { llvm::call_once(InitializeLowerTypeTestsPassFlag
, initializeLowerTypeTestsPassOnce, std::ref(Registry)); }
485
/// Factory used by the legacy pass manager to create the pass with explicit
/// export/import summaries (as opposed to the command-line-driven test mode
/// selected by the pass's default constructor).
ModulePass *
llvm::createLowerTypeTestsPass(ModuleSummaryIndex *ExportSummary,
                               const ModuleSummaryIndex *ImportSummary) {
  return new LowerTypeTests(ExportSummary, ImportSummary);
}
491
/// Build a bit set for TypeId using the object layouts in
/// GlobalLayout.
BitSetInfo LowerTypeTestsModule::buildBitSet(
    Metadata *TypeId,
    const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout) {
  BitSetBuilder BSB;

  // Compute the byte offset of each address associated with this type
  // identifier.
  for (auto &GlobalAndOffset : GlobalLayout) {
    for (MDNode *Type : GlobalAndOffset.first->types()) {
      // Type metadata operand 1 is the identifier; skip nodes for other ids.
      if (Type->getOperand(1) != TypeId)
        continue;
      // Operand 0 is the byte offset of the member within its global; add
      // the global's own offset within the combined layout.
      uint64_t Offset =
          cast<ConstantInt>(
              cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
              ->getZExtValue();
      BSB.addOffset(GlobalAndOffset.second + Offset);
    }
  }

  return BSB.build();
}
515
516/// Build a test that bit BitOffset mod sizeof(Bits)*8 is set in
517/// Bits. This pattern matches to the bt instruction on x86.
518static Value *createMaskedBitTest(IRBuilder<> &B, Value *Bits,
519 Value *BitOffset) {
520 auto BitsType = cast<IntegerType>(Bits->getType());
521 unsigned BitWidth = BitsType->getBitWidth();
522
523 BitOffset = B.CreateZExtOrTrunc(BitOffset, BitsType);
524 Value *BitIndex =
525 B.CreateAnd(BitOffset, ConstantInt::get(BitsType, BitWidth - 1));
526 Value *BitMask = B.CreateShl(ConstantInt::get(BitsType, 1), BitIndex);
527 Value *MaskedBits = B.CreateAnd(Bits, BitMask);
528 return B.CreateICmpNE(MaskedBits, ConstantInt::get(BitsType, 0));
529}
530
/// Record a new byte array for BSI and return its book-keeping entry.
ByteArrayInfo *LowerTypeTestsModule::createByteArray(BitSetInfo &BSI) {
  // Create globals to stand in for byte arrays and masks. These never actually
  // get initialized, we RAUW and erase them later in allocateByteArrays() once
  // we know the offset and mask to use.
  auto ByteArrayGlobal = new GlobalVariable(
      M, Int8Ty, /*isConstant=*/true, GlobalValue::PrivateLinkage, nullptr);
  auto MaskGlobal = new GlobalVariable(M, Int8Ty, /*isConstant=*/true,
                                       GlobalValue::PrivateLinkage, nullptr);

  // NOTE(review): the returned pointer aims into ByteArrayInfos and is
  // invalidated if the vector reallocates on a later createByteArray call;
  // callers presumably do not hold it across further calls — confirm.
  ByteArrayInfos.emplace_back();
  ByteArrayInfo *BAI = &ByteArrayInfos.back();

  BAI->Bits = BSI.Bits;
  BAI->BitSize = BSI.BitSize;
  BAI->ByteArray = ByteArrayGlobal;
  BAI->MaskGlobal = MaskGlobal;
  return BAI;
}
549
/// Pack all byte arrays created so far into a single shared global and
/// replace the placeholder globals with the final addresses and masks.
void LowerTypeTestsModule::allocateByteArrays() {
  // Allocate larger bitsets first (stable to keep ordering deterministic);
  // ByteArrayBuilder::allocate packs each one into the least-used bit
  // position of the shared array.
  llvm::stable_sort(ByteArrayInfos,
                    [](const ByteArrayInfo &BAI1, const ByteArrayInfo &BAI2) {
                      return BAI1.BitSize > BAI2.BitSize;
                    });

  std::vector<uint64_t> ByteArrayOffsets(ByteArrayInfos.size());

  ByteArrayBuilder BAB;
  for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
    ByteArrayInfo *BAI = &ByteArrayInfos[I];

    uint8_t Mask;
    BAB.allocate(BAI->Bits, BAI->BitSize, ByteArrayOffsets[I], Mask);

    // Replace the placeholder mask global with the now-known constant mask,
    // expressed as an inttoptr so it matches the placeholder's pointer type.
    BAI->MaskGlobal->replaceAllUsesWith(
        ConstantExpr::getIntToPtr(ConstantInt::get(Int8Ty, Mask), Int8PtrTy));
    BAI->MaskGlobal->eraseFromParent();
    if (BAI->MaskPtr)
      *BAI->MaskPtr = Mask;
  }

  // Materialize the packed bytes as one private constant global.
  Constant *ByteArrayConst = ConstantDataArray::get(M.getContext(), BAB.Bytes);
  auto ByteArray =
      new GlobalVariable(M, ByteArrayConst->getType(), /*isConstant=*/true,
                         GlobalValue::PrivateLinkage, ByteArrayConst);

  for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
    ByteArrayInfo *BAI = &ByteArrayInfos[I];

    Constant *Idxs[] = {ConstantInt::get(IntPtrTy, 0),
                        ConstantInt::get(IntPtrTy, ByteArrayOffsets[I])};
    Constant *GEP = ConstantExpr::getInBoundsGetElementPtr(
        ByteArrayConst->getType(), ByteArray, Idxs);

    // Create an alias instead of RAUW'ing the gep directly. On x86 this ensures
    // that the pc-relative displacement is folded into the lea instead of the
    // test instruction getting another displacement.
    GlobalAlias *Alias = GlobalAlias::create(
        Int8Ty, 0, GlobalValue::PrivateLinkage, "bits", GEP, &M);
    BAI->ByteArray->replaceAllUsesWith(Alias);
    BAI->ByteArray->eraseFromParent();
  }

  // Update the statistics counters with the final packed sizes.
  ByteArraySizeBits = BAB.BitAllocs[0] + BAB.BitAllocs[1] + BAB.BitAllocs[2] +
                      BAB.BitAllocs[3] + BAB.BitAllocs[4] + BAB.BitAllocs[5] +
                      BAB.BitAllocs[6] + BAB.BitAllocs[7];
  ByteArraySizeBytes = BAB.Bytes.size();
}
599
600/// Build a test that bit BitOffset is set in the type identifier that was
601/// lowered to TIL, which must be either an Inline or a ByteArray.
602Value *LowerTypeTestsModule::createBitSetTest(IRBuilder<> &B,
603 const TypeIdLowering &TIL,
604 Value *BitOffset) {
605 if (TIL.TheKind == TypeTestResolution::Inline) {
606 // If the bit set is sufficiently small, we can avoid a load by bit testing
607 // a constant.
608 return createMaskedBitTest(B, TIL.InlineBits, BitOffset);
609 } else {
610 Constant *ByteArray = TIL.TheByteArray;
611 if (AvoidReuse && !ImportSummary) {
612 // Each use of the byte array uses a different alias. This makes the
613 // backend less likely to reuse previously computed byte array addresses,
614 // improving the security of the CFI mechanism based on this pass.
615 // This won't work when importing because TheByteArray is external.
616 ByteArray = GlobalAlias::create(Int8Ty, 0, GlobalValue::PrivateLinkage,
617 "bits_use", ByteArray, &M);
618 }
619
620 Value *ByteAddr = B.CreateGEP(Int8Ty, ByteArray, BitOffset);
621 Value *Byte = B.CreateLoad(Int8Ty, ByteAddr);
622
623 Value *ByteAndMask =
624 B.CreateAnd(Byte, ConstantExpr::getPtrToInt(TIL.BitMask, Int8Ty));
625 return B.CreateICmpNE(ByteAndMask, ConstantInt::get(Int8Ty, 0));
626 }
627}
628
629static bool isKnownTypeIdMember(Metadata *TypeId, const DataLayout &DL,
630 Value *V, uint64_t COffset) {
631 if (auto GV = dyn_cast<GlobalObject>(V)) {
632 SmallVector<MDNode *, 2> Types;
633 GV->getMetadata(LLVMContext::MD_type, Types);
634 for (MDNode *Type : Types) {
635 if (Type->getOperand(1) != TypeId)
636 continue;
637 uint64_t Offset =
638 cast<ConstantInt>(
639 cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
640 ->getZExtValue();
641 if (COffset == Offset)
642 return true;
643 }
644 return false;
645 }
646
647 if (auto GEP = dyn_cast<GEPOperator>(V)) {
648 APInt APOffset(DL.getPointerSizeInBits(0), 0);
649 bool Result = GEP->accumulateConstantOffset(DL, APOffset);
650 if (!Result)
651 return false;
652 COffset += APOffset.getZExtValue();
653 return isKnownTypeIdMember(TypeId, DL, GEP->getPointerOperand(), COffset);
654 }
655
656 if (auto Op = dyn_cast<Operator>(V)) {
657 if (Op->getOpcode() == Instruction::BitCast)
658 return isKnownTypeIdMember(TypeId, DL, Op->getOperand(0), COffset);
659
660 if (Op->getOpcode() == Instruction::Select)
661 return isKnownTypeIdMember(TypeId, DL, Op->getOperand(1), COffset) &&
662 isKnownTypeIdMember(TypeId, DL, Op->getOperand(2), COffset);
663 }
664
665 return false;
666}
667
/// Lower a llvm.type.test call to its implementation. Returns the value to
/// replace the call with.
Value *LowerTypeTestsModule::lowerTypeTestCall(Metadata *TypeId, CallInst *CI,
                                               const TypeIdLowering &TIL) {
  // Unsat: no address can be a member of this type; fold to false.
  if (TIL.TheKind == TypeTestResolution::Unsat)
    return ConstantInt::getFalse(M.getContext());

  // If the tested pointer provably points at a member, fold to true.
  Value *Ptr = CI->getArgOperand(0);
  const DataLayout &DL = M.getDataLayout();
  if (isKnownTypeIdMember(TypeId, DL, Ptr, 0))
    return ConstantInt::getTrue(M.getContext());

  BasicBlock *InitialBB = CI->getParent();

  IRBuilder<> B(CI);

  Value *PtrAsInt = B.CreatePtrToInt(Ptr, IntPtrTy);

  Constant *OffsetedGlobalAsInt =
      ConstantExpr::getPtrToInt(TIL.OffsetedGlobal, IntPtrTy);
  // Single: the type has exactly one member, so a pointer comparison suffices.
  if (TIL.TheKind == TypeTestResolution::Single)
    return B.CreateICmpEQ(PtrAsInt, OffsetedGlobalAsInt);

  Value *PtrOffset = B.CreateSub(PtrAsInt, OffsetedGlobalAsInt);

  // We need to check that the offset both falls within our range and is
  // suitably aligned. We can check both properties at the same time by
  // performing a right rotate by log2(alignment) followed by an integer
  // comparison against the bitset size. The rotate will move the lower
  // order bits that need to be zero into the higher order bits of the
  // result, causing the comparison to fail if they are nonzero. The rotate
  // also conveniently gives us a bit offset to use during the load from
  // the bitset.
  Value *OffsetSHR =
      B.CreateLShr(PtrOffset, ConstantExpr::getZExt(TIL.AlignLog2, IntPtrTy));
  Value *OffsetSHL = B.CreateShl(
      PtrOffset, ConstantExpr::getZExt(
                     ConstantExpr::getSub(
                         ConstantInt::get(Int8Ty, DL.getPointerSizeInBits(0)),
                         TIL.AlignLog2),
                     IntPtrTy));
  Value *BitOffset = B.CreateOr(OffsetSHR, OffsetSHL);

  Value *OffsetInRange = B.CreateICmpULE(BitOffset, TIL.SizeM1);

  // If the bit set is all ones, testing against it is unnecessary.
  if (TIL.TheKind == TypeTestResolution::AllOnes)
    return OffsetInRange;

  // See if the intrinsic is used in the following common pattern:
  //   br(llvm.type.test(...), thenbb, elsebb)
  // where nothing happens between the type test and the br.
  // If so, create slightly simpler IR.
  if (CI->hasOneUse())
    if (auto *Br = dyn_cast<BranchInst>(*CI->user_begin()))
      if (CI->getNextNode() == Br) {
        // Split at the type test and branch on the range check directly,
        // carrying over the original branch's profile metadata.
        BasicBlock *Then = InitialBB->splitBasicBlock(CI->getIterator());
        BasicBlock *Else = Br->getSuccessor(1);
        BranchInst *NewBr = BranchInst::Create(Then, Else, OffsetInRange);
        NewBr->setMetadata(LLVMContext::MD_prof,
                           Br->getMetadata(LLVMContext::MD_prof));
        ReplaceInstWithInst(InitialBB->getTerminator(), NewBr);

        // Update phis in Else resulting from InitialBB being split
        for (auto &Phi : Else->phis())
          Phi.addIncoming(Phi.getIncomingValueForBlock(Then), InitialBB);

        IRBuilder<> ThenB(CI);
        return createBitSetTest(ThenB, TIL, BitOffset);
      }

  IRBuilder<> ThenB(SplitBlockAndInsertIfThen(OffsetInRange, CI, false));

  // Now that we know that the offset is in range and aligned, load the
  // appropriate bit from the bitset.
  Value *Bit = createBitSetTest(ThenB, TIL, BitOffset);

  // The value we want is 0 if we came directly from the initial block
  // (having failed the range or alignment checks), or the loaded bit if
  // we came from the block in which we loaded it.
  B.SetInsertPoint(CI);
  PHINode *P = B.CreatePHI(Int1Ty, 2);
  P->addIncoming(ConstantInt::get(Int1Ty, 0), InitialBB);
  P->addIncoming(Bit, ThenB.GetInsertBlock());
  return P;
}
754
755/// Given a disjoint set of type identifiers and globals, lay out the globals,
756/// build the bit sets and lower the llvm.type.test calls.
757void LowerTypeTestsModule::buildBitSetsFromGlobalVariables(
758 ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Globals) {
759 // Build a new global with the combined contents of the referenced globals.
760 // This global is a struct whose even-indexed elements contain the original
761 // contents of the referenced globals and whose odd-indexed elements contain
762 // any padding required to align the next element to the next power of 2.
763 std::vector<Constant *> GlobalInits;
764 const DataLayout &DL = M.getDataLayout();
765 for (GlobalTypeMember *G : Globals) {
766 GlobalVariable *GV = cast<GlobalVariable>(G->getGlobal());
767 GlobalInits.push_back(GV->getInitializer());
768 uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
769
770 // Compute the amount of padding required.
771 uint64_t Padding = NextPowerOf2(InitSize - 1) - InitSize;
772
773 // Experiments of different caps with Chromium on both x64 and ARM64
774 // have shown that the 32-byte cap generates the smallest binary on
775 // both platforms while different caps yield similar performance.
776 // (see https://lists.llvm.org/pipermail/llvm-dev/2018-July/124694.html)
777 if (Padding > 32)
778 Padding = alignTo(InitSize, 32) - InitSize;
779
780 GlobalInits.push_back(
781 ConstantAggregateZero::get(ArrayType::get(Int8Ty, Padding)));
782 }
783 if (!GlobalInits.empty())
784 GlobalInits.pop_back();
785 Constant *NewInit = ConstantStruct::getAnon(M.getContext(), GlobalInits);
786 auto *CombinedGlobal =
787 new GlobalVariable(M, NewInit->getType(), /*isConstant=*/true,
788 GlobalValue::PrivateLinkage, NewInit);
789
790 StructType *NewTy = cast<StructType>(NewInit->getType());
791 const StructLayout *CombinedGlobalLayout = DL.getStructLayout(NewTy);
792
793 // Compute the offsets of the original globals within the new global.
794 DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
795 for (unsigned I = 0; I != Globals.size(); ++I)
796 // Multiply by 2 to account for padding elements.
797 GlobalLayout[Globals[I]] = CombinedGlobalLayout->getElementOffset(I * 2);
798
799 lowerTypeTestCalls(TypeIds, CombinedGlobal, GlobalLayout);
800
801 // Build aliases pointing to offsets into the combined global for each
802 // global from which we built the combined global, and replace references
803 // to the original globals with references to the aliases.
804 for (unsigned I = 0; I != Globals.size(); ++I) {
805 GlobalVariable *GV = cast<GlobalVariable>(Globals[I]->getGlobal());
806
807 // Multiply by 2 to account for padding elements.
808 Constant *CombinedGlobalIdxs[] = {ConstantInt::get(Int32Ty, 0),
809 ConstantInt::get(Int32Ty, I * 2)};
810 Constant *CombinedGlobalElemPtr = ConstantExpr::getGetElementPtr(
811 NewInit->getType(), CombinedGlobal, CombinedGlobalIdxs);
812 assert(GV->getType()->getAddressSpace() == 0)((GV->getType()->getAddressSpace() == 0) ? static_cast<
void> (0) : __assert_fail ("GV->getType()->getAddressSpace() == 0"
, "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/IPO/LowerTypeTests.cpp"
, 812, __PRETTY_FUNCTION__))
;
813 GlobalAlias *GAlias =
814 GlobalAlias::create(NewTy->getElementType(I * 2), 0, GV->getLinkage(),
815 "", CombinedGlobalElemPtr, &M);
816 GAlias->setVisibility(GV->getVisibility());
817 GAlias->takeName(GV);
818 GV->replaceAllUsesWith(GAlias);
819 GV->eraseFromParent();
820 }
821}
822
823bool LowerTypeTestsModule::shouldExportConstantsAsAbsoluteSymbols() {
824 return (Arch == Triple::x86 || Arch == Triple::x86_64) &&
825 ObjectFormat == Triple::ELF;
826}
827
828/// Export the given type identifier so that ThinLTO backends may import it.
829/// Type identifiers are exported by adding coarse-grained information about how
830/// to test the type identifier to the summary, and creating symbols in the
831/// object file (aliases and absolute symbols) containing fine-grained
832/// information about the type identifier.
833///
834/// Returns a pointer to the location in which to store the bitmask, if
835/// applicable.
836uint8_t *LowerTypeTestsModule::exportTypeId(StringRef TypeId,
837 const TypeIdLowering &TIL) {
838 TypeTestResolution &TTRes =
839 ExportSummary->getOrInsertTypeIdSummary(TypeId).TTRes;
840 TTRes.TheKind = TIL.TheKind;
841
842 auto ExportGlobal = [&](StringRef Name, Constant *C) {
843 GlobalAlias *GA =
844 GlobalAlias::create(Int8Ty, 0, GlobalValue::ExternalLinkage,
845 "__typeid_" + TypeId + "_" + Name, C, &M);
846 GA->setVisibility(GlobalValue::HiddenVisibility);
847 };
848
849 auto ExportConstant = [&](StringRef Name, uint64_t &Storage, Constant *C) {
850 if (shouldExportConstantsAsAbsoluteSymbols())
851 ExportGlobal(Name, ConstantExpr::getIntToPtr(C, Int8PtrTy));
852 else
853 Storage = cast<ConstantInt>(C)->getZExtValue();
854 };
855
856 if (TIL.TheKind != TypeTestResolution::Unsat)
857 ExportGlobal("global_addr", TIL.OffsetedGlobal);
858
859 if (TIL.TheKind == TypeTestResolution::ByteArray ||
860 TIL.TheKind == TypeTestResolution::Inline ||
861 TIL.TheKind == TypeTestResolution::AllOnes) {
862 ExportConstant("align", TTRes.AlignLog2, TIL.AlignLog2);
863 ExportConstant("size_m1", TTRes.SizeM1, TIL.SizeM1);
864
865 uint64_t BitSize = cast<ConstantInt>(TIL.SizeM1)->getZExtValue() + 1;
866 if (TIL.TheKind == TypeTestResolution::Inline)
867 TTRes.SizeM1BitWidth = (BitSize <= 32) ? 5 : 6;
868 else
869 TTRes.SizeM1BitWidth = (BitSize <= 128) ? 7 : 32;
870 }
871
872 if (TIL.TheKind == TypeTestResolution::ByteArray) {
873 ExportGlobal("byte_array", TIL.TheByteArray);
874 if (shouldExportConstantsAsAbsoluteSymbols())
875 ExportGlobal("bit_mask", TIL.BitMask);
876 else
877 return &TTRes.BitMask;
878 }
879
880 if (TIL.TheKind == TypeTestResolution::Inline)
881 ExportConstant("inline_bits", TTRes.InlineBits, TIL.InlineBits);
882
883 return nullptr;
884}
885
/// Import a type identifier previously exported by exportTypeId: reconstruct
/// the TypeIdLowering from the summary plus the __typeid_* symbols emitted by
/// the exporting module.
LowerTypeTestsModule::TypeIdLowering
LowerTypeTestsModule::importTypeId(StringRef TypeId) {
  const TypeIdSummary *TidSummary = ImportSummary->getTypeIdSummary(TypeId);
  if (!TidSummary)
    return {}; // Unsat: no globals match this type id.
  const TypeTestResolution &TTRes = TidSummary->TTRes;

  TypeIdLowering TIL;
  TIL.TheKind = TTRes.TheKind;

  // Declare (or reuse) the external __typeid_<TypeId>_<Name> symbol emitted
  // by the exporting module, returned as an i8*.
  auto ImportGlobal = [&](StringRef Name) {
    // Give the global a type of length 0 so that it is not assumed not to alias
    // with any other global.
    Constant *C = M.getOrInsertGlobal(("__typeid_" + TypeId + "_" + Name).str(),
                                      Int8Arr0Ty);
    if (auto *GV = dyn_cast<GlobalVariable>(C))
      GV->setVisibility(GlobalValue::HiddenVisibility);
    C = ConstantExpr::getBitCast(C, Int8PtrTy);
    return C;
  };

  // Import a constant either directly from the summary (Const) or, when
  // constants were exported as absolute symbols, as a reference to the
  // corresponding symbol annotated with an !absolute_symbol range of
  // AbsWidth bits so the backend can fold it.
  auto ImportConstant = [&](StringRef Name, uint64_t Const, unsigned AbsWidth,
                            Type *Ty) {
    if (!shouldExportConstantsAsAbsoluteSymbols()) {
      // Summary-carried constant: materialize it directly.
      Constant *C =
          ConstantInt::get(isa<IntegerType>(Ty) ? Ty : Int64Ty, Const);
      if (!isa<IntegerType>(Ty))
        C = ConstantExpr::getIntToPtr(C, Ty);
      return C;
    }

    Constant *C = ImportGlobal(Name);
    auto *GV = cast<GlobalVariable>(C->stripPointerCasts());
    if (isa<IntegerType>(Ty))
      C = ConstantExpr::getPtrToInt(C, Ty);
    // Only attach the range metadata once per global.
    if (GV->getMetadata(LLVMContext::MD_absolute_symbol))
      return C;

    auto SetAbsRange = [&](uint64_t Min, uint64_t Max) {
      auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Min));
      auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Max));
      GV->setMetadata(LLVMContext::MD_absolute_symbol,
                      MDNode::get(M.getContext(), {MinC, MaxC}));
    };
    if (AbsWidth == IntPtrTy->getBitWidth())
      SetAbsRange(~0ull, ~0ull); // Full set.
    else
      SetAbsRange(0, 1ull << AbsWidth);
    return C;
  };

  if (TIL.TheKind != TypeTestResolution::Unsat)
    TIL.OffsetedGlobal = ImportGlobal("global_addr");

  if (TIL.TheKind == TypeTestResolution::ByteArray ||
      TIL.TheKind == TypeTestResolution::Inline ||
      TIL.TheKind == TypeTestResolution::AllOnes) {
    TIL.AlignLog2 = ImportConstant("align", TTRes.AlignLog2, 8, Int8Ty);
    TIL.SizeM1 =
        ImportConstant("size_m1", TTRes.SizeM1, TTRes.SizeM1BitWidth, IntPtrTy);
  }

  if (TIL.TheKind == TypeTestResolution::ByteArray) {
    TIL.TheByteArray = ImportGlobal("byte_array");
    TIL.BitMask = ImportConstant("bit_mask", TTRes.BitMask, 8, Int8PtrTy);
  }

  if (TIL.TheKind == TypeTestResolution::Inline)
    TIL.InlineBits = ImportConstant(
        "inline_bits", TTRes.InlineBits, 1 << TTRes.SizeM1BitWidth,
        TTRes.SizeM1BitWidth <= 5 ? Int32Ty : Int64Ty);

  return TIL;
}
960
961void LowerTypeTestsModule::importTypeTest(CallInst *CI) {
962 auto TypeIdMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));
963 if (!TypeIdMDVal)
964 report_fatal_error("Second argument of llvm.type.test must be metadata");
965
966 auto TypeIdStr = dyn_cast<MDString>(TypeIdMDVal->getMetadata());
967 if (!TypeIdStr)
968 report_fatal_error(
969 "Second argument of llvm.type.test must be a metadata string");
970
971 TypeIdLowering TIL = importTypeId(TypeIdStr->getString());
972 Value *Lowered = lowerTypeTestCall(TypeIdStr, CI, TIL);
973 CI->replaceAllUsesWith(Lowered);
974 CI->eraseFromParent();
975}
976
977// ThinLTO backend: the function F has a jump table entry; update this module
978// accordingly. isDefinition describes the type of the jump table entry.
979void LowerTypeTestsModule::importFunction(Function *F, bool isDefinition) {
980 assert(F->getType()->getAddressSpace() == 0)((F->getType()->getAddressSpace() == 0) ? static_cast<
void> (0) : __assert_fail ("F->getType()->getAddressSpace() == 0"
, "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/IPO/LowerTypeTests.cpp"
, 980, __PRETTY_FUNCTION__))
;
981
982 GlobalValue::VisibilityTypes Visibility = F->getVisibility();
983 std::string Name = F->getName();
984
985 if (F->isDeclarationForLinker() && isDefinition) {
986 // Non-dso_local functions may be overriden at run time,
987 // don't short curcuit them
988 if (F->isDSOLocal()) {
989 Function *RealF = Function::Create(F->getFunctionType(),
990 GlobalValue::ExternalLinkage,
991 F->getAddressSpace(),
992 Name + ".cfi", &M);
993 RealF->setVisibility(GlobalVariable::HiddenVisibility);
994 replaceDirectCalls(F, RealF);
995 }
996 return;
997 }
998
999 Function *FDecl;
1000 if (F->isDeclarationForLinker() && !isDefinition) {
1001 // Declaration of an external function.
1002 FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
1003 F->getAddressSpace(), Name + ".cfi_jt", &M);
1004 FDecl->setVisibility(GlobalValue::HiddenVisibility);
1005 } else if (isDefinition) {
1006 F->setName(Name + ".cfi");
1007 F->setLinkage(GlobalValue::ExternalLinkage);
1008 FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
1009 F->getAddressSpace(), Name, &M);
1010 FDecl->setVisibility(Visibility);
1011 Visibility = GlobalValue::HiddenVisibility;
1012
1013 // Delete aliases pointing to this function, they'll be re-created in the
1014 // merged output
1015 SmallVector<GlobalAlias*, 4> ToErase;
1016 for (auto &U : F->uses()) {
1017 if (auto *A = dyn_cast<GlobalAlias>(U.getUser())) {
1018 Function *AliasDecl = Function::Create(
1019 F->getFunctionType(), GlobalValue::ExternalLinkage,
1020 F->getAddressSpace(), "", &M);
1021 AliasDecl->takeName(A);
1022 A->replaceAllUsesWith(AliasDecl);
1023 ToErase.push_back(A);
1024 }
1025 }
1026 for (auto *A : ToErase)
1027 A->eraseFromParent();
1028 } else {
1029 // Function definition without type metadata, where some other translation
1030 // unit contained a declaration with type metadata. This normally happens
1031 // during mixed CFI + non-CFI compilation. We do nothing with the function
1032 // so that it is treated the same way as a function defined outside of the
1033 // LTO unit.
1034 return;
1035 }
1036
1037 if (F->isWeakForLinker())
1038 replaceWeakDeclarationWithJumpTablePtr(F, FDecl, isDefinition);
1039 else
1040 replaceCfiUses(F, FDecl, isDefinition);
1041
1042 // Set visibility late because it's used in replaceCfiUses() to determine
1043 // whether uses need to to be replaced.
1044 F->setVisibility(Visibility);
1045}
1046
1047void LowerTypeTestsModule::lowerTypeTestCalls(
1048 ArrayRef<Metadata *> TypeIds, Constant *CombinedGlobalAddr,
1049 const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout) {
1050 CombinedGlobalAddr = ConstantExpr::getBitCast(CombinedGlobalAddr, Int8PtrTy);
1051
1052 // For each type identifier in this disjoint set...
1053 for (Metadata *TypeId : TypeIds) {
1054 // Build the bitset.
1055 BitSetInfo BSI = buildBitSet(TypeId, GlobalLayout);
1056 LLVM_DEBUG({do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lowertypetests")) { { if (auto MDS = dyn_cast<MDString>
(TypeId)) dbgs() << MDS->getString() << ": "; else
dbgs() << "<unnamed>: "; BSI.print(dbgs()); }; }
} while (false)
1057 if (auto MDS = dyn_cast<MDString>(TypeId))do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lowertypetests")) { { if (auto MDS = dyn_cast<MDString>
(TypeId)) dbgs() << MDS->getString() << ": "; else
dbgs() << "<unnamed>: "; BSI.print(dbgs()); }; }
} while (false)
1058 dbgs() << MDS->getString() << ": ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lowertypetests")) { { if (auto MDS = dyn_cast<MDString>
(TypeId)) dbgs() << MDS->getString() << ": "; else
dbgs() << "<unnamed>: "; BSI.print(dbgs()); }; }
} while (false)
1059 elsedo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lowertypetests")) { { if (auto MDS = dyn_cast<MDString>
(TypeId)) dbgs() << MDS->getString() << ": "; else
dbgs() << "<unnamed>: "; BSI.print(dbgs()); }; }
} while (false)
1060 dbgs() << "<unnamed>: ";do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lowertypetests")) { { if (auto MDS = dyn_cast<MDString>
(TypeId)) dbgs() << MDS->getString() << ": "; else
dbgs() << "<unnamed>: "; BSI.print(dbgs()); }; }
} while (false)
1061 BSI.print(dbgs());do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lowertypetests")) { { if (auto MDS = dyn_cast<MDString>
(TypeId)) dbgs() << MDS->getString() << ": "; else
dbgs() << "<unnamed>: "; BSI.print(dbgs()); }; }
} while (false)
1062 })do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType
("lowertypetests")) { { if (auto MDS = dyn_cast<MDString>
(TypeId)) dbgs() << MDS->getString() << ": "; else
dbgs() << "<unnamed>: "; BSI.print(dbgs()); }; }
} while (false)
;
1063
1064 ByteArrayInfo *BAI = nullptr;
1065 TypeIdLowering TIL;
1066 TIL.OffsetedGlobal = ConstantExpr::getGetElementPtr(
1067 Int8Ty, CombinedGlobalAddr, ConstantInt::get(IntPtrTy, BSI.ByteOffset)),
1068 TIL.AlignLog2 = ConstantInt::get(Int8Ty, BSI.AlignLog2);
1069 TIL.SizeM1 = ConstantInt::get(IntPtrTy, BSI.BitSize - 1);
1070 if (BSI.isAllOnes()) {
1071 TIL.TheKind = (BSI.BitSize == 1) ? TypeTestResolution::Single
1072 : TypeTestResolution::AllOnes;
1073 } else if (BSI.BitSize <= 64) {
1074 TIL.TheKind = TypeTestResolution::Inline;
1075 uint64_t InlineBits = 0;
1076 for (auto Bit : BSI.Bits)
1077 InlineBits |= uint64_t(1) << Bit;
1078 if (InlineBits == 0)
1079 TIL.TheKind = TypeTestResolution::Unsat;
1080 else
1081 TIL.InlineBits = ConstantInt::get(
1082 (BSI.BitSize <= 32) ? Int32Ty : Int64Ty, InlineBits);
1083 } else {
1084 TIL.TheKind = TypeTestResolution::ByteArray;
1085 ++NumByteArraysCreated;
1086 BAI = createByteArray(BSI);
1087 TIL.TheByteArray = BAI->ByteArray;
1088 TIL.BitMask = BAI->MaskGlobal;
1089 }
1090
1091 TypeIdUserInfo &TIUI = TypeIdUsers[TypeId];
1092
1093 if (TIUI.IsExported) {
1094 uint8_t *MaskPtr = exportTypeId(cast<MDString>(TypeId)->getString(), TIL);
1095 if (BAI)
1096 BAI->MaskPtr = MaskPtr;
1097 }
1098
1099 // Lower each call to llvm.type.test for this type identifier.
1100 for (CallInst *CI : TIUI.CallSites) {
1101 ++NumTypeTestCallsLowered;
1102 Value *Lowered = lowerTypeTestCall(TypeId, CI, TIL);
1103 CI->replaceAllUsesWith(Lowered);
1104 CI->eraseFromParent();
1105 }
1106 }
1107}
1108
1109void LowerTypeTestsModule::verifyTypeMDNode(GlobalObject *GO, MDNode *Type) {
1110 if (Type->getNumOperands() != 2)
1111 report_fatal_error("All operands of type metadata must have 2 elements");
1112
1113 if (GO->isThreadLocal())
1114 report_fatal_error("Bit set element may not be thread-local");
1115 if (isa<GlobalVariable>(GO) && GO->hasSection())
1116 report_fatal_error(
1117 "A member of a type identifier may not have an explicit section");
1118
1119 // FIXME: We previously checked that global var member of a type identifier
1120 // must be a definition, but the IR linker may leave type metadata on
1121 // declarations. We should restore this check after fixing PR31759.
1122
1123 auto OffsetConstMD = dyn_cast<ConstantAsMetadata>(Type->getOperand(0));
1124 if (!OffsetConstMD)
1125 report_fatal_error("Type offset must be a constant");
1126 auto OffsetInt = dyn_cast<ConstantInt>(OffsetConstMD->getValue());
1127 if (!OffsetInt)
1128 report_fatal_error("Type offset must be an integer constant");
1129}
1130
// Jump table entry sizes in bytes per target; these match the instruction
// sequences emitted by createJumpTableEntry (x86: jmp + 3x int3 = 8 bytes;
// ARM/Thumb/AArch64: one branch = 4 bytes).
static const unsigned kX86JumpTableEntrySize = 8;
static const unsigned kARMJumpTableEntrySize = 4;
1133
1134unsigned LowerTypeTestsModule::getJumpTableEntrySize() {
1135 switch (Arch) {
1136 case Triple::x86:
1137 case Triple::x86_64:
1138 return kX86JumpTableEntrySize;
1139 case Triple::arm:
1140 case Triple::thumb:
1141 case Triple::aarch64:
1142 return kARMJumpTableEntrySize;
1143 default:
1144 report_fatal_error("Unsupported architecture for jump tables");
1145 }
1146}
1147
1148// Create a jump table entry for the target. This consists of an instruction
1149// sequence containing a relative branch to Dest. Appends inline asm text,
1150// constraints and arguments to AsmOS, ConstraintOS and AsmArgs.
1151void LowerTypeTestsModule::createJumpTableEntry(
1152 raw_ostream &AsmOS, raw_ostream &ConstraintOS,
1153 Triple::ArchType JumpTableArch, SmallVectorImpl<Value *> &AsmArgs,
1154 Function *Dest) {
1155 unsigned ArgIndex = AsmArgs.size();
1156
1157 if (JumpTableArch == Triple::x86 || JumpTableArch == Triple::x86_64) {
1158 AsmOS << "jmp ${" << ArgIndex << ":c}@plt\n";
1159 AsmOS << "int3\nint3\nint3\n";
1160 } else if (JumpTableArch == Triple::arm || JumpTableArch == Triple::aarch64) {
1161 AsmOS << "b $" << ArgIndex << "\n";
1162 } else if (JumpTableArch == Triple::thumb) {
1163 AsmOS << "b.w $" << ArgIndex << "\n";
1164 } else {
1165 report_fatal_error("Unsupported architecture for jump tables");
1166 }
1167
1168 ConstraintOS << (ArgIndex > 0 ? ",s" : "s");
1169 AsmArgs.push_back(Dest);
1170}
1171
1172Type *LowerTypeTestsModule::getJumpTableEntryType() {
1173 return ArrayType::get(Int8Ty, getJumpTableEntrySize());
1174}
1175
1176/// Given a disjoint set of type identifiers and functions, build the bit sets
1177/// and lower the llvm.type.test calls, architecture dependently.
1178void LowerTypeTestsModule::buildBitSetsFromFunctions(
1179 ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
1180 if (Arch == Triple::x86 || Arch == Triple::x86_64 || Arch == Triple::arm ||
23
Assuming the condition is false
24
Assuming the condition is false
25
Assuming the condition is false
28
Taking true branch
1181 Arch == Triple::thumb || Arch == Triple::aarch64)
26
Assuming the condition is false
27
Assuming the condition is true
1182 buildBitSetsFromFunctionsNative(TypeIds, Functions);
29
Calling 'LowerTypeTestsModule::buildBitSetsFromFunctionsNative'
1183 else if (Arch == Triple::wasm32 || Arch == Triple::wasm64)
1184 buildBitSetsFromFunctionsWASM(TypeIds, Functions);
1185 else
1186 report_fatal_error("Unsupported architecture for jump tables");
1187}
1188
1189void LowerTypeTestsModule::moveInitializerToModuleConstructor(
1190 GlobalVariable *GV) {
1191 if (WeakInitializerFn == nullptr) {
1192 WeakInitializerFn = Function::Create(
1193 FunctionType::get(Type::getVoidTy(M.getContext()),
1194 /* IsVarArg */ false),
1195 GlobalValue::InternalLinkage,
1196 M.getDataLayout().getProgramAddressSpace(),
1197 "__cfi_global_var_init", &M);
1198 BasicBlock *BB =
1199 BasicBlock::Create(M.getContext(), "entry", WeakInitializerFn);
1200 ReturnInst::Create(M.getContext(), BB);
1201 WeakInitializerFn->setSection(
1202 ObjectFormat == Triple::MachO
1203 ? "__TEXT,__StaticInit,regular,pure_instructions"
1204 : ".text.startup");
1205 // This code is equivalent to relocation application, and should run at the
1206 // earliest possible time (i.e. with the highest priority).
1207 appendToGlobalCtors(M, WeakInitializerFn, /* Priority */ 0);
1208 }
1209
1210 IRBuilder<> IRB(WeakInitializerFn->getEntryBlock().getTerminator());
1211 GV->setConstant(false);
1212 IRB.CreateAlignedStore(GV->getInitializer(), GV, GV->getAlignment());
1213 GV->setInitializer(Constant::getNullValue(GV->getValueType()));
1214}
1215
1216void LowerTypeTestsModule::findGlobalVariableUsersOf(
1217 Constant *C, SmallSetVector<GlobalVariable *, 8> &Out) {
1218 for (auto *U : C->users()){
1219 if (auto *GV = dyn_cast<GlobalVariable>(U))
1220 Out.insert(GV);
1221 else if (auto *C2 = dyn_cast<Constant>(U))
1222 findGlobalVariableUsersOf(C2, Out);
1223 }
1224}
1225
// Replace all uses of F with (F ? JT : 0). The select is needed because F is
// weak: if it resolves to null at link time, a null pointer (not a jump table
// entry) must be observed.
void LowerTypeTestsModule::replaceWeakDeclarationWithJumpTablePtr(
    Function *F, Constant *JT, bool IsDefinition) {
  // The target expression can not appear in a constant initializer on most
  // (all?) targets. Switch to a runtime initializer.
  SmallSetVector<GlobalVariable *, 8> GlobalVarUsers;
  findGlobalVariableUsersOf(F, GlobalVarUsers);
  for (auto GV : GlobalVarUsers)
    moveInitializerToModuleConstructor(GV);

  // Can not RAUW F with an expression that uses F. Replace with a temporary
  // placeholder first.
  Function *PlaceholderFn =
      Function::Create(cast<FunctionType>(F->getValueType()),
                       GlobalValue::ExternalWeakLinkage,
                       F->getAddressSpace(), "", &M);
  replaceCfiUses(F, PlaceholderFn, IsDefinition);

  // Build (F != null) ? JT : null, then swap it in for the placeholder and
  // discard the placeholder. Order matters: the select must be created after
  // the uses of F have been redirected, or it would be rewritten too.
  Constant *Target = ConstantExpr::getSelect(
      ConstantExpr::getICmp(CmpInst::ICMP_NE, F,
                            Constant::getNullValue(F->getType())),
      JT, Constant::getNullValue(F->getType()));
  PlaceholderFn->replaceAllUsesWith(Target);
  PlaceholderFn->eraseFromParent();
}
1251
1252static bool isThumbFunction(Function *F, Triple::ArchType ModuleArch) {
1253 Attribute TFAttr = F->getFnAttribute("target-features");
1254 if (!TFAttr.hasAttribute(Attribute::None)) {
1255 SmallVector<StringRef, 6> Features;
1256 TFAttr.getValueAsString().split(Features, ',');
1257 for (StringRef Feature : Features) {
1258 if (Feature == "-thumb-mode")
1259 return false;
1260 else if (Feature == "+thumb-mode")
1261 return true;
1262 }
1263 }
1264
1265 return ModuleArch == Triple::thumb;
1266}
1267
1268// Each jump table must be either ARM or Thumb as a whole for the bit-test math
1269// to work. Pick one that matches the majority of members to minimize interop
1270// veneers inserted by the linker.
1271static Triple::ArchType
1272selectJumpTableArmEncoding(ArrayRef<GlobalTypeMember *> Functions,
1273 Triple::ArchType ModuleArch) {
1274 if (ModuleArch != Triple::arm && ModuleArch != Triple::thumb)
1275 return ModuleArch;
1276
1277 unsigned ArmCount = 0, ThumbCount = 0;
1278 for (const auto GTM : Functions) {
1279 if (!GTM->isDefinition()) {
1280 // PLT stubs are always ARM.
1281 ++ArmCount;
1282 continue;
1283 }
1284
1285 Function *F = cast<Function>(GTM->getGlobal());
1286 ++(isThumbFunction(F, ModuleArch) ? ThumbCount : ArmCount);
1287 }
1288
1289 return ArmCount > ThumbCount ? Triple::arm : Triple::thumb;
1290}
1291
1292void LowerTypeTestsModule::createJumpTable(
1293 Function *F, ArrayRef<GlobalTypeMember *> Functions) {
1294 std::string AsmStr, ConstraintStr;
1295 raw_string_ostream AsmOS(AsmStr), ConstraintOS(ConstraintStr);
1296 SmallVector<Value *, 16> AsmArgs;
1297 AsmArgs.reserve(Functions.size() * 2);
1298
1299 Triple::ArchType JumpTableArch = selectJumpTableArmEncoding(Functions, Arch);
1300
1301 for (unsigned I = 0; I != Functions.size(); ++I)
1302 createJumpTableEntry(AsmOS, ConstraintOS, JumpTableArch, AsmArgs,
1303 cast<Function>(Functions[I]->getGlobal()));
1304
1305 // Align the whole table by entry size.
1306 F->setAlignment(getJumpTableEntrySize());
1307 // Skip prologue.
1308 // Disabled on win32 due to https://llvm.org/bugs/show_bug.cgi?id=28641#c3.
1309 // Luckily, this function does not get any prologue even without the
1310 // attribute.
1311 if (OS != Triple::Win32)
1312 F->addFnAttr(Attribute::Naked);
1313 if (JumpTableArch == Triple::arm)
1314 F->addFnAttr("target-features", "-thumb-mode");
1315 if (JumpTableArch == Triple::thumb) {
1316 F->addFnAttr("target-features", "+thumb-mode");
1317 // Thumb jump table assembly needs Thumb2. The following attribute is added
1318 // by Clang for -march=armv7.
1319 F->addFnAttr("target-cpu", "cortex-a8");
1320 }
1321 // Make sure we don't emit .eh_frame for this function.
1322 F->addFnAttr(Attribute::NoUnwind);
1323
1324 BasicBlock *BB = BasicBlock::Create(M.getContext(), "entry", F);
1325 IRBuilder<> IRB(BB);
1326
1327 SmallVector<Type *, 16> ArgTypes;
1328 ArgTypes.reserve(AsmArgs.size());
1329 for (const auto &Arg : AsmArgs)
1330 ArgTypes.push_back(Arg->getType());
1331 InlineAsm *JumpTableAsm =
1332 InlineAsm::get(FunctionType::get(IRB.getVoidTy(), ArgTypes, false),
1333 AsmOS.str(), ConstraintOS.str(),
1334 /*hasSideEffects=*/true);
1335
1336 IRB.CreateCall(JumpTableAsm, AsmArgs);
1337 IRB.CreateUnreachable();
1338}
1339
1340/// Given a disjoint set of type identifiers and functions, build a jump table
1341/// for the functions, build the bit sets and lower the llvm.type.test calls.
1342void LowerTypeTestsModule::buildBitSetsFromFunctionsNative(
1343 ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
1344 // Unlike the global bitset builder, the function bitset builder cannot
1345 // re-arrange functions in a particular order and base its calculations on the
1346 // layout of the functions' entry points, as we have no idea how large a
1347 // particular function will end up being (the size could even depend on what
1348 // this pass does!) Instead, we build a jump table, which is a block of code
1349 // consisting of one branch instruction for each of the functions in the bit
1350 // set that branches to the target function, and redirect any taken function
1351 // addresses to the corresponding jump table entry. In the object file's
1352 // symbol table, the symbols for the target functions also refer to the jump
1353 // table entries, so that addresses taken outside the module will pass any
1354 // verification done inside the module.
1355 //
1356 // In more concrete terms, suppose we have three functions f, g, h which are
1357 // of the same type, and a function foo that returns their addresses:
1358 //
1359 // f:
1360 // mov 0, %eax
1361 // ret
1362 //
1363 // g:
1364 // mov 1, %eax
1365 // ret
1366 //
1367 // h:
1368 // mov 2, %eax
1369 // ret
1370 //
1371 // foo:
1372 // mov f, %eax
1373 // mov g, %edx
1374 // mov h, %ecx
1375 // ret
1376 //
1377 // We output the jump table as module-level inline asm string. The end result
1378 // will (conceptually) look like this:
1379 //
1380 // f = .cfi.jumptable
1381 // g = .cfi.jumptable + 4
1382 // h = .cfi.jumptable + 8
1383 // .cfi.jumptable:
1384 // jmp f.cfi ; 5 bytes
1385 // int3 ; 1 byte
1386 // int3 ; 1 byte
1387 // int3 ; 1 byte
1388 // jmp g.cfi ; 5 bytes
1389 // int3 ; 1 byte
1390 // int3 ; 1 byte
1391 // int3 ; 1 byte
1392 // jmp h.cfi ; 5 bytes
1393 // int3 ; 1 byte
1394 // int3 ; 1 byte
1395 // int3 ; 1 byte
1396 //
1397 // f.cfi:
1398 // mov 0, %eax
1399 // ret
1400 //
1401 // g.cfi:
1402 // mov 1, %eax
1403 // ret
1404 //
1405 // h.cfi:
1406 // mov 2, %eax
1407 // ret
1408 //
1409 // foo:
1410 // mov f, %eax
1411 // mov g, %edx
1412 // mov h, %ecx
1413 // ret
1414 //
1415 // Because the addresses of f, g, h are evenly spaced at a power of 2, in the
1416 // normal case the check can be carried out using the same kind of simple
1417 // arithmetic that we normally use for globals.
1418
1419 // FIXME: find a better way to represent the jumptable in the IR.
1420 assert(!Functions.empty())((!Functions.empty()) ? static_cast<void> (0) : __assert_fail
("!Functions.empty()", "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/IPO/LowerTypeTests.cpp"
, 1420, __PRETTY_FUNCTION__))
;
30
Assuming the condition is true
31
'?' condition is true
1421
1422 // Build a simple layout based on the regular layout of jump tables.
1423 DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
1424 unsigned EntrySize = getJumpTableEntrySize();
1425 for (unsigned I = 0; I != Functions.size(); ++I)
32
Assuming the condition is false
33
Loop condition is false. Execution continues on line 1429
1426 GlobalLayout[Functions[I]] = I * EntrySize;
1427
1428 Function *JumpTableFn =
1429 Function::Create(FunctionType::get(Type::getVoidTy(M.getContext()),
1430 /* IsVarArg */ false),
1431 GlobalValue::PrivateLinkage,
1432 M.getDataLayout().getProgramAddressSpace(),
1433 ".cfi.jumptable", &M);
1434 ArrayType *JumpTableType =
1435 ArrayType::get(getJumpTableEntryType(), Functions.size());
1436 auto JumpTable =
1437 ConstantExpr::getPointerCast(JumpTableFn, JumpTableType->getPointerTo(0));
1438
1439 lowerTypeTestCalls(TypeIds, JumpTable, GlobalLayout);
1440
1441 // Build aliases pointing to offsets into the jump table, and replace
1442 // references to the original functions with references to the aliases.
1443 for (unsigned I = 0; I != Functions.size(); ++I) {
34
Assuming the condition is true
35
Loop condition is true. Entering loop body
1444 Function *F = cast<Function>(Functions[I]->getGlobal());
1445 bool IsDefinition = Functions[I]->isDefinition();
1446
1447 Constant *CombinedGlobalElemPtr = ConstantExpr::getBitCast(
1448 ConstantExpr::getInBoundsGetElementPtr(
1449 JumpTableType, JumpTable,
1450 ArrayRef<Constant *>{ConstantInt::get(IntPtrTy, 0),
1451 ConstantInt::get(IntPtrTy, I)}),
1452 F->getType());
1453 if (Functions[I]->isExported()) {
36
Assuming the condition is true
37
Taking true branch
1454 if (IsDefinition) {
38
Assuming 'IsDefinition' is not equal to 0
39
Taking true branch
1455 ExportSummary->cfiFunctionDefs().insert(F->getName());
40
Called C++ object pointer is null
1456 } else {
1457 GlobalAlias *JtAlias = GlobalAlias::create(
1458 F->getValueType(), 0, GlobalValue::ExternalLinkage,
1459 F->getName() + ".cfi_jt", CombinedGlobalElemPtr, &M);
1460 JtAlias->setVisibility(GlobalValue::HiddenVisibility);
1461 ExportSummary->cfiFunctionDecls().insert(F->getName());
1462 }
1463 }
1464 if (!IsDefinition) {
1465 if (F->isWeakForLinker())
1466 replaceWeakDeclarationWithJumpTablePtr(F, CombinedGlobalElemPtr, IsDefinition);
1467 else
1468 replaceCfiUses(F, CombinedGlobalElemPtr, IsDefinition);
1469 } else {
1470 assert(F->getType()->getAddressSpace() == 0)((F->getType()->getAddressSpace() == 0) ? static_cast<
void> (0) : __assert_fail ("F->getType()->getAddressSpace() == 0"
, "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/IPO/LowerTypeTests.cpp"
, 1470, __PRETTY_FUNCTION__))
;
1471
1472 GlobalAlias *FAlias = GlobalAlias::create(
1473 F->getValueType(), 0, F->getLinkage(), "", CombinedGlobalElemPtr, &M);
1474 FAlias->setVisibility(F->getVisibility());
1475 FAlias->takeName(F);
1476 if (FAlias->hasName())
1477 F->setName(FAlias->getName() + ".cfi");
1478 replaceCfiUses(F, FAlias, IsDefinition);
1479 if (!F->hasLocalLinkage())
1480 F->setVisibility(GlobalVariable::HiddenVisibility);
1481 }
1482 }
1483
1484 createJumpTable(JumpTableFn, Functions);
1485}
1486
1487/// Assign a dummy layout using an incrementing counter, tag each function
1488/// with its index represented as metadata, and lower each type test to an
1489/// integer range comparison. During generation of the indirect function call
1490/// table in the backend, it will assign the given indexes.
1491/// Note: Dynamic linking is not supported, as the WebAssembly ABI has not yet
1492/// been finalized.
1493void LowerTypeTestsModule::buildBitSetsFromFunctionsWASM(
1494 ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
1495 assert(!Functions.empty())((!Functions.empty()) ? static_cast<void> (0) : __assert_fail
("!Functions.empty()", "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/IPO/LowerTypeTests.cpp"
, 1495, __PRETTY_FUNCTION__))
;
1496
1497 // Build consecutive monotonic integer ranges for each call target set
1498 DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
1499
1500 for (GlobalTypeMember *GTM : Functions) {
1501 Function *F = cast<Function>(GTM->getGlobal());
1502
1503 // Skip functions that are not address taken, to avoid bloating the table
1504 if (!F->hasAddressTaken())
1505 continue;
1506
1507 // Store metadata with the index for each function
1508 MDNode *MD = MDNode::get(F->getContext(),
1509 ArrayRef<Metadata *>(ConstantAsMetadata::get(
1510 ConstantInt::get(Int64Ty, IndirectIndex))));
1511 F->setMetadata("wasm.index", MD);
1512
1513 // Assign the counter value
1514 GlobalLayout[GTM] = IndirectIndex++;
1515 }
1516
1517 // The indirect function table index space starts at zero, so pass a NULL
1518 // pointer as the subtracted "jump table" offset.
1519 lowerTypeTestCalls(TypeIds, ConstantPointerNull::get(Int32PtrTy),
1520 GlobalLayout);
1521}
1522
 // Lower the type tests for one disjoint equivalence class of type
 // identifiers, globals and branch funnels. The members must be either all
 // global variables or all functions; a mixture is a fatal error (checked
 // below).
1523void LowerTypeTestsModule::buildBitSetsFromDisjointSet(
1524 ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Globals,
1525 ArrayRef<ICallBranchFunnel *> ICallBranchFunnels) {
 // Map each type identifier to its position in TypeIds.
1526 DenseMap<Metadata *, uint64_t> TypeIdIndices;
1527 for (unsigned I = 0; I != TypeIds.size(); ++I)
16
Assuming the condition is false
17
Loop condition is false. Execution continues on line 1532
1528 TypeIdIndices[TypeIds[I]] = I;
1529
1530 // For each type identifier, build a set of indices that refer to members of
1531 // the type identifier.
1532 std::vector<std::set<uint64_t>> TypeMembers(TypeIds.size());
1533 unsigned GlobalIndex = 0;
1534 DenseMap<GlobalTypeMember *, uint64_t> GlobalIndices;
1535 for (GlobalTypeMember *GTM : Globals) {
18
Assuming '__begin1' is equal to '__end1'
1536 for (MDNode *Type : GTM->types()) {
1537 // Type = { offset, type identifier }
1538 auto I = TypeIdIndices.find(Type->getOperand(1));
1539 if (I != TypeIdIndices.end())
1540 TypeMembers[I->second].insert(GlobalIndex);
1541 }
1542 GlobalIndices[GTM] = GlobalIndex;
1543 GlobalIndex++;
1544 }
1545
 // Each branch funnel contributes one extra fragment containing the indices
 // of all of its targets, so the layout builder places them close together.
1546 for (ICallBranchFunnel *JT : ICallBranchFunnels) {
19
Assuming '__begin1' is equal to '__end1'
1547 TypeMembers.emplace_back();
1548 std::set<uint64_t> &TMSet = TypeMembers.back();
1549 for (GlobalTypeMember *T : JT->targets())
1550 TMSet.insert(GlobalIndices[T]);
1551 }
1552
1553 // Order the sets of indices by size. The GlobalLayoutBuilder works best
1554 // when given small index sets first.
1555 llvm::stable_sort(TypeMembers, [](const std::set<uint64_t> &O1,
1556 const std::set<uint64_t> &O2) {
1557 return O1.size() < O2.size();
1558 });
1559
1560 // Create a GlobalLayoutBuilder and provide it with index sets as layout
1561 // fragments. The GlobalLayoutBuilder tries to lay out members of fragments as
1562 // close together as possible.
1563 GlobalLayoutBuilder GLB(Globals.size());
1564 for (auto &&MemSet : TypeMembers)
1565 GLB.addFragment(MemSet);
1566
1567 // Build a vector of globals with the computed layout.
 // An empty Globals list is treated as the global-variable case.
1568 bool IsGlobalSet =
1569 Globals.empty() || isa<GlobalVariable>(Globals[0]->getGlobal());
20
Assuming the condition is false
1570 std::vector<GlobalTypeMember *> OrderedGTMs(Globals.size());
1571 auto OGTMI = OrderedGTMs.begin();
 // Flatten the computed fragments into OrderedGTMs, verifying homogeneity
 // (all-variables or all-functions) as we go.
1572 for (auto &&F : GLB.Fragments) {
1573 for (auto &&Offset : F) {
1574 if (IsGlobalSet != isa<GlobalVariable>(Globals[Offset]->getGlobal()))
1575 report_fatal_error("Type identifier may not contain both global "
1576 "variables and functions");
1577 *OGTMI++ = Globals[Offset];
1578 }
1579 }
1580
1581 // Build the bitsets from this disjoint set.
1582 if (IsGlobalSet)
21
Taking false branch
1583 buildBitSetsFromGlobalVariables(TypeIds, OrderedGTMs);
1584 else
1585 buildBitSetsFromFunctions(TypeIds, OrderedGTMs);
22
Calling 'LowerTypeTestsModule::buildBitSetsFromFunctions'
1586}
1587
1588/// Lower all type tests in this module.
1589LowerTypeTestsModule::LowerTypeTestsModule(
1590 Module &M, ModuleSummaryIndex *ExportSummary,
1591 const ModuleSummaryIndex *ImportSummary)
1592 : M(M), ExportSummary(ExportSummary), ImportSummary(ImportSummary) {
1593 assert(!(ExportSummary && ImportSummary))((!(ExportSummary && ImportSummary)) ? static_cast<
void> (0) : __assert_fail ("!(ExportSummary && ImportSummary)"
, "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/IPO/LowerTypeTests.cpp"
, 1593, __PRETTY_FUNCTION__))
;
1594 Triple TargetTriple(M.getTargetTriple());
1595 Arch = TargetTriple.getArch();
1596 OS = TargetTriple.getOS();
1597 ObjectFormat = TargetTriple.getObjectFormat();
1598}
1599
 // Testing entry point: drives lower() with a summary read from / written to
 // YAML files named by command-line options, instead of a real LTO summary.
1600bool LowerTypeTestsModule::runForTesting(Module &M) {
  // Scratch summary index for both the read and write paths; HaveGVs=false
  // because it is populated from YAML, not from IR.
1601 ModuleSummaryIndex Summary(/*HaveGVs=*/false);
1602
1603 // Handle the command-line summary arguments. This code is for testing
1604 // purposes only, so we handle errors directly.
1605 if (!ClReadSummary.empty()) {
1606 ExitOnError ExitOnErr("-lowertypetests-read-summary: " + ClReadSummary +
1607 ": ");
1608 auto ReadSummaryFile =
1609 ExitOnErr(errorOrToExpected(MemoryBuffer::getFile(ClReadSummary)));
1610
  // Deserialize the YAML into Summary; any parse error aborts via ExitOnErr.
1611 yaml::Input In(ReadSummaryFile->getBuffer());
1612 In >> Summary;
1613 ExitOnErr(errorCodeToError(In.error()));
1614 }
1615
  // Run the real pass, wiring the scratch summary in as either the export or
  // the import summary depending on -lowertypetests-summary-action.
1616 bool Changed =
1617 LowerTypeTestsModule(
1618 M, ClSummaryAction == PassSummaryAction::Export ? &Summary : nullptr,
1619 ClSummaryAction == PassSummaryAction::Import ? &Summary : nullptr)
1620 .lower();
1621
  // Optionally serialize the (possibly updated) summary back out as YAML.
1622 if (!ClWriteSummary.empty()) {
1623 ExitOnError ExitOnErr("-lowertypetests-write-summary: " + ClWriteSummary +
1624 ": ");
1625 std::error_code EC;
1626 raw_fd_ostream OS(ClWriteSummary, EC, sys::fs::F_Text);
1627 ExitOnErr(errorCodeToError(EC));
1628
1629 yaml::Output Out(OS);
1630 Out << Summary;
1631 }
1632
1633 return Changed;
1634}
1635
1636static bool isDirectCall(Use& U) {
1637 auto *Usr = dyn_cast<CallInst>(U.getUser());
1638 if (Usr) {
1639 CallSite CS(Usr);
1640 if (CS.isCallee(&U))
1641 return true;
1642 }
1643 return false;
1644}
1645
 // Redirect CFI-relevant uses of Old to New, leaving block addresses and
 // (conditionally) direct calls pointing at the original function. Constant
 // users that are not globals must be rewritten via handleOperandChange,
 // because constants are uniqued and cannot be mutated through a Use.
1646void LowerTypeTestsModule::replaceCfiUses(Function *Old, Value *New, bool IsDefinition) {
1647 SmallSetVector<Constant *, 4> Constants;
  // Advance the iterator before mutating the use: U.set() unlinks the use
  // from Old's use list, which would invalidate the current position.
1648 auto UI = Old->use_begin(), E = Old->use_end();
1649 for (; UI != E;) {
1650 Use &U = *UI;
1651 ++UI;
1652
1653 // Skip block addresses
1654 if (isa<BlockAddress>(U.getUser()))
1655 continue;
1656
1657 // Skip direct calls to externally defined or non-dso_local functions
1658 if (isDirectCall(U) && (Old->isDSOLocal() || !IsDefinition))
1659 continue;
1660
1661 // Must handle Constants specially, we cannot call replaceUsesOfWith on a
1662 // constant because they are uniqued.
1663 if (auto *C = dyn_cast<Constant>(U.getUser())) {
1664 if (!isa<GlobalValue>(C)) {
1665 // Save unique users to avoid processing operand replacement
1666 // more than once.
1667 Constants.insert(C);
1668 continue;
1669 }
1670 }
1671
1672 U.set(New);
1673 }
1674
1675 // Process operand replacement of saved constants.
1676 for (auto *C : Constants)
1677 C->handleOperandChange(Old, New);
1678}
1679
1680void LowerTypeTestsModule::replaceDirectCalls(Value *Old, Value *New) {
1681 auto UI = Old->use_begin(), E = Old->use_end();
1682 for (; UI != E;) {
1683 Use &U = *UI;
1684 ++UI;
1685
1686 if (!isDirectCall(U))
1687 continue;
1688
1689 U.set(New);
1690 }
1691}
1692
1693bool LowerTypeTestsModule::lower() {
1694 // If only some of the modules were split, we cannot correctly perform
1695 // this transformation. We already checked for the presence of type tests
1696 // with partially split modules during the thin link, and would have emitted
1697 // an error if any were found, so here we can simply return.
1698 if ((ExportSummary && ExportSummary->partiallySplitLTOUnits()) ||
1
Assuming pointer value is null
1699 (ImportSummary && ImportSummary->partiallySplitLTOUnits()))
2
Assuming the condition is false
1700 return false;
1701
1702 Function *TypeTestFunc =
1703 M.getFunction(Intrinsic::getName(Intrinsic::type_test));
1704 Function *ICallBranchFunnelFunc =
1705 M.getFunction(Intrinsic::getName(Intrinsic::icall_branch_funnel));
 // Nothing to do: no relevant intrinsics are used and we are in neither the
 // export nor the import phase of a two-phase LTO build.
1706 if ((!TypeTestFunc || TypeTestFunc->use_empty()) &&
3
Assuming 'TypeTestFunc' is non-null
1707 (!ICallBranchFunnelFunc || ICallBranchFunnelFunc->use_empty()) &&
1708 !ExportSummary && !ImportSummary)
1709 return false;
1710
 // Import phase: rewrite type tests and imported CFI functions using the
 // information recorded in the import summary, then return early.
1711 if (ImportSummary) {
4
Taking false branch
1712 if (TypeTestFunc) {
1713 for (auto UI = TypeTestFunc->use_begin(), UE = TypeTestFunc->use_end();
1714 UI != UE;) {
1715 auto *CI = cast<CallInst>((*UI++).getUser());
1716 importTypeTest(CI);
1717 }
1718 }
1719
1720 if (ICallBranchFunnelFunc && !ICallBranchFunnelFunc->use_empty())
1721 report_fatal_error(
1722 "unexpected call to llvm.icall.branch.funnel during import phase");
1723
1724 SmallVector<Function *, 8> Defs;
1725 SmallVector<Function *, 8> Decls;
1726 for (auto &F : M) {
1727 // CFI functions are either external, or promoted. A local function may
1728 // have the same name, but it's not the one we are looking for.
1729 if (F.hasLocalLinkage())
1730 continue;
1731 if (ImportSummary->cfiFunctionDefs().count(F.getName()))
1732 Defs.push_back(&F);
1733 else if (ImportSummary->cfiFunctionDecls().count(F.getName()))
1734 Decls.push_back(&F);
1735 }
1736
1737 for (auto F : Defs)
1738 importFunction(F, /*isDefinition*/ true);
1739 for (auto F : Decls)
1740 importFunction(F, /*isDefinition*/ false);
1741
1742 return true;
1743 }
1744
1745 // Equivalence class set containing type identifiers and the globals that
1746 // reference them. This is used to partition the set of type identifiers in
1747 // the module into disjoint sets.
1748 using GlobalClassesTy = EquivalenceClasses<
1749 PointerUnion3<GlobalTypeMember *, Metadata *, ICallBranchFunnel *>>;
1750 GlobalClassesTy GlobalClasses;
1751
1752 // Verify the type metadata and build a few data structures to let us
1753 // efficiently enumerate the type identifiers associated with a global:
1754 // a list of GlobalTypeMembers (a GlobalObject stored alongside a vector
1755 // of associated type metadata) and a mapping from type identifiers to their
1756 // list of GlobalTypeMembers and last observed index in the list of globals.
1757 // The indices will be used later to deterministically order the list of type
1758 // identifiers.
1759 BumpPtrAllocator Alloc;
1760 struct TIInfo {
1761 unsigned UniqueId;
1762 std::vector<GlobalTypeMember *> RefGlobals;
1763 };
1764 DenseMap<Metadata *, TIInfo> TypeIdInfo;
1765 unsigned CurUniqueId = 0;
1766 SmallVector<MDNode *, 2> Types;
1767
1768 // Cross-DSO CFI emits jumptable entries for exported functions as well as
1769 // address taken functions in case they are address taken in other modules.
1770 const bool CrossDsoCfi = M.getModuleFlag("Cross-DSO CFI") != nullptr;
5
Assuming the condition is false
1771
1772 struct ExportedFunctionInfo {
1773 CfiFunctionLinkage Linkage;
1774 MDNode *FuncMD; // {name, linkage, type[, type...]}
1775 };
 // NOTE(review): ExportedFunctions is only ever populated inside the
 // 'if (ExportSummary)' block below, and IsExported is only set (further
 // down) for functions found in it. Hence IsExported implies
 // ExportSummary != nullptr; the export paths in buildBitSetsFromFunctions
 // rely on this invariant when dereferencing ExportSummary (e.g.
 // ExportSummary->cfiFunctionDefs()). The static analyzer cannot see the
 // invariant and reports a potential null dereference there.
1776 DenseMap<StringRef, ExportedFunctionInfo> ExportedFunctions;
1777 if (ExportSummary) {
6
Taking false branch
1778 // A set of all functions that are address taken by a live global object.
1779 DenseSet<GlobalValue::GUID> AddressTaken;
1780 for (auto &I : *ExportSummary)
1781 for (auto &GVS : I.second.SummaryList)
1782 if (GVS->isLive())
1783 for (auto &Ref : GVS->refs())
1784 AddressTaken.insert(Ref.getGUID());
1785
1786 NamedMDNode *CfiFunctionsMD = M.getNamedMetadata("cfi.functions");
1787 if (CfiFunctionsMD) {
1788 for (auto FuncMD : CfiFunctionsMD->operands()) {
1789 assert(FuncMD->getNumOperands() >= 2)((FuncMD->getNumOperands() >= 2) ? static_cast<void>
(0) : __assert_fail ("FuncMD->getNumOperands() >= 2", "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/IPO/LowerTypeTests.cpp"
, 1789, __PRETTY_FUNCTION__))
;
1790 StringRef FunctionName =
1791 cast<MDString>(FuncMD->getOperand(0))->getString();
1792 CfiFunctionLinkage Linkage = static_cast<CfiFunctionLinkage>(
1793 cast<ConstantAsMetadata>(FuncMD->getOperand(1))
1794 ->getValue()
1795 ->getUniqueInteger()
1796 .getZExtValue());
1797 const GlobalValue::GUID GUID = GlobalValue::getGUID(
1798 GlobalValue::dropLLVMManglingEscape(FunctionName));
1799 // Do not emit jumptable entries for functions that are not-live and
1800 // have no live references (and are not exported with cross-DSO CFI.)
1801 if (!ExportSummary->isGUIDLive(GUID))
1802 continue;
1803 if (!AddressTaken.count(GUID)) {
1804 if (!CrossDsoCfi || Linkage != CFL_Definition)
1805 continue;
1806
1807 bool Exported = false;
1808 if (auto VI = ExportSummary->getValueInfo(GUID))
1809 for (auto &GVS : VI.getSummaryList())
1810 if (GVS->isLive() && !GlobalValue::isLocalLinkage(GVS->linkage()))
1811 Exported = true;
1812
1813 if (!Exported)
1814 continue;
1815 }
 // Prefer a definition over any previously seen declaration with the
 // same name.
1816 auto P = ExportedFunctions.insert({FunctionName, {Linkage, FuncMD}});
1817 if (!P.second && P.first->second.Linkage != CFL_Definition)
1818 P.first->second = {Linkage, FuncMD};
1819 }
1820
1821 for (const auto &P : ExportedFunctions) {
1822 StringRef FunctionName = P.first;
1823 CfiFunctionLinkage Linkage = P.second.Linkage;
1824 MDNode *FuncMD = P.second.FuncMD;
1825 Function *F = M.getFunction(FunctionName);
 // Create a stand-in declaration if this module has no symbol with the
 // exported name.
1826 if (!F)
1827 F = Function::Create(
1828 FunctionType::get(Type::getVoidTy(M.getContext()), false),
1829 GlobalVariable::ExternalLinkage,
1830 M.getDataLayout().getProgramAddressSpace(), FunctionName, &M);
1831
1832 // If the function is available_externally, remove its definition so
1833 // that it is handled the same way as a declaration. Later we will try
1834 // to create an alias using this function's linkage, which will fail if
1835 // the linkage is available_externally. This will also result in us
1836 // following the code path below to replace the type metadata.
1837 if (F->hasAvailableExternallyLinkage()) {
1838 F->setLinkage(GlobalValue::ExternalLinkage);
1839 F->deleteBody();
1840 F->setComdat(nullptr);
1841 F->clearMetadata();
1842 }
1843
1844 // Update the linkage for extern_weak declarations when a definition
1845 // exists.
1846 if (Linkage == CFL_Definition && F->hasExternalWeakLinkage())
1847 F->setLinkage(GlobalValue::ExternalLinkage);
1848
1849 // If the function in the full LTO module is a declaration, replace its
1850 // type metadata with the type metadata we found in cfi.functions. That
1851 // metadata is presumed to be more accurate than the metadata attached
1852 // to the declaration.
1853 if (F->isDeclaration()) {
1854 if (Linkage == CFL_WeakDeclaration)
1855 F->setLinkage(GlobalValue::ExternalWeakLinkage);
1856
1857 F->eraseMetadata(LLVMContext::MD_type);
1858 for (unsigned I = 2; I < FuncMD->getNumOperands(); ++I)
1859 F->addMetadata(LLVMContext::MD_type,
1860 *cast<MDNode>(FuncMD->getOperand(I).get()));
1861 }
1862 }
1863 }
1864 }
1865
 // Wrap every global object of interest in a GlobalTypeMember and index the
 // type identifiers it is a member of.
1866 DenseMap<GlobalObject *, GlobalTypeMember *> GlobalTypeMembers;
1867 for (GlobalObject &GO : M.global_objects()) {
1868 if (isa<GlobalVariable>(GO) && GO.isDeclarationForLinker())
1869 continue;
1870
1871 Types.clear();
1872 GO.getMetadata(LLVMContext::MD_type, Types);
1873
1874 bool IsDefinition = !GO.isDeclarationForLinker();
1875 bool IsExported = false;
1876 if (Function *F = dyn_cast<Function>(&GO)) {
1877 if (ExportedFunctions.count(F->getName())) {
1878 IsDefinition |= ExportedFunctions[F->getName()].Linkage == CFL_Definition;
1879 IsExported = true;
1880 // TODO: The logic here checks only that the function is address taken,
1881 // not that the address takers are live. This can be updated to check
1882 // their liveness and emit fewer jumptable entries once monolithic LTO
1883 // builds also emit summaries.
1884 } else if (!F->hasAddressTaken()) {
1885 if (!CrossDsoCfi || !IsDefinition || F->hasLocalLinkage())
1886 continue;
1887 }
1888 }
1889
1890 auto *GTM =
1891 GlobalTypeMember::create(Alloc, &GO, IsDefinition, IsExported, Types);
1892 GlobalTypeMembers[&GO] = GTM;
1893 for (MDNode *Type : Types) {
1894 verifyTypeMDNode(&GO, Type);
1895 auto &Info = TypeIdInfo[Type->getOperand(1)];
1896 Info.UniqueId = ++CurUniqueId;
1897 Info.RefGlobals.push_back(GTM);
1898 }
1899 }
1900
1901 auto AddTypeIdUse = [&](Metadata *TypeId) -> TypeIdUserInfo & {
1902 // Add the call site to the list of call sites for this type identifier. We
1903 // also use TypeIdUsers to keep track of whether we have seen this type
1904 // identifier before. If we have, we don't need to re-add the referenced
1905 // globals to the equivalence class.
1906 auto Ins = TypeIdUsers.insert({TypeId, {}});
1907 if (Ins.second) {
1908 // Add the type identifier to the equivalence class.
1909 GlobalClassesTy::iterator GCI = GlobalClasses.insert(TypeId);
1910 GlobalClassesTy::member_iterator CurSet = GlobalClasses.findLeader(GCI);
1911
1912 // Add the referenced globals to the type identifier's equivalence class.
1913 for (GlobalTypeMember *GTM : TypeIdInfo[TypeId].RefGlobals)
1914 CurSet = GlobalClasses.unionSets(
1915 CurSet, GlobalClasses.findLeader(GlobalClasses.insert(GTM)));
1916 }
1917
1918 return Ins.first->second;
1919 };
1920
 // Union each llvm.type.test call site's type identifier into the
 // equivalence classes.
1921 if (TypeTestFunc) {
7
Taking true branch
1922 for (const Use &U : TypeTestFunc->uses()) {
1923 auto CI = cast<CallInst>(U.getUser());
1924
1925 auto TypeIdMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));
1926 if (!TypeIdMDVal)
1927 report_fatal_error("Second argument of llvm.type.test must be metadata");
1928 auto TypeId = TypeIdMDVal->getMetadata();
1929 AddTypeIdUse(TypeId).CallSites.push_back(CI);
1930 }
1931 }
1932
 // Branch funnels join all of their targets into a single equivalence class.
1933 if (ICallBranchFunnelFunc) {
8
Assuming 'ICallBranchFunnelFunc' is null
9
Taking false branch
1934 for (const Use &U : ICallBranchFunnelFunc->uses()) {
1935 if (Arch != Triple::x86_64)
1936 report_fatal_error(
1937 "llvm.icall.branch.funnel not supported on this target");
1938
1939 auto CI = cast<CallInst>(U.getUser());
1940
1941 std::vector<GlobalTypeMember *> Targets;
1942 if (CI->getNumArgOperands() % 2 != 1)
1943 report_fatal_error("number of arguments should be odd");
1944
1945 GlobalClassesTy::member_iterator CurSet;
1946 for (unsigned I = 1; I != CI->getNumArgOperands(); I += 2) {
1947 int64_t Offset;
1948 auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
1949 CI->getOperand(I), Offset, M.getDataLayout()));
1950 if (!Base)
1951 report_fatal_error(
1952 "Expected branch funnel operand to be global value");
1953
1954 GlobalTypeMember *GTM = GlobalTypeMembers[Base];
1955 Targets.push_back(GTM);
1956 GlobalClassesTy::member_iterator NewSet =
1957 GlobalClasses.findLeader(GlobalClasses.insert(GTM));
1958 if (I == 1)
1959 CurSet = NewSet;
1960 else
1961 CurSet = GlobalClasses.unionSets(CurSet, NewSet);
1962 }
1963
1964 GlobalClasses.unionSets(
1965 CurSet, GlobalClasses.findLeader(
1966 GlobalClasses.insert(ICallBranchFunnel::create(
1967 Alloc, CI, Targets, ++CurUniqueId))));
1968 }
1969 }
1970
 // In the export phase, also mark type identifiers referenced by type tests
 // recorded in other modules' summaries as exported.
1971 if (ExportSummary) {
10
Taking false branch
1972 DenseMap<GlobalValue::GUID, TinyPtrVector<Metadata *>> MetadataByGUID;
1973 for (auto &P : TypeIdInfo) {
1974 if (auto *TypeId = dyn_cast<MDString>(P.first))
1975 MetadataByGUID[GlobalValue::getGUID(TypeId->getString())].push_back(
1976 TypeId);
1977 }
1978
1979 for (auto &P : *ExportSummary) {
1980 for (auto &S : P.second.SummaryList) {
1981 if (!ExportSummary->isGlobalValueLive(S.get()))
1982 continue;
1983 if (auto *FS = dyn_cast<FunctionSummary>(S->getBaseObject()))
1984 for (GlobalValue::GUID G : FS->type_tests())
1985 for (Metadata *MD : MetadataByGUID[G])
1986 AddTypeIdUse(MD).IsExported = true;
1987 }
1988 }
1989 }
1990
1991 if (GlobalClasses.empty())
11
Assuming the condition is false
12
Taking false branch
1992 return false;
1993
1994 // Build a list of disjoint sets ordered by their maximum global index for
1995 // determinism.
1996 std::vector<std::pair<GlobalClassesTy::iterator, unsigned>> Sets;
1997 for (GlobalClassesTy::iterator I = GlobalClasses.begin(),
13
Loop condition is false. Execution continues on line 2014
1998 E = GlobalClasses.end();
1999 I != E; ++I) {
2000 if (!I->isLeader())
2001 continue;
2002 ++NumTypeIdDisjointSets;
2003
2004 unsigned MaxUniqueId = 0;
2005 for (GlobalClassesTy::member_iterator MI = GlobalClasses.member_begin(I);
2006 MI != GlobalClasses.member_end(); ++MI) {
2007 if (auto *MD = MI->dyn_cast<Metadata *>())
2008 MaxUniqueId = std::max(MaxUniqueId, TypeIdInfo[MD].UniqueId);
2009 else if (auto *BF = MI->dyn_cast<ICallBranchFunnel *>())
2010 MaxUniqueId = std::max(MaxUniqueId, BF->UniqueId);
2011 }
2012 Sets.emplace_back(I, MaxUniqueId);
2013 }
2014 llvm::sort(Sets,
2015 [](const std::pair<GlobalClassesTy::iterator, unsigned> &S1,
2016 const std::pair<GlobalClassesTy::iterator, unsigned> &S2) {
2017 return S1.second < S2.second;
2018 });
2019
2020 // For each disjoint set we found...
2021 for (const auto &S : Sets) {
2022 // Build the list of type identifiers in this disjoint set.
2023 std::vector<Metadata *> TypeIds;
2024 std::vector<GlobalTypeMember *> Globals;
2025 std::vector<ICallBranchFunnel *> ICallBranchFunnels;
2026 for (GlobalClassesTy::member_iterator MI =
14
Loop condition is false. Execution continues on line 2039
2027 GlobalClasses.member_begin(S.first);
2028 MI != GlobalClasses.member_end(); ++MI) {
2029 if (MI->is<Metadata *>())
2030 TypeIds.push_back(MI->get<Metadata *>());
2031 else if (MI->is<GlobalTypeMember *>())
2032 Globals.push_back(MI->get<GlobalTypeMember *>());
2033 else
2034 ICallBranchFunnels.push_back(MI->get<ICallBranchFunnel *>());
2035 }
2036
2037 // Order type identifiers by unique ID for determinism. This ordering is
2038 // stable as there is a one-to-one mapping between metadata and unique IDs.
2039 llvm::sort(TypeIds, [&](Metadata *M1, Metadata *M2) {
2040 return TypeIdInfo[M1].UniqueId < TypeIdInfo[M2].UniqueId;
2041 });
2042
2043 // Same for the branch funnels.
2044 llvm::sort(ICallBranchFunnels,
2045 [&](ICallBranchFunnel *F1, ICallBranchFunnel *F2) {
2046 return F1->UniqueId < F2->UniqueId;
2047 });
2048
2049 // Build bitsets for this disjoint set.
2050 buildBitSetsFromDisjointSet(TypeIds, Globals, ICallBranchFunnels);
15
Calling 'LowerTypeTestsModule::buildBitSetsFromDisjointSet'
2051 }
2052
2053 allocateByteArrays();
2054
2055 // Parse alias data to replace stand-in function declarations for aliases
2056 // with an alias to the intended target.
2057 if (ExportSummary) {
2058 if (NamedMDNode *AliasesMD = M.getNamedMetadata("aliases")) {
2059 for (auto AliasMD : AliasesMD->operands()) {
2060 assert(AliasMD->getNumOperands() >= 4)((AliasMD->getNumOperands() >= 4) ? static_cast<void
> (0) : __assert_fail ("AliasMD->getNumOperands() >= 4"
, "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/IPO/LowerTypeTests.cpp"
, 2060, __PRETTY_FUNCTION__))
;
2061 StringRef AliasName =
2062 cast<MDString>(AliasMD->getOperand(0))->getString();
2063 StringRef Aliasee = cast<MDString>(AliasMD->getOperand(1))->getString();
2064
2065 if (!ExportedFunctions.count(Aliasee) ||
2066 ExportedFunctions[Aliasee].Linkage != CFL_Definition ||
2067 !M.getNamedAlias(Aliasee))
2068 continue;
2069
2070 GlobalValue::VisibilityTypes Visibility =
2071 static_cast<GlobalValue::VisibilityTypes>(
2072 cast<ConstantAsMetadata>(AliasMD->getOperand(2))
2073 ->getValue()
2074 ->getUniqueInteger()
2075 .getZExtValue());
2076 bool Weak =
2077 static_cast<bool>(cast<ConstantAsMetadata>(AliasMD->getOperand(3))
2078 ->getValue()
2079 ->getUniqueInteger()
2080 .getZExtValue());
2081
2082 auto *Alias = GlobalAlias::create("", M.getNamedAlias(Aliasee));
2083 Alias->setVisibility(Visibility);
2084 if (Weak)
2085 Alias->setLinkage(GlobalValue::WeakAnyLinkage);
2086
 // Replace the stand-in declaration (if any) with the real alias.
2087 if (auto *F = M.getFunction(AliasName)) {
2088 Alias->takeName(F);
2089 F->replaceAllUsesWith(Alias);
2090 F->eraseFromParent();
2091 } else {
2092 Alias->setName(AliasName);
2093 }
2094 }
2095 }
2096 }
2097
2098 // Emit .symver directives for exported functions, if they exist.
2099 if (ExportSummary) {
2100 if (NamedMDNode *SymversMD = M.getNamedMetadata("symvers")) {
2101 for (auto Symver : SymversMD->operands()) {
2102 assert(Symver->getNumOperands() >= 2)((Symver->getNumOperands() >= 2) ? static_cast<void>
(0) : __assert_fail ("Symver->getNumOperands() >= 2", "/build/llvm-toolchain-snapshot-9~svn362543/lib/Transforms/IPO/LowerTypeTests.cpp"
, 2102, __PRETTY_FUNCTION__))
;
2103 StringRef SymbolName =
2104 cast<MDString>(Symver->getOperand(0))->getString();
2105 StringRef Alias = cast<MDString>(Symver->getOperand(1))->getString();
2106
2107 if (!ExportedFunctions.count(SymbolName))
2108 continue;
2109
2110 M.appendModuleInlineAsm(
2111 (llvm::Twine(".symver ") + SymbolName + ", " + Alias).str());
2112 }
2113 }
2114 }
2115
2116 return true;
2117}
2118
2119PreservedAnalyses LowerTypeTestsPass::run(Module &M,
2120 ModuleAnalysisManager &AM) {
2121 bool Changed = LowerTypeTestsModule(M, ExportSummary, ImportSummary).lower();
2122 if (!Changed)
2123 return PreservedAnalyses::all();
2124 return PreservedAnalyses::none();
2125}