//===- LowerTypeTests.cpp - type metadata lowering pass -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass lowers type metadata and calls to the llvm.type.test intrinsic.
// It also ensures that globals are properly laid out for the
// llvm.icall.branch.funnel intrinsic.
// See http://llvm.org/docs/TypeMetadata.html for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/LowerTypeTests.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalIFunc.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ReplaceConstant.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TrailingObjects.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <set>
#include <string>
#include <system_error>
#include <utility>
#include <vector>

using namespace llvm;
using namespace lowertypetests;

#define DEBUG_TYPE "lowertypetests"

STATISTIC(ByteArraySizeBits, "Byte array size in bits");
STATISTIC(ByteArraySizeBytes, "Byte array size in bytes");
STATISTIC(NumByteArraysCreated, "Number of byte arrays created");
STATISTIC(NumTypeTestCallsLowered, "Number of type test calls lowered");
STATISTIC(NumTypeIdDisjointSets, "Number of disjoint sets of type identifiers");

static cl::opt<bool> AvoidReuse(
    "lowertypetests-avoid-reuse",
    cl::desc("Try to avoid reuse of byte array addresses using aliases"),
    cl::Hidden, cl::init(true));

static cl::opt<PassSummaryAction> ClSummaryAction(
    "lowertypetests-summary-action",
    cl::desc("What to do with the summary when running this pass"),
    cl::values(clEnumValN(PassSummaryAction::None, "none", "Do nothing"),
               clEnumValN(PassSummaryAction::Import, "import",
                          "Import typeid resolutions from summary and globals"),
               clEnumValN(PassSummaryAction::Export, "export",
                          "Export typeid resolutions to summary and globals")),
    cl::Hidden);

static cl::opt<std::string> ClReadSummary(
    "lowertypetests-read-summary",
    cl::desc("Read summary from given YAML file before running pass"),
    cl::Hidden);

static cl::opt<std::string> ClWriteSummary(
    "lowertypetests-write-summary",
    cl::desc("Write summary to given YAML file after running pass"),
    cl::Hidden);

static cl::opt<DropTestKind>
    ClDropTypeTests("lowertypetests-drop-type-tests",
                    cl::desc("Simply drop type test sequences"),
                    cl::values(clEnumValN(DropTestKind::None, "none",
                                          "Do not drop any type tests"),
                               clEnumValN(DropTestKind::Assume, "assume",
                                          "Drop type test assume sequences"),
                               clEnumValN(DropTestKind::All, "all",
                                          "Drop all type test sequences")),
                    cl::Hidden, cl::init(DropTestKind::None));

bool BitSetInfo::containsGlobalOffset(uint64_t Offset) const {
  if (Offset < ByteOffset)
    return false;

  if ((Offset - ByteOffset) % (uint64_t(1) << AlignLog2) != 0)
    return false;

  uint64_t BitOffset = (Offset - ByteOffset) >> AlignLog2;
  if (BitOffset >= BitSize)
    return false;

  return Bits.count(BitSize - 1 - BitOffset);
}
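
// Illustrative walk-through (a sketch, not part of the upstream source): for
// a bitset built from member offsets {8, 16, 24} we get ByteOffset = 8,
// AlignLog2 = 3 and BitSize = 3. Querying Offset = 16 passes the range and
// alignment checks, computes BitOffset = (16 - 8) >> 3 = 1, and tests bit
// BitSize - 1 - 1 = 1 (bits are stored inverted; see build() below).
// Querying Offset = 12 fails the alignment check and returns false.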

void BitSetInfo::print(raw_ostream &OS) const {
  OS << "offset " << ByteOffset << " size " << BitSize << " align "
     << (1 << AlignLog2);

  if (isAllOnes()) {
    OS << " all-ones\n";
    return;
  }

  OS << " { ";
  for (uint64_t B : Bits)
    OS << B << ' ';
  OS << "}\n";
}

BitSetInfo BitSetBuilder::build() {
  if (Min > Max)
    Min = 0;

  // Normalize each offset against the minimum observed offset, and compute
  // the bitwise OR of each of the offsets. The number of trailing zeros
  // in the mask gives us the log2 of the alignment of all offsets, which
  // allows us to compress the bitset by only storing one bit per aligned
  // address.
  uint64_t Mask = 0;
  for (uint64_t &Offset : Offsets) {
    Offset -= Min;
    Mask |= Offset;
  }

  BitSetInfo BSI;
  BSI.ByteOffset = Min;

  BSI.AlignLog2 = 0;
  if (Mask != 0)
    BSI.AlignLog2 = llvm::countr_zero(Mask);

  // Build the compressed bitset while normalizing the offsets against the
  // computed alignment.
  BSI.BitSize = ((Max - Min) >> BSI.AlignLog2) + 1;
  for (uint64_t Offset : Offsets) {
    Offset >>= BSI.AlignLog2;
    // We invert the order of bits when adding them to the bitset. This is
    // because the offset that we test against is computed by subtracting the
    // address that we are testing from the global's address, which means that
    // the offset increases as the tested address decreases.
    BSI.Bits.insert(BSI.BitSize - 1 - Offset);
  }

  return BSI;
}
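
// Worked example (a sketch, not part of the upstream source): offsets
// {8, 16, 24} normalize against Min = 8 to {0, 8, 16}. Mask = 0 | 8 | 16 =
// 0b11000, so AlignLog2 = countr_zero(24) = 3. BitSize = ((24 - 8) >> 3) + 1
// = 3, and the shifted offsets {0, 1, 2} are inserted in inverted order as
// bits {2, 1, 0}, producing an all-ones bitset that can be lowered to a pure
// range check with no byte array.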

void GlobalLayoutBuilder::addFragment(const std::set<uint64_t> &F) {
  // Create a new fragment to hold the layout for F.
  Fragments.emplace_back();
  std::vector<uint64_t> &Fragment = Fragments.back();
  uint64_t FragmentIndex = Fragments.size() - 1;

  for (auto ObjIndex : F) {
    uint64_t OldFragmentIndex = FragmentMap[ObjIndex];
    if (OldFragmentIndex == 0) {
      // We haven't seen this object index before, so just add it to the current
      // fragment.
      Fragment.push_back(ObjIndex);
    } else {
      // This index belongs to an existing fragment. Copy the elements of the
      // old fragment into this one and clear the old fragment. We don't update
      // the fragment map just yet; this ensures that any further references to
      // indices from the old fragment in this fragment do not insert any more
      // indices.
      std::vector<uint64_t> &OldFragment = Fragments[OldFragmentIndex];
      llvm::append_range(Fragment, OldFragment);
      OldFragment.clear();
    }
  }

  // Update the fragment map to point our object indices to this fragment.
  for (uint64_t ObjIndex : Fragment)
    FragmentMap[ObjIndex] = FragmentIndex;
}
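
// Illustrative example (a sketch, not part of the upstream source): suppose
// fragment 1 holds {1, 2} and fragment 2 holds {3}. addFragment({2, 3, 4})
// appends a new fragment, absorbs fragment 1 via index 2 (yielding [1, 2]),
// absorbs fragment 2 via index 3 (yielding [1, 2, 3]), and finally pushes the
// unseen index 4, so objects that must be laid out together always end up in
// one fragment. Fragment index 0 is implicitly reserved as "unseen".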

void ByteArrayBuilder::allocate(const std::set<uint64_t> &Bits,
                                uint64_t BitSize, uint64_t &AllocByteOffset,
                                uint8_t &AllocMask) {
  // Find the smallest current allocation.
  unsigned Bit = 0;
  for (unsigned I = 1; I != BitsPerByte; ++I)
    if (BitAllocs[I] < BitAllocs[Bit])
      Bit = I;

  AllocByteOffset = BitAllocs[Bit];

  // Add our size to it.
  unsigned ReqSize = AllocByteOffset + BitSize;
  BitAllocs[Bit] = ReqSize;
  if (Bytes.size() < ReqSize)
    Bytes.resize(ReqSize);

  // Set our bits.
  AllocMask = 1 << Bit;
  for (uint64_t B : Bits)
    Bytes[AllocByteOffset + B] |= AllocMask;
}
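
// Illustrative example (a sketch, not part of the upstream source): the
// builder maintains eight parallel allocators, one per bit position of a
// byte. A bitset of BitSize N claims N consecutive bytes in the least-full
// bit lane, so up to eight bitsets can share the same bytes, each selected
// by its own AllocMask (1 << Bit). For instance, two bitsets of BitSize 16
// may both occupy bytes [0, 16), with masks 0x01 and 0x02 respectively.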

bool lowertypetests::isJumpTableCanonical(Function *F) {
  if (F->isDeclarationForLinker())
    return false;
  auto *CI = mdconst::extract_or_null<ConstantInt>(
      F->getParent()->getModuleFlag("CFI Canonical Jump Tables"));
  if (!CI || !CI->isZero())
    return true;
  return F->hasFnAttribute("cfi-canonical-jump-table");
}

namespace {

struct ByteArrayInfo {
  std::set<uint64_t> Bits;
  uint64_t BitSize;
  GlobalVariable *ByteArray;
  GlobalVariable *MaskGlobal;
  uint8_t *MaskPtr = nullptr;
};

/// A POD-like structure that we use to store a global reference together with
/// its metadata types. In this pass we frequently need to query the set of
/// metadata types referenced by a global, which at the IR level is an expensive
/// operation involving a map lookup; this data structure helps to reduce the
/// number of times we need to do this lookup.
class GlobalTypeMember final : TrailingObjects<GlobalTypeMember, MDNode *> {
  friend TrailingObjects;

  GlobalObject *GO;
  size_t NTypes;

  // For functions: true if the jump table is canonical. This essentially means
  // whether the canonical address (i.e. the symbol table entry) of the function
  // is provided by the local jump table. This is normally the same as whether
  // the function is defined locally, but if canonical jump tables are disabled
  // by the user then the jump table never provides a canonical definition.
  bool IsJumpTableCanonical;

  // For functions: true if this function is either defined or used in a thinlto
  // module and its jumptable entry needs to be exported to thinlto backends.
  bool IsExported;

public:
  static GlobalTypeMember *create(BumpPtrAllocator &Alloc, GlobalObject *GO,
                                  bool IsJumpTableCanonical, bool IsExported,
                                  ArrayRef<MDNode *> Types) {
    auto *GTM = static_cast<GlobalTypeMember *>(Alloc.Allocate(
        totalSizeToAlloc<MDNode *>(Types.size()), alignof(GlobalTypeMember)));
    GTM->GO = GO;
    GTM->NTypes = Types.size();
    GTM->IsJumpTableCanonical = IsJumpTableCanonical;
    GTM->IsExported = IsExported;
    llvm::copy(Types, GTM->getTrailingObjects());
    return GTM;
  }

  GlobalObject *getGlobal() const {
    return GO;
  }

  bool isJumpTableCanonical() const {
    return IsJumpTableCanonical;
  }

  bool isExported() const {
    return IsExported;
  }

  ArrayRef<MDNode *> types() const { return getTrailingObjects(NTypes); }
};

struct ICallBranchFunnel final
    : TrailingObjects<ICallBranchFunnel, GlobalTypeMember *> {
  static ICallBranchFunnel *create(BumpPtrAllocator &Alloc, CallInst *CI,
                                   ArrayRef<GlobalTypeMember *> Targets,
                                   unsigned UniqueId) {
    auto *Call = static_cast<ICallBranchFunnel *>(
        Alloc.Allocate(totalSizeToAlloc<GlobalTypeMember *>(Targets.size()),
                       alignof(ICallBranchFunnel)));
    Call->CI = CI;
    Call->UniqueId = UniqueId;
    Call->NTargets = Targets.size();
    llvm::copy(Targets, Call->getTrailingObjects());
    return Call;
  }

  CallInst *CI;
  ArrayRef<GlobalTypeMember *> targets() const {
    return getTrailingObjects(NTargets);
  }

  unsigned UniqueId;

private:
  size_t NTargets;
};

struct ScopedSaveAliaseesAndUsed {
  Module &M;
  SmallVector<GlobalValue *, 4> Used, CompilerUsed;
  std::vector<std::pair<GlobalAlias *, Function *>> FunctionAliases;
  std::vector<std::pair<GlobalIFunc *, Function *>> ResolverIFuncs;

  // This function only removes functions from llvm.used and llvm.compiler.used.
  // We cannot remove global variables because they need to follow RAUW, as
  // they may be deleted by buildBitSetsFromGlobalVariables.
  void collectAndEraseUsedFunctions(Module &M,
                                    SmallVectorImpl<GlobalValue *> &Vec,
                                    bool CompilerUsed) {
    auto *GV = collectUsedGlobalVariables(M, Vec, CompilerUsed);
    if (!GV)
      return;
    // There's no API to only remove certain array elements from
    // llvm.used/llvm.compiler.used, so we remove all of them and add back only
    // the non-functions.
    GV->eraseFromParent();
    auto NonFuncBegin =
        std::stable_partition(Vec.begin(), Vec.end(), [](GlobalValue *GV) {
          return isa<Function>(GV);
        });
    if (CompilerUsed)
      appendToCompilerUsed(M, {NonFuncBegin, Vec.end()});
    else
      appendToUsed(M, {NonFuncBegin, Vec.end()});
    Vec.resize(NonFuncBegin - Vec.begin());
  }

  ScopedSaveAliaseesAndUsed(Module &M) : M(M) {
    // The users of this class want to replace all function references except
    // for aliases and llvm.used/llvm.compiler.used with references to a jump
    // table. We avoid replacing aliases in order to avoid introducing a double
    // indirection (or an alias pointing to a declaration in ThinLTO mode), and
    // we avoid replacing llvm.used/llvm.compiler.used because these global
    // variables describe properties of the global, not the jump table (besides,
    // offset references to the jump table in llvm.used are invalid).
    // Unfortunately, LLVM doesn't have a "RAUW except for these (possibly
    // indirect) users", so what we do is save the list of globals referenced by
    // llvm.used/llvm.compiler.used and aliases, erase the used lists, let RAUW
    // replace the aliasees and then set them back to their original values at
    // the end.
    collectAndEraseUsedFunctions(M, Used, false);
    collectAndEraseUsedFunctions(M, CompilerUsed, true);

    for (auto &GA : M.aliases()) {
      // FIXME: This should look past all aliases not just interposable ones,
      // see discussion on D65118.
      if (auto *F = dyn_cast<Function>(GA.getAliasee()->stripPointerCasts()))
        FunctionAliases.push_back({&GA, F});
    }

    for (auto &GI : M.ifuncs())
      if (auto *F = dyn_cast<Function>(GI.getResolver()->stripPointerCasts()))
        ResolverIFuncs.push_back({&GI, F});
  }

  ~ScopedSaveAliaseesAndUsed() {
    appendToUsed(M, Used);
    appendToCompilerUsed(M, CompilerUsed);

    for (auto P : FunctionAliases)
      P.first->setAliasee(P.second);

    for (auto P : ResolverIFuncs) {
      // This does not preserve pointer casts that may have been stripped by the
      // constructor, but the resolver's type is different from that of the
      // ifunc anyway.
      P.first->setResolver(P.second);
    }
  }
};
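
// Usage sketch (illustrative, assuming a caller elsewhere in this pass):
//
//   {
//     ScopedSaveAliaseesAndUsed S(M);
//     F->replaceAllUsesWith(JumpTableEntry); // aliases and llvm.used shielded
//   } // destructor restores aliasees, llvm.used and llvm.compiler.used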

class LowerTypeTestsModule {
  Module &M;

  ModuleSummaryIndex *ExportSummary;
  const ModuleSummaryIndex *ImportSummary;
  // Set when the client has invoked this to simply drop all type test assume
  // sequences.
  DropTestKind DropTypeTests;

  Triple::ArchType Arch;
  Triple::OSType OS;
  Triple::ObjectFormatType ObjectFormat;

  // Determines which kind of Thumb jump table we generate. If arch is
  // either 'arm' or 'thumb' we need to find this out, because
  // selectJumpTableArmEncoding may decide to use Thumb in either case.
  bool CanUseArmJumpTable = false, CanUseThumbBWJumpTable = false;

  // Cache variable used by hasBranchTargetEnforcement().
  int HasBranchTargetEnforcement = -1;

  IntegerType *Int1Ty = Type::getInt1Ty(M.getContext());
  IntegerType *Int8Ty = Type::getInt8Ty(M.getContext());
  PointerType *PtrTy = PointerType::getUnqual(M.getContext());
  ArrayType *Int8Arr0Ty = ArrayType::get(Type::getInt8Ty(M.getContext()), 0);
  IntegerType *Int32Ty = Type::getInt32Ty(M.getContext());
  IntegerType *Int64Ty = Type::getInt64Ty(M.getContext());
  IntegerType *IntPtrTy = M.getDataLayout().getIntPtrType(M.getContext(), 0);

  // Indirect function call index assignment counter for WebAssembly
  uint64_t IndirectIndex = 1;

  // Mapping from type identifiers to the call sites that test them, as well as
  // whether the type identifier needs to be exported to ThinLTO backends as
  // part of the regular LTO phase of the ThinLTO pipeline (see exportTypeId).
  struct TypeIdUserInfo {
    std::vector<CallInst *> CallSites;
    bool IsExported = false;
  };
  DenseMap<Metadata *, TypeIdUserInfo> TypeIdUsers;

  /// This structure describes how to lower type tests for a particular type
  /// identifier. It is either built directly from the global analysis (during
  /// regular LTO or the regular LTO phase of ThinLTO), or indirectly using type
  /// identifier summaries and external symbol references (in ThinLTO backends).
  struct TypeIdLowering {
    TypeTestResolution::Kind TheKind = TypeTestResolution::Unsat;

    /// All except Unsat: the address of the last element within the combined
    /// global.
    Constant *OffsetedGlobal;

    /// ByteArray, Inline, AllOnes: log2 of the required global alignment
    /// relative to the start address.
    Constant *AlignLog2;

    /// ByteArray, Inline, AllOnes: one less than the size of the memory region
    /// covering members of this type identifier as a multiple of 2^AlignLog2.
    Constant *SizeM1;

    /// ByteArray: the byte array to test the address against.
    Constant *TheByteArray;

    /// ByteArray: the bit mask to apply to bytes loaded from the byte array.
    Constant *BitMask;

    /// Inline: the bit mask to test the address against.
    Constant *InlineBits;
  };

  std::vector<ByteArrayInfo> ByteArrayInfos;

  Function *WeakInitializerFn = nullptr;

  GlobalVariable *GlobalAnnotation;
  DenseSet<Value *> FunctionAnnotations;

  bool shouldExportConstantsAsAbsoluteSymbols();
  uint8_t *exportTypeId(StringRef TypeId, const TypeIdLowering &TIL);
  TypeIdLowering importTypeId(StringRef TypeId);
  void importTypeTest(CallInst *CI);
  void importFunction(Function *F, bool isJumpTableCanonical);

  ByteArrayInfo *createByteArray(const BitSetInfo &BSI);
  void allocateByteArrays();
  Value *createBitSetTest(IRBuilder<> &B, const TypeIdLowering &TIL,
                          Value *BitOffset);
  void lowerTypeTestCalls(
      ArrayRef<Metadata *> TypeIds, Constant *CombinedGlobalAddr,
      const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout);
  Value *lowerTypeTestCall(Metadata *TypeId, CallInst *CI,
                           const TypeIdLowering &TIL);

  void buildBitSetsFromGlobalVariables(ArrayRef<Metadata *> TypeIds,
                                       ArrayRef<GlobalTypeMember *> Globals);
  Triple::ArchType
  selectJumpTableArmEncoding(ArrayRef<GlobalTypeMember *> Functions);
  bool hasBranchTargetEnforcement();
  unsigned getJumpTableEntrySize(Triple::ArchType JumpTableArch);
  InlineAsm *createJumpTableEntryAsm(Triple::ArchType JumpTableArch);
  void verifyTypeMDNode(GlobalObject *GO, MDNode *Type);
  void buildBitSetsFromFunctions(ArrayRef<Metadata *> TypeIds,
                                 ArrayRef<GlobalTypeMember *> Functions);
  void buildBitSetsFromFunctionsNative(ArrayRef<Metadata *> TypeIds,
                                       ArrayRef<GlobalTypeMember *> Functions);
  void buildBitSetsFromFunctionsWASM(ArrayRef<Metadata *> TypeIds,
                                     ArrayRef<GlobalTypeMember *> Functions);
  void
  buildBitSetsFromDisjointSet(ArrayRef<Metadata *> TypeIds,
                              ArrayRef<GlobalTypeMember *> Globals,
                              ArrayRef<ICallBranchFunnel *> ICallBranchFunnels);

  void replaceWeakDeclarationWithJumpTablePtr(Function *F, Constant *JT,
                                              bool IsJumpTableCanonical);
  void moveInitializerToModuleConstructor(GlobalVariable *GV);
  void findGlobalVariableUsersOf(Constant *C,
                                 SmallSetVector<GlobalVariable *, 8> &Out);

  void createJumpTable(Function *F, ArrayRef<GlobalTypeMember *> Functions,
                       Triple::ArchType JumpTableArch);

  /// replaceCfiUses - Go through the uses list for this definition
  /// and make each use point to "V" instead of "this" when the use is outside
  /// the block. 'This's use list is expected to have at least one element.
  /// Unlike replaceAllUsesWith this function skips blockaddr and direct call
  /// uses.
  void replaceCfiUses(Function *Old, Value *New, bool IsJumpTableCanonical);

  /// replaceDirectCalls - Go through the uses list for this definition and
  /// replace each use, which is a direct function call.
  void replaceDirectCalls(Value *Old, Value *New);

  bool isFunctionAnnotation(Value *V) const {
    return FunctionAnnotations.contains(V);
  }

  void maybeReplaceComdat(Function *F, StringRef OriginalName);

public:
  LowerTypeTestsModule(Module &M, ModuleAnalysisManager &AM,
                       ModuleSummaryIndex *ExportSummary,
                       const ModuleSummaryIndex *ImportSummary,
                       DropTestKind DropTypeTests);

  bool lower();

  // Lower the module using the action and summary passed as command line
  // arguments. For testing purposes only.
  static bool runForTesting(Module &M, ModuleAnalysisManager &AM);
};
} // end anonymous namespace

/// Build a bit set for a list of offsets.
static BitSetInfo buildBitSet(ArrayRef<uint64_t> Offsets) {
  // Compute the byte offset of each address associated with this type
  // identifier.
  return BitSetBuilder(Offsets).build();
}

/// Build a test that bit BitOffset mod sizeof(Bits)*8 is set in
/// Bits. This pattern matches to the bt instruction on x86.
static Value *createMaskedBitTest(IRBuilder<> &B, Value *Bits,
                                  Value *BitOffset) {
  auto BitsType = cast<IntegerType>(Bits->getType());
  unsigned BitWidth = BitsType->getBitWidth();

  BitOffset = B.CreateZExtOrTrunc(BitOffset, BitsType);
  Value *BitIndex =
      B.CreateAnd(BitOffset, ConstantInt::get(BitsType, BitWidth - 1));
  Value *BitMask = B.CreateShl(ConstantInt::get(BitsType, 1), BitIndex);
  Value *MaskedBits = B.CreateAnd(Bits, BitMask);
  return B.CreateICmpNE(MaskedBits, ConstantInt::get(BitsType, 0));
}
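
// IR sketch (illustrative, names invented): for a 32-bit Bits constant this
// emits roughly
//
//   %idx  = and i32 %offset, 31
//   %mask = shl i32 1, %idx
//   %and  = and i32 <bits>, %mask
//   %ok   = icmp ne i32 %and, 0
//
// which the x86 backend can match to a single bt instruction.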

ByteArrayInfo *LowerTypeTestsModule::createByteArray(const BitSetInfo &BSI) {
  // Create globals to stand in for byte arrays and masks. These never actually
  // get initialized; we RAUW and erase them later in allocateByteArrays() once
  // we know the offset and mask to use.
  auto ByteArrayGlobal = new GlobalVariable(
      M, Int8Ty, /*isConstant=*/true, GlobalValue::PrivateLinkage, nullptr);
  auto MaskGlobal = new GlobalVariable(M, Int8Ty, /*isConstant=*/true,
                                       GlobalValue::PrivateLinkage, nullptr);

  ByteArrayInfos.emplace_back();
  ByteArrayInfo *BAI = &ByteArrayInfos.back();

  BAI->Bits = BSI.Bits;
  BAI->BitSize = BSI.BitSize;
  BAI->ByteArray = ByteArrayGlobal;
  BAI->MaskGlobal = MaskGlobal;
  return BAI;
}

void LowerTypeTestsModule::allocateByteArrays() {
  llvm::stable_sort(ByteArrayInfos,
                    [](const ByteArrayInfo &BAI1, const ByteArrayInfo &BAI2) {
                      return BAI1.BitSize > BAI2.BitSize;
                    });

  std::vector<uint64_t> ByteArrayOffsets(ByteArrayInfos.size());

  ByteArrayBuilder BAB;
  for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
    ByteArrayInfo *BAI = &ByteArrayInfos[I];

    uint8_t Mask;
    BAB.allocate(BAI->Bits, BAI->BitSize, ByteArrayOffsets[I], Mask);

    BAI->MaskGlobal->replaceAllUsesWith(
        ConstantExpr::getIntToPtr(ConstantInt::get(Int8Ty, Mask), PtrTy));
    BAI->MaskGlobal->eraseFromParent();
    if (BAI->MaskPtr)
      *BAI->MaskPtr = Mask;
  }

  Constant *ByteArrayConst = ConstantDataArray::get(M.getContext(), BAB.Bytes);
  auto ByteArray =
      new GlobalVariable(M, ByteArrayConst->getType(), /*isConstant=*/true,
                         GlobalValue::PrivateLinkage, ByteArrayConst);

  for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
    ByteArrayInfo *BAI = &ByteArrayInfos[I];

    Constant *Idxs[] = {ConstantInt::get(IntPtrTy, 0),
                        ConstantInt::get(IntPtrTy, ByteArrayOffsets[I])};
    Constant *GEP = ConstantExpr::getInBoundsGetElementPtr(
        ByteArrayConst->getType(), ByteArray, Idxs);

    // Create an alias instead of RAUW'ing the gep directly. On x86 this ensures
    // that the pc-relative displacement is folded into the lea instead of the
    // test instruction getting another displacement.
    GlobalAlias *Alias = GlobalAlias::create(
        Int8Ty, 0, GlobalValue::PrivateLinkage, "bits", GEP, &M);
    BAI->ByteArray->replaceAllUsesWith(Alias);
    BAI->ByteArray->eraseFromParent();
  }

  ByteArraySizeBits = BAB.BitAllocs[0] + BAB.BitAllocs[1] + BAB.BitAllocs[2] +
                      BAB.BitAllocs[3] + BAB.BitAllocs[4] + BAB.BitAllocs[5] +
                      BAB.BitAllocs[6] + BAB.BitAllocs[7];
  ByteArraySizeBytes = BAB.Bytes.size();
}

/// Build a test that bit BitOffset is set in the type identifier that was
/// lowered to TIL, which must be either an Inline or a ByteArray.
Value *LowerTypeTestsModule::createBitSetTest(IRBuilder<> &B,
                                              const TypeIdLowering &TIL,
                                              Value *BitOffset) {
  if (TIL.TheKind == TypeTestResolution::Inline) {
    // If the bit set is sufficiently small, we can avoid a load by bit testing
    // a constant.
    return createMaskedBitTest(B, TIL.InlineBits, BitOffset);
  } else {
    Constant *ByteArray = TIL.TheByteArray;
    if (AvoidReuse && !ImportSummary) {
      // Each use of the byte array uses a different alias. This makes the
      // backend less likely to reuse previously computed byte array addresses,
      // improving the security of the CFI mechanism based on this pass.
      // This won't work when importing because TheByteArray is external.
      ByteArray = GlobalAlias::create(Int8Ty, 0, GlobalValue::PrivateLinkage,
                                      "bits_use", ByteArray, &M);
    }

    Value *ByteAddr = B.CreateGEP(Int8Ty, ByteArray, BitOffset);
    Value *Byte = B.CreateLoad(Int8Ty, ByteAddr);

    Value *ByteAndMask =
        B.CreateAnd(Byte, ConstantExpr::getPtrToInt(TIL.BitMask, Int8Ty));
    return B.CreateICmpNE(ByteAndMask, ConstantInt::get(Int8Ty, 0));
  }
}
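
// IR sketch of the byte-array path (illustrative, names invented):
//
//   %byteaddr = getelementptr i8, ptr @bits_use, i64 %bitoffset
//   %byte     = load i8, ptr %byteaddr
//   %and      = and i8 %byte, <mask>
//   %ok       = icmp ne i8 %and, 0
//
// where @bits_use is the per-use alias created above and <mask> selects this
// type identifier's bit lane within the shared byte array.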

static bool isKnownTypeIdMember(Metadata *TypeId, const DataLayout &DL,
                                Value *V, uint64_t COffset) {
  if (auto GV = dyn_cast<GlobalObject>(V)) {
    SmallVector<MDNode *, 2> Types;
    GV->getMetadata(LLVMContext::MD_type, Types);
    for (MDNode *Type : Types) {
      if (Type->getOperand(1) != TypeId)
        continue;
      uint64_t Offset =
          cast<ConstantInt>(
              cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
              ->getZExtValue();
      if (COffset == Offset)
        return true;
    }
    return false;
  }

  if (auto GEP = dyn_cast<GEPOperator>(V)) {
    APInt APOffset(DL.getIndexSizeInBits(0), 0);
    bool Result = GEP->accumulateConstantOffset(DL, APOffset);
    if (!Result)
      return false;
    COffset += APOffset.getZExtValue();
    return isKnownTypeIdMember(TypeId, DL, GEP->getPointerOperand(), COffset);
  }

  if (auto Op = dyn_cast<Operator>(V)) {
    if (Op->getOpcode() == Instruction::BitCast)
      return isKnownTypeIdMember(TypeId, DL, Op->getOperand(0), COffset);

    if (Op->getOpcode() == Instruction::Select)
      return isKnownTypeIdMember(TypeId, DL, Op->getOperand(1), COffset) &&
             isKnownTypeIdMember(TypeId, DL, Op->getOperand(2), COffset);
  }

  return false;
}

/// Lower a llvm.type.test call to its implementation. Returns the value to
/// replace the call with.
Value *LowerTypeTestsModule::lowerTypeTestCall(Metadata *TypeId, CallInst *CI,
                                               const TypeIdLowering &TIL) {
  // Delay lowering if the resolution is currently unknown.
  if (TIL.TheKind == TypeTestResolution::Unknown)
    return nullptr;
  if (TIL.TheKind == TypeTestResolution::Unsat)
    return ConstantInt::getFalse(M.getContext());

  Value *Ptr = CI->getArgOperand(0);
  const DataLayout &DL = M.getDataLayout();
  if (isKnownTypeIdMember(TypeId, DL, Ptr, 0))
    return ConstantInt::getTrue(M.getContext());

  BasicBlock *InitialBB = CI->getParent();

  IRBuilder<> B(CI);

  Value *PtrAsInt = B.CreatePtrToInt(Ptr, IntPtrTy);

  Constant *OffsetedGlobalAsInt =
      ConstantExpr::getPtrToInt(TIL.OffsetedGlobal, IntPtrTy);
  if (TIL.TheKind == TypeTestResolution::Single)
    return B.CreateICmpEQ(PtrAsInt, OffsetedGlobalAsInt);

  // Here we compute `last element - address`. The reason why we do this instead
  // of computing `address - first element` is that it leads to a slightly
  // shorter instruction sequence on x86. Because it doesn't matter how we do
  // the subtraction on other architectures, we do so unconditionally.
  Value *PtrOffset = B.CreateSub(OffsetedGlobalAsInt, PtrAsInt);

  // We need to check that the offset both falls within our range and is
  // suitably aligned. We can check both properties at the same time by
  // performing a right rotate by log2(alignment) followed by an integer
  // comparison against the bitset size. The rotate will move the lower
  // order bits that need to be zero into the higher order bits of the
  // result, causing the comparison to fail if they are nonzero. The rotate
  // also conveniently gives us a bit offset to use during the load from
  // the bitset.
  Value *BitOffset = B.CreateIntrinsic(IntPtrTy, Intrinsic::fshr,
                                       {PtrOffset, PtrOffset, TIL.AlignLog2});

  Value *OffsetInRange = B.CreateICmpULE(BitOffset, TIL.SizeM1);

  // If the bit set is all ones, testing against it is unnecessary.
  if (TIL.TheKind == TypeTestResolution::AllOnes)
    return OffsetInRange;

  // See if the intrinsic is used in the following common pattern:
  //   br(llvm.type.test(...), thenbb, elsebb)
  // where nothing happens between the type test and the br.
  // If so, create slightly simpler IR.
  if (CI->hasOneUse())
    if (auto *Br = dyn_cast<BranchInst>(*CI->user_begin()))
      if (CI->getNextNode() == Br) {
        BasicBlock *Then = InitialBB->splitBasicBlock(CI->getIterator());
        BasicBlock *Else = Br->getSuccessor(1);
        BranchInst *NewBr = BranchInst::Create(Then, Else, OffsetInRange);
        NewBr->setMetadata(LLVMContext::MD_prof,
                           Br->getMetadata(LLVMContext::MD_prof));
        ReplaceInstWithInst(InitialBB->getTerminator(), NewBr);

        // Update phis in Else resulting from InitialBB being split
        for (auto &Phi : Else->phis())
          Phi.addIncoming(Phi.getIncomingValueForBlock(Then), InitialBB);

        IRBuilder<> ThenB(CI);
        return createBitSetTest(ThenB, TIL, BitOffset);
      }

  IRBuilder<> ThenB(SplitBlockAndInsertIfThen(OffsetInRange, CI, false));

  // Now that we know that the offset is in range and aligned, load the
  // appropriate bit from the bitset.
  Value *Bit = createBitSetTest(ThenB, TIL, BitOffset);

  // The value we want is 0 if we came directly from the initial block
  // (having failed the range or alignment checks), or the loaded bit if
  // we came from the block in which we loaded it.
  B.SetInsertPoint(CI);
  PHINode *P = B.CreatePHI(Int1Ty, 2);
  P->addIncoming(ConstantInt::get(Int1Ty, 0), InitialBB);
  P->addIncoming(Bit, ThenB.GetInsertBlock());
  return P;
}
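
// End-to-end sketch (illustrative, names invented): for an AllOnes or
// ByteArray resolution the fast path lowers roughly to
//
//   %ptr     = ptrtoint ptr %p to i64
//   %diff    = sub i64 <last element>, %ptr
//   %rot     = call i64 @llvm.fshr.i64(i64 %diff, i64 %diff, i64 <align>)
//   %inrange = icmp ule i64 %rot, <size - 1>
//
// followed (for ByteArray) by the guarded bit test created above. The
// funnel-shift rotate folds the range and alignment checks into one compare.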

/// Given a disjoint set of type identifiers and globals, lay out the globals,
/// build the bit sets and lower the llvm.type.test calls.
void LowerTypeTestsModule::buildBitSetsFromGlobalVariables(
    ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Globals) {
  // Build a new global with the combined contents of the referenced globals.
  // This global is a struct whose even-indexed elements contain the original
  // contents of the referenced globals and whose odd-indexed elements contain
  // any padding required to align the next element to the next power of 2 plus
  // any additional padding required to meet its alignment requirements.
  std::vector<Constant *> GlobalInits;
  const DataLayout &DL = M.getDataLayout();
  DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
  Align MaxAlign;
  uint64_t CurOffset = 0;
  uint64_t DesiredPadding = 0;
  for (GlobalTypeMember *G : Globals) {
    auto *GV = cast<GlobalVariable>(G->getGlobal());
    Align Alignment =
        DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
    MaxAlign = std::max(MaxAlign, Alignment);
    uint64_t GVOffset = alignTo(CurOffset + DesiredPadding, Alignment);
    GlobalLayout[G] = GVOffset;
    if (GVOffset != 0) {
      uint64_t Padding = GVOffset - CurOffset;
      GlobalInits.push_back(
          ConstantAggregateZero::get(ArrayType::get(Int8Ty, Padding)));
    }

    GlobalInits.push_back(GV->getInitializer());
    uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
    CurOffset = GVOffset + InitSize;

    // Compute the amount of padding that we'd like for the next element.
    DesiredPadding = NextPowerOf2(InitSize - 1) - InitSize;

    // Experiments of different caps with Chromium on both x64 and ARM64
    // have shown that the 32-byte cap generates the smallest binary on
    // both platforms while different caps yield similar performance.
    // (see https://lists.llvm.org/pipermail/llvm-dev/2018-July/124694.html)
    if (DesiredPadding > 32)
      DesiredPadding = alignTo(InitSize, 32) - InitSize;
  }

  Constant *NewInit = ConstantStruct::getAnon(M.getContext(), GlobalInits);
  auto *CombinedGlobal =
      new GlobalVariable(M, NewInit->getType(), /*isConstant=*/true,
                         GlobalValue::PrivateLinkage, NewInit);
  CombinedGlobal->setAlignment(MaxAlign);

  StructType *NewTy = cast<StructType>(NewInit->getType());
  lowerTypeTestCalls(TypeIds, CombinedGlobal, GlobalLayout);

  // Build aliases pointing to offsets into the combined global for each
  // global from which we built the combined global, and replace references
  // to the original globals with references to the aliases.
  for (unsigned I = 0; I != Globals.size(); ++I) {
    GlobalVariable *GV = cast<GlobalVariable>(Globals[I]->getGlobal());

    // Multiply by 2 to account for padding elements.
    Constant *CombinedGlobalIdxs[] = {ConstantInt::get(Int32Ty, 0),
                                      ConstantInt::get(Int32Ty, I * 2)};
    Constant *CombinedGlobalElemPtr = ConstantExpr::getInBoundsGetElementPtr(
        NewInit->getType(), CombinedGlobal, CombinedGlobalIdxs);
    assert(GV->getType()->getAddressSpace() == 0);
    GlobalAlias *GAlias =
        GlobalAlias::create(NewTy->getElementType(I * 2), 0, GV->getLinkage(),
                            "", CombinedGlobalElemPtr, &M);
    GAlias->setVisibility(GV->getVisibility());
    GAlias->takeName(GV);
    GV->replaceAllUsesWith(GAlias);
    GV->eraseFromParent();
  }
}

bool LowerTypeTestsModule::shouldExportConstantsAsAbsoluteSymbols() {
  return (Arch == Triple::x86 || Arch == Triple::x86_64) &&
         ObjectFormat == Triple::ELF;
}

/// Export the given type identifier so that ThinLTO backends may import it.
/// Type identifiers are exported by adding coarse-grained information about how
/// to test the type identifier to the summary, and creating symbols in the
/// object file (aliases and absolute symbols) containing fine-grained
/// information about the type identifier.
///
/// Returns a pointer to the location in which to store the bitmask, if
/// applicable.
uint8_t *LowerTypeTestsModule::exportTypeId(StringRef TypeId,
                                            const TypeIdLowering &TIL) {
  TypeTestResolution &TTRes =
      ExportSummary->getOrInsertTypeIdSummary(TypeId).TTRes;
  TTRes.TheKind = TIL.TheKind;

  auto ExportGlobal = [&](StringRef Name, Constant *C) {
    GlobalAlias *GA =
        GlobalAlias::create(Int8Ty, 0, GlobalValue::ExternalLinkage,
                            "__typeid_" + TypeId + "_" + Name, C, &M);
    GA->setVisibility(GlobalValue::HiddenVisibility);
  };

  auto ExportConstant = [&](StringRef Name, uint64_t &Storage, Constant *C) {
    if (shouldExportConstantsAsAbsoluteSymbols())
      ExportGlobal(Name, ConstantExpr::getIntToPtr(C, PtrTy));
    else
      Storage = cast<ConstantInt>(C)->getZExtValue();
  };

  if (TIL.TheKind != TypeTestResolution::Unsat)
    ExportGlobal("global_addr", TIL.OffsetedGlobal);

  if (TIL.TheKind == TypeTestResolution::ByteArray ||
      TIL.TheKind == TypeTestResolution::Inline ||
      TIL.TheKind == TypeTestResolution::AllOnes) {
    ExportConstant("align", TTRes.AlignLog2, TIL.AlignLog2);
    ExportConstant("size_m1", TTRes.SizeM1, TIL.SizeM1);

    uint64_t BitSize = cast<ConstantInt>(TIL.SizeM1)->getZExtValue() + 1;
    if (TIL.TheKind == TypeTestResolution::Inline)
      TTRes.SizeM1BitWidth = (BitSize <= 32) ? 5 : 6;
    else
      TTRes.SizeM1BitWidth = (BitSize <= 128) ? 7 : 32;
  }

  if (TIL.TheKind == TypeTestResolution::ByteArray) {
    ExportGlobal("byte_array", TIL.TheByteArray);
    if (shouldExportConstantsAsAbsoluteSymbols())
      ExportGlobal("bit_mask", TIL.BitMask);
    else
      return &TTRes.BitMask;
  }

  if (TIL.TheKind == TypeTestResolution::Inline)
    ExportConstant("inline_bits", TTRes.InlineBits, TIL.InlineBits);

  return nullptr;
}
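
// Illustrative example (a sketch, using a hypothetical type id "_ZTS1A"):
// for a ByteArray resolution this emits hidden aliases such as
// __typeid__ZTS1A_global_addr and __typeid__ZTS1A_byte_array, and, on x86
// ELF (where shouldExportConstantsAsAbsoluteSymbols() is true), absolute
// symbols __typeid__ZTS1A_align and __typeid__ZTS1A_size_m1; elsewhere those
// constants travel through the summary instead.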

LowerTypeTestsModule::TypeIdLowering
LowerTypeTestsModule::importTypeId(StringRef TypeId) {
  const TypeIdSummary *TidSummary = ImportSummary->getTypeIdSummary(TypeId);
  if (!TidSummary)
    return {}; // Unsat: no globals match this type id.
  const TypeTestResolution &TTRes = TidSummary->TTRes;

  TypeIdLowering TIL;
  TIL.TheKind = TTRes.TheKind;

  auto ImportGlobal = [&](StringRef Name) {
    // Give the global a type of length 0 so that it is not assumed not to alias
    // with any other global.
    GlobalVariable *GV = M.getOrInsertGlobal(
        ("__typeid_" + TypeId + "_" + Name).str(), Int8Arr0Ty);
    GV->setVisibility(GlobalValue::HiddenVisibility);
    return GV;
  };

  auto ImportConstant = [&](StringRef Name, uint64_t Const, unsigned AbsWidth,
                            Type *Ty) {
    if (!shouldExportConstantsAsAbsoluteSymbols()) {
      Constant *C =
          ConstantInt::get(isa<IntegerType>(Ty) ? Ty : Int64Ty, Const);
      if (!isa<IntegerType>(Ty))
        C = ConstantExpr::getIntToPtr(C, Ty);
      return C;
    }

    Constant *C = ImportGlobal(Name);
    auto *GV = cast<GlobalVariable>(C->stripPointerCasts());
    if (isa<IntegerType>(Ty))
      C = ConstantExpr::getPtrToInt(C, Ty);
    if (GV->getMetadata(LLVMContext::MD_absolute_symbol))
      return C;

    auto SetAbsRange = [&](uint64_t Min, uint64_t Max) {
      auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Min));
      auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Max));
      GV->setMetadata(LLVMContext::MD_absolute_symbol,
                      MDNode::get(M.getContext(), {MinC, MaxC}));
    };
    if (AbsWidth == IntPtrTy->getBitWidth())
      SetAbsRange(~0ull, ~0ull); // Full set.
    else
      SetAbsRange(0, 1ull << AbsWidth);
    return C;
  };

  if (TIL.TheKind != TypeTestResolution::Unsat) {
    auto *GV = ImportGlobal("global_addr");
    // This is either a vtable (in .data.rel.ro) or a jump table (in .text).
    // Either way it's expected to be in the low 2 GiB, so set the small code
    // model.
    //
    // For .data.rel.ro, we currently place all such sections in the low 2 GiB
    // [1], and for .text the sections are expected to be in the low 2 GiB under
    // the small and medium code models [2] and this pass only supports those
    // code models (e.g. jump tables use jmp instead of movabs/jmp).
    //
    // [1] https://github.com/llvm/llvm-project/pull/137742
    // [2] https://maskray.me/blog/2023-05-14-relocation-overflow-and-code-models
    GV->setCodeModel(CodeModel::Small);
    TIL.OffsetedGlobal = GV;
  }

  if (TIL.TheKind == TypeTestResolution::ByteArray ||
      TIL.TheKind == TypeTestResolution::Inline ||
      TIL.TheKind == TypeTestResolution::AllOnes) {
    TIL.AlignLog2 = ImportConstant("align", TTRes.AlignLog2, 8, IntPtrTy);
    TIL.SizeM1 =
        ImportConstant("size_m1", TTRes.SizeM1, TTRes.SizeM1BitWidth, IntPtrTy);
  }

  if (TIL.TheKind == TypeTestResolution::ByteArray) {
    TIL.TheByteArray = ImportGlobal("byte_array");
    TIL.BitMask = ImportConstant("bit_mask", TTRes.BitMask, 8, PtrTy);
  }

  if (TIL.TheKind == TypeTestResolution::Inline)
    TIL.InlineBits = ImportConstant(
        "inline_bits", TTRes.InlineBits, 1 << TTRes.SizeM1BitWidth,
        TTRes.SizeM1BitWidth <= 5 ? Int32Ty : Int64Ty);

  return TIL;
}
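
// Illustrative example (a sketch, continuing the hypothetical "_ZTS1A"): a
// ThinLTO backend importing that type id re-declares the exported symbols,
// e.g. __typeid__ZTS1A_global_addr, and attaches !absolute_symbol range
// metadata to the constant stand-ins so the backend can treat them as
// known-width immediates when folding the lowered test.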

void LowerTypeTestsModule::importTypeTest(CallInst *CI) {
  auto TypeIdMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));
  if (!TypeIdMDVal)
    report_fatal_error("Second argument of llvm.type.test must be metadata");

  auto TypeIdStr = dyn_cast<MDString>(TypeIdMDVal->getMetadata());
  // If this is a local unpromoted type, which doesn't have a metadata string,
  // treat as Unknown and delay lowering, so that we can still utilize it for
  // later optimizations.
  if (!TypeIdStr)
    return;

  TypeIdLowering TIL = importTypeId(TypeIdStr->getString());
  Value *Lowered = lowerTypeTestCall(TypeIdStr, CI, TIL);
  if (Lowered) {
    CI->replaceAllUsesWith(Lowered);
    CI->eraseFromParent();
  }
}

void LowerTypeTestsModule::maybeReplaceComdat(Function *F,
                                              StringRef OriginalName) {
  // For COFF we should also rename the comdat if this function also
  // happens to be the key function. Even if the comdat name changes, this
  // should still be fine since comdat and symbol resolution happens
  // before LTO, so all symbols which would prevail have been selected.
  if (F->hasComdat() && ObjectFormat == Triple::COFF &&
      F->getComdat()->getName() == OriginalName) {
    Comdat *OldComdat = F->getComdat();
    Comdat *NewComdat = M.getOrInsertComdat(F->getName());
    for (GlobalObject &GO : M.global_objects()) {
      if (GO.getComdat() == OldComdat)
        GO.setComdat(NewComdat);
    }
  }
}

// ThinLTO backend: the function F has a jump table entry; update this module
// accordingly. isJumpTableCanonical describes the type of the jump table entry.
void LowerTypeTestsModule::importFunction(Function *F,
                                          bool isJumpTableCanonical) {
  assert(F->getType()->getAddressSpace() == 0);

  GlobalValue::VisibilityTypes Visibility = F->getVisibility();
  std::string Name = std::string(F->getName());

  if (F->isDeclarationForLinker() && isJumpTableCanonical) {
    // Non-dso_local functions may be overridden at run time,
    // don't short-circuit them.
    if (F->isDSOLocal()) {
      Function *RealF = Function::Create(F->getFunctionType(),
                                         GlobalValue::ExternalLinkage,
                                         F->getAddressSpace(),
                                         Name + ".cfi", &M);
      RealF->setVisibility(GlobalValue::HiddenVisibility);
      replaceDirectCalls(F, RealF);
    }
    return;
  }

  Function *FDecl;
  if (!isJumpTableCanonical) {
    // Either a declaration of an external function or a reference to a locally
    // defined jump table.
    FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
                             F->getAddressSpace(), Name + ".cfi_jt", &M);
    FDecl->setVisibility(GlobalValue::HiddenVisibility);
  } else {
    F->setName(Name + ".cfi");
    maybeReplaceComdat(F, Name);
    FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
                             F->getAddressSpace(), Name, &M);
    FDecl->setVisibility(Visibility);
    Visibility = GlobalValue::HiddenVisibility;

    // Update aliases pointing to this function to also include the ".cfi"
    // suffix. We expect the jump table entry to either point to the real
    // function or an alias. Redirect all other users to the jump table entry.
    for (auto &U : F->uses()) {
      if (auto *A = dyn_cast<GlobalAlias>(U.getUser())) {
        std::string AliasName = A->getName().str() + ".cfi";
        Function *AliasDecl = Function::Create(
            F->getFunctionType(), GlobalValue::ExternalLinkage,
            F->getAddressSpace(), "", &M);
        AliasDecl->takeName(A);
        A->replaceAllUsesWith(AliasDecl);
        A->setName(AliasName);
      }
    }
  }

  if (F->hasExternalWeakLinkage())
    replaceWeakDeclarationWithJumpTablePtr(F, FDecl, isJumpTableCanonical);
  else
    replaceCfiUses(F, FDecl, isJumpTableCanonical);

  // Set visibility late because it's used in replaceCfiUses() to determine
  // whether uses need to be replaced.
  F->setVisibility(Visibility);
}

static auto
buildBitSets(ArrayRef<Metadata *> TypeIds,
             const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout) {
  DenseMap<Metadata *, SmallVector<uint64_t, 16>> OffsetsByTypeID;
  // Pre-populate the map with interesting type identifiers.
  for (Metadata *TypeId : TypeIds)
    OffsetsByTypeID[TypeId];
  for (const auto &[Mem, MemOff] : GlobalLayout) {
    for (MDNode *Type : Mem->types()) {
      auto It = OffsetsByTypeID.find(Type->getOperand(1));
      if (It == OffsetsByTypeID.end())
        continue;
      uint64_t Offset =
          cast<ConstantInt>(
              cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
              ->getZExtValue();
      It->second.push_back(MemOff + Offset);
    }
  }

  std::vector<std::pair<Metadata *, BitSetInfo>> BitSets;
  BitSets.reserve(TypeIds.size());
  for (Metadata *TypeId : TypeIds) {
    BitSets.emplace_back(TypeId, buildBitSet(OffsetsByTypeID[TypeId]));
    LLVM_DEBUG({
      if (auto MDS = dyn_cast<MDString>(TypeId))
        dbgs() << MDS->getString() << ": ";
      else
        dbgs() << "<unnamed>: ";
      BitSets.back().second.print(dbgs());
    });
  }

  return BitSets;
}

void LowerTypeTestsModule::lowerTypeTestCalls(
    ArrayRef<Metadata *> TypeIds, Constant *CombinedGlobalAddr,
    const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout) {
  // For each type identifier in this disjoint set...
  for (const auto &[TypeId, BSI] : buildBitSets(TypeIds, GlobalLayout)) {
    ByteArrayInfo *BAI = nullptr;
    TypeIdLowering TIL;

    uint64_t GlobalOffset =
        BSI.ByteOffset + ((BSI.BitSize - 1) << BSI.AlignLog2);
    TIL.OffsetedGlobal = ConstantExpr::getGetElementPtr(
        Int8Ty, CombinedGlobalAddr, ConstantInt::get(IntPtrTy, GlobalOffset));
    TIL.AlignLog2 = ConstantInt::get(IntPtrTy, BSI.AlignLog2);
    TIL.SizeM1 = ConstantInt::get(IntPtrTy, BSI.BitSize - 1);
    if (BSI.isAllOnes()) {
      TIL.TheKind = (BSI.BitSize == 1) ? TypeTestResolution::Single
                                       : TypeTestResolution::AllOnes;
    } else if (BSI.BitSize <= IntPtrTy->getBitWidth()) {
      TIL.TheKind = TypeTestResolution::Inline;
      uint64_t InlineBits = 0;
      for (auto Bit : BSI.Bits)
        InlineBits |= uint64_t(1) << Bit;
      if (InlineBits == 0)
        TIL.TheKind = TypeTestResolution::Unsat;
      else
        TIL.InlineBits = ConstantInt::get(
            (BSI.BitSize <= 32) ? Int32Ty : Int64Ty, InlineBits);
    } else {
      TIL.TheKind = TypeTestResolution::ByteArray;
      ++NumByteArraysCreated;
      BAI = createByteArray(BSI);
      TIL.TheByteArray = BAI->ByteArray;
      TIL.BitMask = BAI->MaskGlobal;
    }

    TypeIdUserInfo &TIUI = TypeIdUsers[TypeId];

    if (TIUI.IsExported) {
      uint8_t *MaskPtr = exportTypeId(cast<MDString>(TypeId)->getString(), TIL);
      if (BAI)
        BAI->MaskPtr = MaskPtr;
    }

    // Lower each call to llvm.type.test for this type identifier.
    for (CallInst *CI : TIUI.CallSites) {
      ++NumTypeTestCallsLowered;
      Value *Lowered = lowerTypeTestCall(TypeId, CI, TIL);
      if (Lowered) {
        CI->replaceAllUsesWith(Lowered);
        CI->eraseFromParent();
      }
    }
  }
}

void LowerTypeTestsModule::verifyTypeMDNode(GlobalObject *GO, MDNode *Type) {
  if (Type->getNumOperands() != 2)
    report_fatal_error("All operands of type metadata must have 2 elements");

  if (GO->isThreadLocal())
    report_fatal_error("Bit set element may not be thread-local");
  if (isa<GlobalVariable>(GO) && GO->hasSection())
    report_fatal_error(
        "A member of a type identifier may not have an explicit section");

  // FIXME: We previously checked that global var member of a type identifier
  // must be a definition, but the IR linker may leave type metadata on
  // declarations. We should restore this check after fixing PR31759.

  auto OffsetConstMD = dyn_cast<ConstantAsMetadata>(Type->getOperand(0));
  if (!OffsetConstMD)
    report_fatal_error("Type offset must be a constant");
  auto OffsetInt = dyn_cast<ConstantInt>(OffsetConstMD->getValue());
  if (!OffsetInt)
    report_fatal_error("Type offset must be an integer constant");
}

static const unsigned kX86JumpTableEntrySize = 8;
static const unsigned kX86IBTJumpTableEntrySize = 16;
static const unsigned kARMJumpTableEntrySize = 4;
static const unsigned kARMBTIJumpTableEntrySize = 8;
static const unsigned kARMv6MJumpTableEntrySize = 16;
static const unsigned kRISCVJumpTableEntrySize = 8;
static const unsigned kLOONGARCH64JumpTableEntrySize = 8;

bool LowerTypeTestsModule::hasBranchTargetEnforcement() {
  if (HasBranchTargetEnforcement == -1) {
    // First time this query has been called. Find out the answer by checking
    // the module flags.
    if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
            M.getModuleFlag("branch-target-enforcement")))
      HasBranchTargetEnforcement = !BTE->isZero();
    else
      HasBranchTargetEnforcement = 0;
  }
  return HasBranchTargetEnforcement;
}

unsigned
LowerTypeTestsModule::getJumpTableEntrySize(Triple::ArchType JumpTableArch) {
  switch (JumpTableArch) {
  case Triple::x86:
  case Triple::x86_64:
    if (const auto *MD = mdconst::extract_or_null<ConstantInt>(
            M.getModuleFlag("cf-protection-branch")))
      if (MD->getZExtValue())
        return kX86IBTJumpTableEntrySize;
    return kX86JumpTableEntrySize;
  case Triple::arm:
    return kARMJumpTableEntrySize;
  case Triple::thumb:
    if (CanUseThumbBWJumpTable) {
      if (hasBranchTargetEnforcement())
        return kARMBTIJumpTableEntrySize;
      return kARMJumpTableEntrySize;
    } else {
      return kARMv6MJumpTableEntrySize;
    }
  case Triple::aarch64:
    if (hasBranchTargetEnforcement())
      return kARMBTIJumpTableEntrySize;
    return kARMJumpTableEntrySize;
  case Triple::riscv32:
  case Triple::riscv64:
    return kRISCVJumpTableEntrySize;
  case Triple::loongarch64:
    return kLOONGARCH64JumpTableEntrySize;
  default:
    report_fatal_error("Unsupported architecture for jump tables");
  }
}

// Create an inline asm constant representing a jump table entry for the target.
// This consists of an instruction sequence containing a relative branch to
// Dest.
InlineAsm *
LowerTypeTestsModule::createJumpTableEntryAsm(Triple::ArchType JumpTableArch) {
  std::string Asm;
  raw_string_ostream AsmOS(Asm);

  if (JumpTableArch == Triple::x86 || JumpTableArch == Triple::x86_64) {
    bool Endbr = false;
    if (const auto *MD = mdconst::extract_or_null<ConstantInt>(
            M.getModuleFlag("cf-protection-branch")))
      Endbr = !MD->isZero();
    if (Endbr)
      AsmOS << (JumpTableArch == Triple::x86 ? "endbr32\n" : "endbr64\n");
    AsmOS << "jmp ${0:c}@plt\n";
    if (Endbr)
      AsmOS << ".balign 16, 0xcc\n";
    else
      AsmOS << "int3\nint3\nint3\n";
  } else if (JumpTableArch == Triple::arm) {
    AsmOS << "b $0\n";
  } else if (JumpTableArch == Triple::aarch64) {
    if (hasBranchTargetEnforcement())
      AsmOS << "bti c\n";
    AsmOS << "b $0\n";
  } else if (JumpTableArch == Triple::thumb) {
    if (!CanUseThumbBWJumpTable) {
      // In Armv6-M, this sequence will generate a branch without corrupting
      // any registers. We use two stack words; in the second, we construct the
      // address we'll pop into pc, and the first is used to save and restore
      // r0 which we use as a temporary register.
      //
      // To support position-independent use cases, the offset of the target
      // function is stored as a relative offset (which will expand into an
      // R_ARM_REL32 relocation in ELF, and presumably the equivalent in other
      // object file types), and added to pc after we load it. (The alternative
      // B.W is automatically pc-relative.)
      //
      // There are five 16-bit Thumb instructions here, so the .balign 4 adds a
      // sixth halfword of padding, and then the offset consumes a further 4
      // bytes, for a total of 16, which is very convenient since entries in
      // this jump table need to have power-of-two size.
      AsmOS << "push {r0,r1}\n"
            << "ldr r0, 1f\n"
            << "0: add r0, r0, pc\n"
            << "str r0, [sp, #4]\n"
            << "pop {r0,pc}\n"
            << ".balign 4\n"
            << "1: .word $0 - (0b + 4)\n";
    } else {
      if (hasBranchTargetEnforcement())
        AsmOS << "bti\n";
      AsmOS << "b.w $0\n";
    }
  } else if (JumpTableArch == Triple::riscv32 ||
             JumpTableArch == Triple::riscv64) {
    AsmOS << "tail $0@plt\n";
  } else if (JumpTableArch == Triple::loongarch64) {
    AsmOS << "pcalau12i $$t0, %pc_hi20($0)\n"
          << "jirl $$r0, $$t0, %pc_lo12($0)\n";
  } else {
    report_fatal_error("Unsupported architecture for jump tables");
  }

  return InlineAsm::get(
      FunctionType::get(Type::getVoidTy(M.getContext()), PtrTy, false),
      AsmOS.str(), "s",
      /*hasSideEffects=*/true);
}
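
// Illustrative output (a sketch): on x86-64 without IBT, an entry for a
// target function renamed to f.cfi expands to
//
//   jmp f.cfi@plt
//   int3
//   int3
//   int3
//
// padding the entry to the 8-byte kX86JumpTableEntrySize so that entry
// addresses can be computed by the bitset math.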

/// Given a disjoint set of type identifiers and functions, build the bit sets
/// and lower the llvm.type.test calls, architecture dependently.
void LowerTypeTestsModule::buildBitSetsFromFunctions(
    ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
  if (Arch == Triple::x86 || Arch == Triple::x86_64 || Arch == Triple::arm ||
      Arch == Triple::thumb || Arch == Triple::aarch64 ||
      Arch == Triple::riscv32 || Arch == Triple::riscv64 ||
      Arch == Triple::loongarch64)
    buildBitSetsFromFunctionsNative(TypeIds, Functions);
  else if (Arch == Triple::wasm32 || Arch == Triple::wasm64)
    buildBitSetsFromFunctionsWASM(TypeIds, Functions);
  else
    report_fatal_error("Unsupported architecture for jump tables");
}

void LowerTypeTestsModule::moveInitializerToModuleConstructor(
    GlobalVariable *GV) {
  if (WeakInitializerFn == nullptr) {
    WeakInitializerFn = Function::Create(
        FunctionType::get(Type::getVoidTy(M.getContext()),
                          /* IsVarArg */ false),
        GlobalValue::InternalLinkage,
        M.getDataLayout().getProgramAddressSpace(),
        "__cfi_global_var_init", &M);
    BasicBlock *BB =
        BasicBlock::Create(M.getContext(), "entry", WeakInitializerFn);
    ReturnInst::Create(M.getContext(), BB);
    WeakInitializerFn->setSection(
        ObjectFormat == Triple::MachO
            ? "__TEXT,__StaticInit,regular,pure_instructions"
            : ".text.startup");
    // This code is equivalent to relocation application, and should run at the
    // earliest possible time (i.e. with the highest priority).
    appendToGlobalCtors(M, WeakInitializerFn, /* Priority */ 0);
  }

  IRBuilder<> IRB(WeakInitializerFn->getEntryBlock().getTerminator());
  GV->setConstant(false);
  IRB.CreateAlignedStore(GV->getInitializer(), GV, GV->getAlign());
  GV->setInitializer(Constant::getNullValue(GV->getValueType()));
}

void LowerTypeTestsModule::findGlobalVariableUsersOf(
    Constant *C, SmallSetVector<GlobalVariable *, 8> &Out) {
  for (auto *U : C->users()) {
    if (auto *GV = dyn_cast<GlobalVariable>(U))
      Out.insert(GV);
    else if (auto *C2 = dyn_cast<Constant>(U))
      findGlobalVariableUsersOf(C2, Out);
  }
}

// Replace all uses of F with (F ? JT : 0).
void LowerTypeTestsModule::replaceWeakDeclarationWithJumpTablePtr(
    Function *F, Constant *JT, bool IsJumpTableCanonical) {
  // The target expression cannot appear in a constant initializer on most
  // (all?) targets. Switch to a runtime initializer.
  SmallSetVector<GlobalVariable *, 8> GlobalVarUsers;
  findGlobalVariableUsersOf(F, GlobalVarUsers);
  for (auto *GV : GlobalVarUsers) {
    if (GV == GlobalAnnotation)
      continue;
    moveInitializerToModuleConstructor(GV);
  }

  // Cannot RAUW F with an expression that uses F. Replace with a temporary
  // placeholder first.
  Function *PlaceholderFn =
      Function::Create(cast<FunctionType>(F->getValueType()),
                       GlobalValue::ExternalWeakLinkage,
                       F->getAddressSpace(), "", &M);
  replaceCfiUses(F, PlaceholderFn, IsJumpTableCanonical);

  convertUsersOfConstantsToInstructions(PlaceholderFn);
  // Don't use range based loop, because use list will be modified.
  while (!PlaceholderFn->use_empty()) {
    Use &U = *PlaceholderFn->use_begin();
    auto *InsertPt = dyn_cast<Instruction>(U.getUser());
    assert(InsertPt && "Non-instruction users should have been eliminated");
    auto *PN = dyn_cast<PHINode>(InsertPt);
    if (PN)
      InsertPt = PN->getIncomingBlock(U)->getTerminator();
    IRBuilder Builder(InsertPt);
    Value *ICmp = Builder.CreateICmp(CmpInst::ICMP_NE, F,
                                     Constant::getNullValue(F->getType()));
    Value *Select = Builder.CreateSelect(ICmp, JT,
                                         Constant::getNullValue(F->getType()));
    // For phi nodes, we need to update the incoming value for all operands
    // with the same predecessor.
    if (PN)
      PN->setIncomingValueForBlock(InsertPt->getParent(), Select);
    else
      U.set(Select);
  }
  PlaceholderFn->eraseFromParent();
}
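
// IR sketch (illustrative): each remaining use of a weak declaration @f
// becomes
//
//   %live = icmp ne ptr @f, null
//   %sel  = select i1 %live, ptr @f.cfi_jt, ptr null
//
// so the jump table address is only substituted when the weak symbol is
// actually defined at link time.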

static bool isThumbFunction(Function *F, Triple::ArchType ModuleArch) {
  Attribute TFAttr = F->getFnAttribute("target-features");
  if (TFAttr.isValid()) {
    SmallVector<StringRef, 6> Features;
    TFAttr.getValueAsString().split(Features, ',');
    for (StringRef Feature : Features) {
      if (Feature == "-thumb-mode")
        return false;
      else if (Feature == "+thumb-mode")
        return true;
    }
  }

  return ModuleArch == Triple::thumb;
}

// Each jump table must be either ARM or Thumb as a whole for the bit-test math
// to work. Pick one that matches the majority of members to minimize interop
// veneers inserted by the linker.
Triple::ArchType LowerTypeTestsModule::selectJumpTableArmEncoding(
    ArrayRef<GlobalTypeMember *> Functions) {
  if (Arch != Triple::arm && Arch != Triple::thumb)
    return Arch;

  if (!CanUseThumbBWJumpTable && CanUseArmJumpTable) {
    // In architectures that provide Arm and Thumb-1 but not Thumb-2,
    // we should always prefer the Arm jump table format, because the
    // Thumb-1 one is larger and slower.
    return Triple::arm;
  }

  // Otherwise, go with majority vote.
  unsigned ArmCount = 0, ThumbCount = 0;
  for (const auto GTM : Functions) {
    if (!GTM->isJumpTableCanonical()) {
      // PLT stubs are always ARM.
      // FIXME: This is the wrong heuristic for non-canonical jump tables.
      ++ArmCount;
      continue;
    }

    Function *F = cast<Function>(GTM->getGlobal());
    ++(isThumbFunction(F, Arch) ? ThumbCount : ArmCount);
  }

  return ArmCount > ThumbCount ? Triple::arm : Triple::thumb;
}
1529
1530void LowerTypeTestsModule::createJumpTable(
1531 Function *F, ArrayRef<GlobalTypeMember *> Functions,
1532 Triple::ArchType JumpTableArch) {
1533 BasicBlock *BB = BasicBlock::Create(M.getContext(), "entry", F);
1534 IRBuilder<> IRB(BB);
1535
1536 InlineAsm *JumpTableAsm = createJumpTableEntryAsm(JumpTableArch);
1537
1538 // Check if all entries have the NoUnwind attribute.
1539 // If all entries have it, we can safely mark the
1540 // cfi.jumptable as NoUnwind, otherwise, direct calls
1541 // to the jump table will not handle exceptions properly
1542 bool areAllEntriesNounwind = true;
1543 for (GlobalTypeMember *GTM : Functions) {
1544 if (!llvm::cast<llvm::Function>(GTM->getGlobal())
1545 ->hasFnAttribute(llvm::Attribute::NoUnwind)) {
1546 areAllEntriesNounwind = false;
1547 }
1548 IRB.CreateCall(JumpTableAsm, GTM->getGlobal());
1549 }
1550 IRB.CreateUnreachable();
1551
1552 // Align the whole table by entry size.
1553 F->setAlignment(Align(getJumpTableEntrySize(JumpTableArch)));
1554 // Skip prologue.
1555 // Disabled on win32 due to https://llvm.org/bugs/show_bug.cgi?id=28641#c3.
1556 // Luckily, this function does not get any prologue even without the
1557 // attribute.
1558 if (OS != Triple::Win32)
1559 F->addFnAttr(Attribute::Naked);
1560 if (JumpTableArch == Triple::arm)
1561 F->addFnAttr("target-features", "-thumb-mode");
1562 if (JumpTableArch == Triple::thumb) {
1563 if (hasBranchTargetEnforcement()) {
1564 // If we're generating a Thumb jump table with BTI, add a target-features
1565 // setting to ensure BTI can be assembled.
1566 F->addFnAttr("target-features", "+thumb-mode,+pacbti");
1567 } else {
1568 F->addFnAttr("target-features", "+thumb-mode");
1569 if (CanUseThumbBWJumpTable) {
1570 // Thumb jump table assembly needs Thumb2. The following attribute is
1571 // added by Clang for -march=armv7.
1572 F->addFnAttr("target-cpu", "cortex-a8");
1573 }
1574 }
1575 }
1576 // When -mbranch-protection= is used, the inline asm adds a BTI. Suppress BTI
1577 // for the function to avoid double BTI. This is a no-op without
1578 // -mbranch-protection=.
1579 if (JumpTableArch == Triple::aarch64 || JumpTableArch == Triple::thumb) {
1580 if (F->hasFnAttribute("branch-target-enforcement"))
1581 F->removeFnAttr("branch-target-enforcement");
1582 if (F->hasFnAttribute("sign-return-address"))
1583 F->removeFnAttr("sign-return-address");
1584 }
1585 if (JumpTableArch == Triple::riscv32 || JumpTableArch == Triple::riscv64) {
1586 // Make sure the jump table assembly is not modified by the assembler or
1587 // the linker.
1588 F->addFnAttr("target-features", "-c,-relax");
1589 }
1590 // When -fcf-protection= is used, the inline asm adds an ENDBR. Suppress ENDBR
1591 // for the function to avoid double ENDBR. This is a no-op without
1592 // -fcf-protection=.
1593 if (JumpTableArch == Triple::x86 || JumpTableArch == Triple::x86_64)
1594 F->addFnAttr(Attribute::NoCfCheck);
1595
1596 // Make sure we don't emit .eh_frame for this function if it isn't needed.
1597 if (areAllEntriesNounwind)
1598 F->addFnAttr(Attribute::NoUnwind);
1599
1600 // Make sure we do not inline any calls to the cfi.jumptable.
1601 F->addFnAttr(Attribute::NoInline);
1602}
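// Sketch of the generated thunk on x86-64 (illustrative only; the exact asm
// string and attribute set depend on the target and LLVM version, and are
// produced by createJumpTableEntryAsm above):
//
//   define void @.cfi.jumptable() align 8 {
//   entry:
//     call void asm sideeffect "jmp ${0:c}@plt\0Aint3\0Aint3\0Aint3\0A",
//                              "s"(ptr @f.cfi)
//     call void asm sideeffect "jmp ${0:c}@plt\0Aint3\0Aint3\0Aint3\0A",
//                              "s"(ptr @g.cfi)
//     unreachable
//   }
//
// with naked, noinline and (when every entry is nounwind) nounwind applied
// as in the code above.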
1603
1604/// Given a disjoint set of type identifiers and functions, build a jump table
1605/// for the functions, build the bit sets and lower the llvm.type.test calls.
1606 void LowerTypeTestsModule::buildBitSetsFromFunctionsNative(
1607 ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
1608 // Unlike the global bitset builder, the function bitset builder cannot
1609 // re-arrange functions in a particular order and base its calculations on the
1610 // layout of the functions' entry points, as we have no idea how large a
1611 // particular function will end up being (the size could even depend on what
1612 // this pass does!) Instead, we build a jump table, which is a block of code
1613 // consisting of one branch instruction for each of the functions in the bit
1614 // set that branches to the target function, and redirect any taken function
1615 // addresses to the corresponding jump table entry. In the object file's
1616 // symbol table, the symbols for the target functions also refer to the jump
1617 // table entries, so that addresses taken outside the module will pass any
1618 // verification done inside the module.
1619 //
1620 // In more concrete terms, suppose we have three functions f, g, h which are
1621 // of the same type, and a function foo that returns their addresses:
1622 //
1623 // f:
1624 // mov 0, %eax
1625 // ret
1626 //
1627 // g:
1628 // mov 1, %eax
1629 // ret
1630 //
1631 // h:
1632 // mov 2, %eax
1633 // ret
1634 //
1635 // foo:
1636 // mov f, %eax
1637 // mov g, %edx
1638 // mov h, %ecx
1639 // ret
1640 //
1641 // We output the jump table as a module-level inline asm string. The end
1642 // result will (conceptually) look like this:
1643 //
1644 // f = .cfi.jumptable
1645 // g = .cfi.jumptable + 4
1646 // h = .cfi.jumptable + 8
1647 // .cfi.jumptable:
1648 // jmp f.cfi ; 5 bytes
1649 // int3 ; 1 byte
1650 // int3 ; 1 byte
1651 // int3 ; 1 byte
1652 // jmp g.cfi ; 5 bytes
1653 // int3 ; 1 byte
1654 // int3 ; 1 byte
1655 // int3 ; 1 byte
1656 // jmp h.cfi ; 5 bytes
1657 // int3 ; 1 byte
1658 // int3 ; 1 byte
1659 // int3 ; 1 byte
1660 //
1661 // f.cfi:
1662 // mov 0, %eax
1663 // ret
1664 //
1665 // g.cfi:
1666 // mov 1, %eax
1667 // ret
1668 //
1669 // h.cfi:
1670 // mov 2, %eax
1671 // ret
1672 //
1673 // foo:
1674 // mov f, %eax
1675 // mov g, %edx
1676 // mov h, %ecx
1677 // ret
1678 //
1679 // Because the addresses of f, g, h are evenly spaced at a power of 2, in the
1680 // normal case the check can be carried out using the same kind of simple
1681 // arithmetic that we normally use for globals.
1682
1683 // FIXME: find a better way to represent the jumptable in the IR.
1684 assert(!Functions.empty());
1685
1686 // Decide on the jump table encoding, so that we know how big the
1687 // entries will be.
1688 Triple::ArchType JumpTableArch = selectJumpTableArmEncoding(Functions);
1689
1690 // Build a simple layout based on the regular layout of jump tables.
1691 DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
1692 unsigned EntrySize = getJumpTableEntrySize(JumpTableArch);
1693 for (unsigned I = 0; I != Functions.size(); ++I)
1694 GlobalLayout[Functions[I]] = I * EntrySize;
1695
1696 Function *JumpTableFn =
1697 Function::Create(FunctionType::get(Type::getVoidTy(M.getContext()),
1698 /* IsVarArg */ false),
1699 GlobalValue::ExternalLinkage,
1700 M.getDataLayout().getProgramAddressSpace(),
1701 ".cfi.jumptable", &M);
1702 ArrayType *JumpTableEntryType = ArrayType::get(Int8Ty, EntrySize);
1703 ArrayType *JumpTableType =
1704 ArrayType::get(JumpTableEntryType, Functions.size());
1705 auto JumpTable = ConstantExpr::getPointerCast(
1706 JumpTableFn, PointerType::getUnqual(M.getContext()));
1707
1708 lowerTypeTestCalls(TypeIds, JumpTable, GlobalLayout);
1709
1710 // Build aliases pointing to offsets into the jump table, and replace
1711 // references to the original functions with references to the aliases.
1712 for (unsigned I = 0; I != Functions.size(); ++I) {
1713 Function *F = cast<Function>(Functions[I]->getGlobal());
1714 bool IsJumpTableCanonical = Functions[I]->isJumpTableCanonical();
1715
1716 Constant *CombinedGlobalElemPtr = ConstantExpr::getInBoundsGetElementPtr(
1717 JumpTableType, JumpTable,
1718 ArrayRef<Constant *>{ConstantInt::get(IntPtrTy, 0),
1719 ConstantInt::get(IntPtrTy, I)});
1720
1721 const bool IsExported = Functions[I]->isExported();
1722 if (!IsJumpTableCanonical) {
1723 GlobalValue::LinkageTypes LT = IsExported ? GlobalValue::ExternalLinkage
1724 : GlobalValue::InternalLinkage;
1725 GlobalAlias *JtAlias = GlobalAlias::create(JumpTableEntryType, 0, LT,
1726 F->getName() + ".cfi_jt",
1727 CombinedGlobalElemPtr, &M);
1728 if (IsExported)
1729 JtAlias->setVisibility(GlobalValue::HiddenVisibility);
1730 else
1731 appendToUsed(M, {JtAlias});
1732 }
1733
1734 if (IsExported) {
1735 if (IsJumpTableCanonical)
1736 ExportSummary->cfiFunctionDefs().emplace(F->getName());
1737 else
1738 ExportSummary->cfiFunctionDecls().emplace(F->getName());
1739 }
1740
1741 if (!IsJumpTableCanonical) {
1742 if (F->hasExternalWeakLinkage())
1743 replaceWeakDeclarationWithJumpTablePtr(F, CombinedGlobalElemPtr,
1744 IsJumpTableCanonical);
1745 else
1746 replaceCfiUses(F, CombinedGlobalElemPtr, IsJumpTableCanonical);
1747 } else {
1748 assert(F->getType()->getAddressSpace() == 0);
1749
1750 GlobalAlias *FAlias =
1751 GlobalAlias::create(JumpTableEntryType, 0, F->getLinkage(), "",
1752 CombinedGlobalElemPtr, &M);
1753 FAlias->setVisibility(F->getVisibility());
1754 FAlias->takeName(F);
1755 if (FAlias->hasName()) {
1756 F->setName(FAlias->getName() + ".cfi");
1757 maybeReplaceComdat(F, FAlias->getName());
1758 }
1759 replaceCfiUses(F, FAlias, IsJumpTableCanonical);
1760 if (!F->hasLocalLinkage())
1761 F->setVisibility(GlobalVariable::HiddenVisibility);
1762 }
1763 }
1764
1765 createJumpTable(JumpTableFn, Functions, JumpTableArch);
1766}
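// Illustrative net effect for one canonical, exported member f (not from the
// original source): the body is renamed f.cfi, an alias named f points at
// f's slot in .cfi.jumptable, and for non-canonical members an f.cfi_jt
// alias marks the slot instead; address comparisons therefore observe the
// evenly spaced jump table entries rather than the function bodies.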
1767
1768/// Assign a dummy layout using an incrementing counter, tag each function
1769/// with its index represented as metadata, and lower each type test to an
1770/// integer range comparison. During generation of the indirect function call
1771/// table in the backend, it will assign the given indexes.
1772/// Note: Dynamic linking is not supported, as the WebAssembly ABI has not yet
1773/// been finalized.
1774 void LowerTypeTestsModule::buildBitSetsFromFunctionsWASM(
1775 ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
1776 assert(!Functions.empty());
1777
1778 // Build consecutive monotonic integer ranges for each call target set
1779 DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
1780
1781 for (GlobalTypeMember *GTM : Functions) {
1782 Function *F = cast<Function>(GTM->getGlobal());
1783
1784 // Skip functions that are not address taken, to avoid bloating the table
1785 if (!F->hasAddressTaken())
1786 continue;
1787
1788 // Store metadata with the index for each function
1789 MDNode *MD = MDNode::get(F->getContext(),
1790 ArrayRef<Metadata *>(ConstantAsMetadata::get(
1791 ConstantInt::get(Int64Ty, IndirectIndex))));
1792 F->setMetadata("wasm.index", MD);
1793
1794 // Assign the counter value
1795 GlobalLayout[GTM] = IndirectIndex++;
1796 }
1797
1798 // The indirect function table index space starts at zero, so pass a NULL
1799 // pointer as the subtracted "jump table" offset.
1800 lowerTypeTestCalls(TypeIds, ConstantPointerNull::get(PtrTy),
1801 GlobalLayout);
1802}
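// Illustrative result (not from the original source): an address-taken
// member ends up tagged like
//   define void @f() !wasm.index !0 { ... }
//   !0 = !{i64 1}
// and the type test for its type id lowers to a range check on the raw
// pointer value, since a WebAssembly function pointer is its table index.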
1803
1804 void LowerTypeTestsModule::buildBitSetsFromDisjointSet(
1805 ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Globals,
1806 ArrayRef<ICallBranchFunnel *> ICallBranchFunnels) {
1807 DenseMap<Metadata *, uint64_t> TypeIdIndices;
1808 for (unsigned I = 0; I != TypeIds.size(); ++I)
1809 TypeIdIndices[TypeIds[I]] = I;
1810
1811 // For each type identifier, build a set of indices that refer to members of
1812 // the type identifier.
1813 std::vector<std::set<uint64_t>> TypeMembers(TypeIds.size());
1814 unsigned GlobalIndex = 0;
1815 DenseMap<GlobalTypeMember *, uint64_t> GlobalIndices;
1816 for (GlobalTypeMember *GTM : Globals) {
1817 for (MDNode *Type : GTM->types()) {
1818 // Type = { offset, type identifier }
1819 auto I = TypeIdIndices.find(Type->getOperand(1));
1820 if (I != TypeIdIndices.end())
1821 TypeMembers[I->second].insert(GlobalIndex);
1822 }
1823 GlobalIndices[GTM] = GlobalIndex;
1824 GlobalIndex++;
1825 }
1826
1827 for (ICallBranchFunnel *JT : ICallBranchFunnels) {
1828 TypeMembers.emplace_back();
1829 std::set<uint64_t> &TMSet = TypeMembers.back();
1830 for (GlobalTypeMember *T : JT->targets())
1831 TMSet.insert(GlobalIndices[T]);
1832 }
1833
1834 // Order the sets of indices by size. The GlobalLayoutBuilder works best
1835 // when given small index sets first.
1836 llvm::stable_sort(TypeMembers, [](const std::set<uint64_t> &O1,
1837 const std::set<uint64_t> &O2) {
1838 return O1.size() < O2.size();
1839 });
1840
1841 // Create a GlobalLayoutBuilder and provide it with index sets as layout
1842 // fragments. The GlobalLayoutBuilder tries to lay out members of fragments as
1843 // close together as possible.
1844 GlobalLayoutBuilder GLB(Globals.size());
1845 for (auto &&MemSet : TypeMembers)
1846 GLB.addFragment(MemSet);
1847
1848 // Build a vector of globals with the computed layout.
1849 bool IsGlobalSet =
1850 Globals.empty() || isa<GlobalVariable>(Globals[0]->getGlobal());
1851 std::vector<GlobalTypeMember *> OrderedGTMs(Globals.size());
1852 auto OGTMI = OrderedGTMs.begin();
1853 for (auto &&F : GLB.Fragments) {
1854 for (auto &&Offset : F) {
1855 if (IsGlobalSet != isa<GlobalVariable>(Globals[Offset]->getGlobal()))
1856 report_fatal_error("Type identifier may not contain both global "
1857 "variables and functions");
1858 *OGTMI++ = Globals[Offset];
1859 }
1860 }
1861
1862 // Build the bitsets from this disjoint set.
1863 if (IsGlobalSet)
1864 buildBitSetsFromGlobalVariables(TypeIds, OrderedGTMs);
1865 else
1866 buildBitSetsFromFunctions(TypeIds, OrderedGTMs);
1867}
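// Worked example (not from the original source): with members g0..g3 and
// type sets {g1,g2} and {g0,g1,g2,g3}, the smaller set is handed to the
// layout builder first, so g1 and g2 end up adjacent in OrderedGTMs; the
// {g1,g2} bitset then covers a minimal contiguous range while the larger
// set spans the whole layout.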
1868
1869/// Lower all type tests in this module.
1870LowerTypeTestsModule::LowerTypeTestsModule(
1871 Module &M, ModuleAnalysisManager &AM, ModuleSummaryIndex *ExportSummary,
1872 const ModuleSummaryIndex *ImportSummary, DropTestKind DropTypeTests)
1873 : M(M), ExportSummary(ExportSummary), ImportSummary(ImportSummary),
1874 DropTypeTests(ClDropTypeTests > DropTypeTests ? ClDropTypeTests
1875 : DropTypeTests) {
1876 assert(!(ExportSummary && ImportSummary));
1877 Triple TargetTriple(M.getTargetTriple());
1878 Arch = TargetTriple.getArch();
1879 if (Arch == Triple::arm)
1880 CanUseArmJumpTable = true;
1881 if (Arch == Triple::arm || Arch == Triple::thumb) {
1882 auto &FAM =
1883 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
1884 for (Function &F : M) {
1885 // Skip declarations since we should not query the TTI for them.
1886 if (F.isDeclaration())
1887 continue;
1888 auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
1889 if (TTI.hasArmWideBranch(false))
1890 CanUseArmJumpTable = true;
1891 if (TTI.hasArmWideBranch(true))
1892 CanUseThumbBWJumpTable = true;
1893 }
1894 }
1895 OS = TargetTriple.getOS();
1896 ObjectFormat = TargetTriple.getObjectFormat();
1897
1898 // A function annotation describes or applies to the function itself, and
1899 // shouldn't be associated with the jump table thunk generated for CFI.
1900 GlobalAnnotation = M.getGlobalVariable("llvm.global.annotations");
1901 if (GlobalAnnotation && GlobalAnnotation->hasInitializer()) {
1902 const ConstantArray *CA =
1903 cast<ConstantArray>(GlobalAnnotation->getInitializer());
1904 FunctionAnnotations.insert_range(CA->operands());
1905 }
1906}
1907
1908bool LowerTypeTestsModule::runForTesting(Module &M, ModuleAnalysisManager &AM) {
1909 ModuleSummaryIndex Summary(/*HaveGVs=*/false);
1910
1911 // Handle the command-line summary arguments. This code is for testing
1912 // purposes only, so we handle errors directly.
1913 if (!ClReadSummary.empty()) {
1914 ExitOnError ExitOnErr("-lowertypetests-read-summary: " + ClReadSummary +
1915 ": ");
1916 auto ReadSummaryFile = ExitOnErr(errorOrToExpected(
1917 MemoryBuffer::getFile(ClReadSummary, /*IsText=*/true)));
1918
1919 yaml::Input In(ReadSummaryFile->getBuffer());
1920 In >> Summary;
1921 ExitOnErr(errorCodeToError(In.error()));
1922 }
1923
1924 bool Changed =
1925 LowerTypeTestsModule(
1926 M, AM,
1927 ClSummaryAction == PassSummaryAction::Export ? &Summary : nullptr,
1928 ClSummaryAction == PassSummaryAction::Import ? &Summary : nullptr,
1929 /*DropTypeTests=*/DropTestKind::None)
1930 .lower();
1931
1932 if (!ClWriteSummary.empty()) {
1933 ExitOnError ExitOnErr("-lowertypetests-write-summary: " + ClWriteSummary +
1934 ": ");
1935 std::error_code EC;
1936 raw_fd_ostream OS(ClWriteSummary, EC, sys::fs::OF_TextWithCRLF);
1937 ExitOnErr(errorCodeToError(EC));
1938
1939 yaml::Output Out(OS);
1940 Out << Summary;
1941 }
1942
1943 return Changed;
1944}
1945
1946static bool isDirectCall(Use& U) {
1947 auto *Usr = dyn_cast<CallInst>(U.getUser());
1948 if (Usr) {
1949 auto *CB = dyn_cast<CallBase>(Usr);
1950 if (CB && CB->isCallee(&U))
1951 return true;
1952 }
1953 return false;
1954}
1955
1956void LowerTypeTestsModule::replaceCfiUses(Function *Old, Value *New,
1957 bool IsJumpTableCanonical) {
1958 SmallSetVector<Constant *, 4> Constants;
1959 for (Use &U : llvm::make_early_inc_range(Old->uses())) {
1960 // Skip no_cfi values, which refer to the function body instead of the jump
1961 // table.
1962 if (isa<NoCFIValue>(U.getUser()))
1963 continue;
1964
1965 // Skip direct calls to externally defined or non-dso_local functions.
1966 if (isDirectCall(U) && (Old->isDSOLocal() || !IsJumpTableCanonical))
1967 continue;
1968
1969 // Skip function annotation.
1970 if (isFunctionAnnotation(U.getUser()))
1971 continue;
1972
1973 // Must handle Constants specially; we cannot call replaceUsesOfWith on a
1974 // constant because they are uniqued.
1975 if (auto *C = dyn_cast<Constant>(U.getUser())) {
1976 if (!isa<GlobalValue>(C)) {
1977 // Save unique users to avoid processing operand replacement
1978 // more than once.
1979 Constants.insert(C);
1980 continue;
1981 }
1982 }
1983
1984 U.set(New);
1985 }
1986
1987 // Process operand replacement of saved constants.
1988 for (auto *C : Constants)
1989 C->handleOperandChange(Old, New);
1990}
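// Illustrative case (not from the original source): a use inside a constant
// initializer such as
//   @vtable = constant [1 x ptr] [ptr @f]
// cannot be rewritten through U.set() because constant aggregates are
// uniqued, so it is routed through handleOperandChange above instead.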
1991
1992 void LowerTypeTestsModule::replaceDirectCalls(Value *Old, Value *New) {
1993 Old->replaceUsesWithIf(New, isDirectCall);
1994}
1995
1996static void dropTypeTests(Module &M, Function &TypeTestFunc,
1997 bool ShouldDropAll) {
1998 for (Use &U : llvm::make_early_inc_range(TypeTestFunc.uses())) {
1999 auto *CI = cast<CallInst>(U.getUser());
2000 // Find and erase llvm.assume intrinsics for this llvm.type.test call.
2001 for (Use &CIU : llvm::make_early_inc_range(CI->uses()))
2002 if (auto *Assume = dyn_cast<AssumeInst>(CIU.getUser()))
2003 Assume->eraseFromParent();
2004 // If the assume was merged with another assume, we might have a use on a
2005 // phi (which will feed the assume). Simply replace the use on the phi
2006 // with "true" and leave the merged assume.
2007 //
2008 // If ShouldDropAll is set, then we need to update any remaining uses,
2009 // regardless of the instruction type.
2010 if (!CI->use_empty()) {
2011 assert(ShouldDropAll || all_of(CI->users(), [](User *U) -> bool {
2012 return isa<PHINode>(U);
2013 }));
2014 CI->replaceAllUsesWith(ConstantInt::getTrue(M.getContext()));
2015 }
2016 CI->eraseFromParent();
2017 }
2018}
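// Illustrative sequence (not from the original source) of what gets erased:
//   %t = call i1 @llvm.type.test(ptr %p, metadata !"_ZTS1A")
//   call void @llvm.assume(i1 %t)
// Any phi that survived assume-merging is instead fed the constant true.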
2019
2020bool LowerTypeTestsModule::lower() {
2021 Function *TypeTestFunc =
2022 Intrinsic::getDeclarationIfExists(&M, Intrinsic::type_test);
2023
2024 if (DropTypeTests != DropTestKind::None) {
2025 bool ShouldDropAll = DropTypeTests == DropTestKind::All;
2026 if (TypeTestFunc)
2027 dropTypeTests(M, *TypeTestFunc, ShouldDropAll);
2028 // Normally we'd have already removed all @llvm.public.type.test calls,
2029 // except for in the case where we originally were performing ThinLTO but
2030 // decided not to in the backend.
2031 Function *PublicTypeTestFunc =
2032 Intrinsic::getDeclarationIfExists(&M, Intrinsic::public_type_test);
2033 if (PublicTypeTestFunc)
2034 dropTypeTests(M, *PublicTypeTestFunc, ShouldDropAll);
2035 if (TypeTestFunc || PublicTypeTestFunc) {
2036 // We have deleted the type intrinsics, so we no longer have enough
2037 // information to reason about the liveness of virtual function pointers
2038 // in GlobalDCE.
2039 for (GlobalVariable &GV : M.globals())
2040 GV.eraseMetadata(LLVMContext::MD_vcall_visibility);
2041 return true;
2042 }
2043 return false;
2044 }
2045
2046 // If only some of the modules were split, we cannot correctly perform
2047 // this transformation. We already checked for the presence of type tests
2048 // with partially split modules during the thin link, and would have emitted
2049 // an error if any were found, so here we can simply return.
2050 if ((ExportSummary && ExportSummary->partiallySplitLTOUnits()) ||
2051 (ImportSummary && ImportSummary->partiallySplitLTOUnits()))
2052 return false;
2053
2054 Function *ICallBranchFunnelFunc =
2055 Intrinsic::getDeclarationIfExists(&M, Intrinsic::icall_branch_funnel);
2056 if ((!TypeTestFunc || TypeTestFunc->use_empty()) &&
2057 (!ICallBranchFunnelFunc || ICallBranchFunnelFunc->use_empty()) &&
2058 !ExportSummary && !ImportSummary)
2059 return false;
2060
2061 if (ImportSummary) {
2062 if (TypeTestFunc)
2063 for (Use &U : llvm::make_early_inc_range(TypeTestFunc->uses()))
2064 importTypeTest(cast<CallInst>(U.getUser()));
2065
2066 if (ICallBranchFunnelFunc && !ICallBranchFunnelFunc->use_empty())
2067 report_fatal_error(
2068 "unexpected call to llvm.icall.branch.funnel during import phase");
2069
2070 SmallVector<Function *, 8> Defs;
2071 SmallVector<Function *, 8> Decls;
2072 for (auto &F : M) {
2073 // CFI functions are either external, or promoted. A local function may
2074 // have the same name, but it's not the one we are looking for.
2075 if (F.hasLocalLinkage())
2076 continue;
2077 if (ImportSummary->cfiFunctionDefs().count(F.getName()))
2078 Defs.push_back(&F);
2079 else if (ImportSummary->cfiFunctionDecls().count(F.getName()))
2080 Decls.push_back(&F);
2081 }
2082
2083 {
2084 ScopedSaveAliaseesAndUsed S(M);
2085 for (auto *F : Defs)
2086 importFunction(F, /*isJumpTableCanonical*/ true);
2087 for (auto *F : Decls)
2088 importFunction(F, /*isJumpTableCanonical*/ false);
2089 }
2090
2091 return true;
2092 }
2093
2094 // Equivalence class set containing type identifiers and the globals that
2095 // reference them. This is used to partition the set of type identifiers in
2096 // the module into disjoint sets.
2097 using GlobalClassesTy = EquivalenceClasses<
2098 PointerUnion<GlobalTypeMember *, Metadata *, ICallBranchFunnel *>>;
2099 GlobalClassesTy GlobalClasses;
2100
2101 // Verify the type metadata and build a few data structures to let us
2102 // efficiently enumerate the type identifiers associated with a global:
2103 // a list of GlobalTypeMembers (a GlobalObject stored alongside a vector
2104 // of associated type metadata) and a mapping from type identifiers to their
2105 // list of GlobalTypeMembers and last observed index in the list of globals.
2106 // The indices will be used later to deterministically order the list of type
2107 // identifiers.
2108 BumpPtrAllocator Alloc;
2109 struct TIInfo {
2110 unsigned UniqueId;
2111 std::vector<GlobalTypeMember *> RefGlobals;
2112 };
2113 DenseMap<Metadata *, TIInfo> TypeIdInfo;
2114 unsigned CurUniqueId = 0;
2115 SmallVector<MDNode *, 2> Types;
2116
2117 // Cross-DSO CFI emits jumptable entries for exported functions as well as
2118 // address-taken functions, in case they are address taken in other modules.
2119 const bool CrossDsoCfi = M.getModuleFlag("Cross-DSO CFI") != nullptr;
2120
2121 struct ExportedFunctionInfo {
2122 CfiFunctionLinkage Linkage;
2123 MDNode *FuncMD; // {name, linkage, type[, type...]}
2124 };
2125 MapVector<StringRef, ExportedFunctionInfo> ExportedFunctions;
2126 if (ExportSummary) {
2127 NamedMDNode *CfiFunctionsMD = M.getNamedMetadata("cfi.functions");
2128 if (CfiFunctionsMD) {
2129 // A set of all functions that are address taken by a live global object.
2130 DenseSet<GlobalValue::GUID> AddressTaken;
2131 for (auto &I : *ExportSummary)
2132 for (auto &GVS : I.second.getSummaryList())
2133 if (GVS->isLive())
2134 for (const auto &Ref : GVS->refs()) {
2135 AddressTaken.insert(Ref.getGUID());
2136 for (auto &RefGVS : Ref.getSummaryList())
2137 if (auto Alias = dyn_cast<AliasSummary>(RefGVS.get()))
2138 AddressTaken.insert(Alias->getAliaseeGUID());
2139 }
2140 auto IsAddressTaken = [&](GlobalValue::GUID GUID) {
2141 if (AddressTaken.count(GUID))
2142 return true;
2143 auto VI = ExportSummary->getValueInfo(GUID);
2144 if (!VI)
2145 return false;
2146 for (auto &I : VI.getSummaryList())
2147 if (auto Alias = dyn_cast<AliasSummary>(I.get()))
2148 if (AddressTaken.count(Alias->getAliaseeGUID()))
2149 return true;
2150 return false;
2151 };
2152 for (auto *FuncMD : CfiFunctionsMD->operands()) {
2153 assert(FuncMD->getNumOperands() >= 2);
2154 StringRef FunctionName =
2155 cast<MDString>(FuncMD->getOperand(0))->getString();
2156 CfiFunctionLinkage Linkage = static_cast<CfiFunctionLinkage>(
2157 cast<ConstantAsMetadata>(FuncMD->getOperand(1))
2158 ->getValue()
2159 ->getUniqueInteger()
2160 .getZExtValue());
2161 const GlobalValue::GUID GUID =
2162 GlobalValue::getGUIDAssumingExternalLinkage(
2163 GlobalValue::dropLLVMManglingEscape(FunctionName));
2164 // Do not emit jumptable entries for functions that are not live and
2165 // have no live references (and are not exported with cross-DSO CFI).
2166 if (!ExportSummary->isGUIDLive(GUID))
2167 continue;
2168 if (!IsAddressTaken(GUID)) {
2169 if (!CrossDsoCfi || Linkage != CFL_Definition)
2170 continue;
2171
2172 bool Exported = false;
2173 if (auto VI = ExportSummary->getValueInfo(GUID))
2174 for (const auto &GVS : VI.getSummaryList())
2175 if (GVS->isLive() && !GlobalValue::isLocalLinkage(GVS->linkage()))
2176 Exported = true;
2177
2178 if (!Exported)
2179 continue;
2180 }
2181 auto P = ExportedFunctions.insert({FunctionName, {Linkage, FuncMD}});
2182 if (!P.second && P.first->second.Linkage != CFL_Definition)
2183 P.first->second = {Linkage, FuncMD};
2184 }
2185
2186 for (const auto &P : ExportedFunctions) {
2187 StringRef FunctionName = P.first;
2188 CfiFunctionLinkage Linkage = P.second.Linkage;
2189 MDNode *FuncMD = P.second.FuncMD;
2190 Function *F = M.getFunction(FunctionName);
2191 if (F && F->hasLocalLinkage()) {
2192 // Locally defined function that happens to have the same name as a
2193 // function defined in a ThinLTO module. Rename it to move it out of
2194 // the way of the external reference that we're about to create.
2195 // Note that setName will find a unique name for the function, so even
2196 // if there is an existing function with the suffix there won't be a
2197 // name collision.
2198 F->setName(F->getName() + ".1");
2199 F = nullptr;
2200 }
2201
2202 if (!F)
2203 F = Function::Create(
2204 FunctionType::get(Type::getVoidTy(M.getContext()), false),
2205 GlobalVariable::ExternalLinkage,
2206 M.getDataLayout().getProgramAddressSpace(), FunctionName, &M);
2207
2208 // If the function is available_externally, remove its definition so
2209 // that it is handled the same way as a declaration. Later we will try
2210 // to create an alias using this function's linkage, which will fail if
2211 // the linkage is available_externally. This will also result in us
2212 // following the code path below to replace the type metadata.
2213 if (F->hasAvailableExternallyLinkage()) {
2214 F->setLinkage(GlobalValue::ExternalLinkage);
2215 F->deleteBody();
2216 F->setComdat(nullptr);
2217 F->clearMetadata();
2218 }
2219
2220 // Update the linkage for extern_weak declarations when a definition
2221 // exists.
2222 if (Linkage == CFL_Definition && F->hasExternalWeakLinkage())
2223 F->setLinkage(GlobalValue::ExternalLinkage);
2224
2225 // If the function in the full LTO module is a declaration, replace its
2226 // type metadata with the type metadata we found in cfi.functions. That
2227 // metadata is presumed to be more accurate than the metadata attached
2228 // to the declaration.
2229 if (F->isDeclaration()) {
2230 if (Linkage == CFL_WeakDeclaration)
2231 F->setLinkage(GlobalValue::ExternalWeakLinkage);
2232
2233 F->eraseMetadata(LLVMContext::MD_type);
2234 for (unsigned I = 2; I < FuncMD->getNumOperands(); ++I)
2235 F->addMetadata(LLVMContext::MD_type,
2236 *cast<MDNode>(FuncMD->getOperand(I).get()));
2237 }
2238 }
2239 }
2240 }
2241
2242 struct AliasToCreate {
2243 Function *Alias;
2244 std::string TargetName;
2245 };
2246 std::vector<AliasToCreate> AliasesToCreate;
2247
2248 // Parse alias data to replace stand-in function declarations for aliases
2249 // with an alias to the intended target.
2250 if (ExportSummary) {
2251 if (NamedMDNode *AliasesMD = M.getNamedMetadata("aliases")) {
2252 for (auto *AliasMD : AliasesMD->operands()) {
2253 SmallVector<Function *> Aliases;
2254 for (Metadata *MD : AliasMD->operands()) {
2255 auto *MDS = dyn_cast<MDString>(MD);
2256 if (!MDS)
2257 continue;
2258 StringRef AliasName = MDS->getString();
2259 if (!ExportedFunctions.count(AliasName))
2260 continue;
2261 auto *AliasF = M.getFunction(AliasName);
2262 if (AliasF)
2263 Aliases.push_back(AliasF);
2264 }
2265
2266 if (Aliases.empty())
2267 continue;
2268
2269 for (unsigned I = 1; I != Aliases.size(); ++I) {
2270 auto *AliasF = Aliases[I];
2271 ExportedFunctions.erase(AliasF->getName());
2272 AliasesToCreate.push_back(
2273 {AliasF, std::string(Aliases[0]->getName())});
2274 }
2275 }
2276 }
2277 }
2278
2279 DenseMap<GlobalObject *, GlobalTypeMember *> GlobalTypeMembers;
2280 for (GlobalObject &GO : M.global_objects()) {
2281 if (isa<GlobalVariable>(GO) && GO.isDeclarationForLinker())
2282 continue;
2283
2284 Types.clear();
2285 GO.getMetadata(LLVMContext::MD_type, Types);
2286
2287 bool IsJumpTableCanonical = false;
2288 bool IsExported = false;
2289 if (Function *F = dyn_cast<Function>(&GO)) {
2290 IsJumpTableCanonical = isJumpTableCanonical(F);
2291 if (auto It = ExportedFunctions.find(F->getName());
2292 It != ExportedFunctions.end()) {
2293 IsJumpTableCanonical |= It->second.Linkage == CFL_Definition;
2294 IsExported = true;
2295 // TODO: The logic here checks only that the function is address taken,
2296 // not that the address takers are live. This can be updated to check
2297 // their liveness and emit fewer jumptable entries once monolithic LTO
2298 // builds also emit summaries.
2299 } else if (!F->hasAddressTaken()) {
2300 if (!CrossDsoCfi || !IsJumpTableCanonical || F->hasLocalLinkage())
2301 continue;
2302 }
2303 }
2304
2305 auto *GTM = GlobalTypeMember::create(Alloc, &GO, IsJumpTableCanonical,
2306 IsExported, Types);
2307 GlobalTypeMembers[&GO] = GTM;
2308 for (MDNode *Type : Types) {
2309 verifyTypeMDNode(&GO, Type);
2310 auto &Info = TypeIdInfo[Type->getOperand(1)];
2311 Info.UniqueId = ++CurUniqueId;
2312 Info.RefGlobals.push_back(GTM);
2313 }
2314 }
2315
2316 auto AddTypeIdUse = [&](Metadata *TypeId) -> TypeIdUserInfo & {
2317 // Add the call site to the list of call sites for this type identifier. We
2318 // also use TypeIdUsers to keep track of whether we have seen this type
2319 // identifier before. If we have, we don't need to re-add the referenced
2320 // globals to the equivalence class.
2321 auto Ins = TypeIdUsers.insert({TypeId, {}});
2322 if (Ins.second) {
2323 // Add the type identifier to the equivalence class.
2324 auto &GCI = GlobalClasses.insert(TypeId);
2325 GlobalClassesTy::member_iterator CurSet = GlobalClasses.findLeader(GCI);
2326
2327 // Add the referenced globals to the type identifier's equivalence class.
2328 for (GlobalTypeMember *GTM : TypeIdInfo[TypeId].RefGlobals)
2329 CurSet = GlobalClasses.unionSets(
2330 CurSet, GlobalClasses.findLeader(GlobalClasses.insert(GTM)));
2331 }
2332
2333 return Ins.first->second;
2334 };
2335
2336 if (TypeTestFunc) {
2337 for (const Use &U : TypeTestFunc->uses()) {
2338 auto CI = cast<CallInst>(U.getUser());
2339 // If this type test is only used by llvm.assume instructions, it
2340 // was used for whole program devirtualization, and is being kept
2341 // for use by other optimization passes. We do not need or want to
2342 // lower it here. We also don't want to rewrite any associated globals
2343 // unnecessarily. These will be removed by a subsequent LTT invocation
2344 // with the DropTypeTests flag set.
2345 bool OnlyAssumeUses = !CI->use_empty();
2346 for (const Use &CIU : CI->uses()) {
2347 if (isa<AssumeInst>(CIU.getUser()))
2348 continue;
2349 OnlyAssumeUses = false;
2350 break;
2351 }
2352 if (OnlyAssumeUses)
2353 continue;
2354
2355 auto TypeIdMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));
2356 if (!TypeIdMDVal)
2357 report_fatal_error("Second argument of llvm.type.test must be metadata");
2358 auto TypeId = TypeIdMDVal->getMetadata();
2359 AddTypeIdUse(TypeId).CallSites.push_back(CI);
2360 }
2361 }
2362
2363 if (ICallBranchFunnelFunc) {
2364 for (const Use &U : ICallBranchFunnelFunc->uses()) {
2365 if (Arch != Triple::x86_64)
2366 report_fatal_error(
2367 "llvm.icall.branch.funnel not supported on this target");
2368
2369 auto CI = cast<CallInst>(U.getUser());
2370
2371 std::vector<GlobalTypeMember *> Targets;
2372 if (CI->arg_size() % 2 != 1)
2373 report_fatal_error("number of arguments should be odd");
2374
2375 GlobalClassesTy::member_iterator CurSet;
2376 for (unsigned I = 1; I != CI->arg_size(); I += 2) {
2377 int64_t Offset;
2378 auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
2379 CI->getOperand(I), Offset, M.getDataLayout()));
2380 if (!Base)
2381 report_fatal_error(
2382 "Expected branch funnel operand to be global value");
2383
2384 GlobalTypeMember *GTM = GlobalTypeMembers[Base];
2385 Targets.push_back(GTM);
2386 GlobalClassesTy::member_iterator NewSet =
2387 GlobalClasses.findLeader(GlobalClasses.insert(GTM));
2388 if (I == 1)
2389 CurSet = NewSet;
2390 else
2391 CurSet = GlobalClasses.unionSets(CurSet, NewSet);
2392 }
2393
2394 GlobalClasses.unionSets(
2395 CurSet, GlobalClasses.findLeader(
2396 GlobalClasses.insert(ICallBranchFunnel::create(
2397 Alloc, CI, Targets, ++CurUniqueId))));
2398 }
2399 }
2400
2401 if (ExportSummary) {
2402 DenseMap<GlobalValue::GUID, TinyPtrVector<Metadata *>> MetadataByGUID;
2403 for (auto &P : TypeIdInfo) {
2404 if (auto *TypeId = dyn_cast<MDString>(P.first))
2405 MetadataByGUID[GlobalValue::getGUIDAssumingExternalLinkage(
2406 TypeId->getString())]
2407 .push_back(TypeId);
2408 }
2409
2410 for (auto &P : *ExportSummary) {
2411 for (auto &S : P.second.getSummaryList()) {
2412 if (!ExportSummary->isGlobalValueLive(S.get()))
2413 continue;
2414 if (auto *FS = dyn_cast<FunctionSummary>(S->getBaseObject()))
2415 for (GlobalValue::GUID G : FS->type_tests())
2416 for (Metadata *MD : MetadataByGUID[G])
2417 AddTypeIdUse(MD).IsExported = true;
2418 }
2419 }
2420 }
2421
2422 if (GlobalClasses.empty())
2423 return false;
2424
2425 {
2426 ScopedSaveAliaseesAndUsed S(M);
2427 // For each disjoint set we found...
2428 for (const auto &C : GlobalClasses) {
2429 if (!C->isLeader())
2430 continue;
2431
2432 ++NumTypeIdDisjointSets;
2433 // Build the list of type identifiers in this disjoint set.
2434 std::vector<Metadata *> TypeIds;
2435 std::vector<GlobalTypeMember *> Globals;
2436 std::vector<ICallBranchFunnel *> ICallBranchFunnels;
2437 for (auto M : GlobalClasses.members(*C)) {
2438 if (isa<Metadata *>(M))
2439 TypeIds.push_back(cast<Metadata *>(M));
2440 else if (isa<GlobalTypeMember *>(M))
2441 Globals.push_back(cast<GlobalTypeMember *>(M));
2442 else
2443 ICallBranchFunnels.push_back(cast<ICallBranchFunnel *>(M));
2444 }
2445
2446 // Order type identifiers by unique ID for determinism. This ordering is
2447 // stable as there is a one-to-one mapping between metadata and unique
2448 // IDs.
2449 llvm::sort(TypeIds, [&](Metadata *M1, Metadata *M2) {
2450 return TypeIdInfo[M1].UniqueId < TypeIdInfo[M2].UniqueId;
2451 });
2452
2453 // Same for the branch funnels.
2454 llvm::sort(ICallBranchFunnels,
2455 [&](ICallBranchFunnel *F1, ICallBranchFunnel *F2) {
2456 return F1->UniqueId < F2->UniqueId;
2457 });
2458
2459 // Build bitsets for this disjoint set.
2460 buildBitSetsFromDisjointSet(TypeIds, Globals, ICallBranchFunnels);
2461 }
2462 }
2463
2464 allocateByteArrays();
2465
2466 for (auto A : AliasesToCreate) {
2467 auto *Target = M.getNamedValue(A.TargetName);
2468 if (!isa<GlobalAlias>(Target))
2469 continue;
2470 auto *AliasGA = GlobalAlias::create("", Target);
2471 AliasGA->setVisibility(A.Alias->getVisibility());
2472 AliasGA->setLinkage(A.Alias->getLinkage());
2473 AliasGA->takeName(A.Alias);
2474 A.Alias->replaceAllUsesWith(AliasGA);
2475 A.Alias->eraseFromParent();
2476 }
2477
2478 // Emit .symver directives for exported functions, if they exist.
2479 if (ExportSummary) {
2480 if (NamedMDNode *SymversMD = M.getNamedMetadata("symvers")) {
2481 for (auto *Symver : SymversMD->operands()) {
2482 assert(Symver->getNumOperands() >= 2);
2483 StringRef SymbolName =
2484 cast<MDString>(Symver->getOperand(0))->getString();
2485 StringRef Alias = cast<MDString>(Symver->getOperand(1))->getString();
2486
2487 if (!ExportedFunctions.count(SymbolName))
2488 continue;
2489
2490 M.appendModuleInlineAsm(
2491 (llvm::Twine(".symver ") + SymbolName + ", " + Alias).str());
2492 }
2493 }
2494 }
2495
2496 return true;
2497}
2498
2499 PreservedAnalyses LowerTypeTestsPass::run(Module &M,
2500 ModuleAnalysisManager &AM) {
2501 bool Changed;
2502 if (UseCommandLine)
2503 Changed = LowerTypeTestsModule::runForTesting(M, AM);
2504 else
2505 Changed =
2506 LowerTypeTestsModule(M, AM, ExportSummary, ImportSummary, DropTypeTests)
2507 .lower();
2508 if (!Changed)
2509 return PreservedAnalyses::all();
2510 return PreservedAnalyses::none();
2511}
2512
2513 PreservedAnalyses SimplifyTypeTestsPass::run(Module &M,
2514 ModuleAnalysisManager &AM) {
2515 bool Changed = false;
2516 // Figure out whether inlining has exposed a constant address to a lowered
2517 // type test, and remove the test if so and the address is known to pass the
2518 // test. Unfortunately this pass ends up needing to reverse engineer what
2519 // LowerTypeTests did; this is currently inherent to the design of ThinLTO
2520 // importing where LowerTypeTests needs to run at the start.
2521 //
2522 // We look for things like:
2523 //
2524 // sub (i64 ptrtoint (ptr @_Z2fpv to i64), i64 ptrtoint (ptr
2525 // @__typeid__ZTSFvvE_global_addr to i64))
2526 //
2527 // which gets replaced with 0 if _Z2fpv (more specifically _Z2fpv.cfi, the
2528 // function referred to by the jump table) is a member of the type _ZTSFvv, as
2529 // well as things like
2530 //
2531 // icmp eq ptr @_Z2fpv, @__typeid__ZTSFvvE_global_addr
2532 //
2533 // which gets replaced with true if _Z2fpv is a member.
2534 for (auto &GV : M.globals()) {
2535 if (!GV.getName().starts_with("__typeid_") ||
2536 !GV.getName().ends_with("_global_addr"))
2537 continue;
2538 // __typeid_foo_global_addr -> foo
2539 auto *MD = MDString::get(M.getContext(),
2540 GV.getName().substr(9, GV.getName().size() - 21));
2541 auto MaySimplifyPtr = [&](Value *Ptr) {
2542 if (auto *GV = dyn_cast<GlobalValue>(Ptr))
2543 if (auto *CFIGV = M.getNamedValue((GV->getName() + ".cfi").str()))
2544 Ptr = CFIGV;
2545 return isKnownTypeIdMember(MD, M.getDataLayout(), Ptr, 0);
2546 };
2547 auto MaySimplifyInt = [&](Value *Op) {
2548 auto *PtrAsInt = dyn_cast<ConstantExpr>(Op);
2549 if (!PtrAsInt || PtrAsInt->getOpcode() != Instruction::PtrToInt)
2550 return false;
2551 return MaySimplifyPtr(PtrAsInt->getOperand(0));
2552 };
2553 for (User *U : make_early_inc_range(GV.users())) {
2554 if (auto *CI = dyn_cast<ICmpInst>(U)) {
2555 if (CI->getPredicate() == CmpInst::ICMP_EQ &&
2556 MaySimplifyPtr(CI->getOperand(0))) {
2557 // This is an equality comparison (TypeTestResolution::Single case in
2558 // lowerTypeTestCall). In this case we just replace the comparison
2559 // with true.
2560 CI->replaceAllUsesWith(ConstantInt::getTrue(M.getContext()));
2561 CI->eraseFromParent();
2562 Changed = true;
2563 continue;
2564 }
2565 }
2566 auto *CE = dyn_cast<ConstantExpr>(U);
2567 if (!CE || CE->getOpcode() != Instruction::PtrToInt)
2568 continue;
2569 for (Use &U : make_early_inc_range(CE->uses())) {
2570 auto *CE = dyn_cast<ConstantExpr>(U.getUser());
2571 if (U.getOperandNo() == 0 && CE &&
2572 CE->getOpcode() == Instruction::Sub &&
2573 MaySimplifyInt(CE->getOperand(1))) {
2574 // This is a computation of PtrOffset as generated by
2575 // LowerTypeTestsModule::lowerTypeTestCall above. If
2576 // isKnownTypeIdMember passes we just pretend it evaluated to 0. This
2577 // should cause later passes to remove the range and alignment checks.
2578 // The bitset checks won't be removed but those are uncommon.
2579 CE->replaceAllUsesWith(ConstantInt::get(CE->getType(), 0));
2580 Changed = true;
2581 }
2582 auto *CI = dyn_cast<ICmpInst>(U.getUser());
2583 if (U.getOperandNo() == 1 && CI &&
2584 CI->getPredicate() == CmpInst::ICMP_EQ &&
2585 MaySimplifyInt(CI->getOperand(0))) {
2586 // This is an equality comparison. Unlike in the case above, it
2587 // remained as an integer compare.
2588 CI->replaceAllUsesWith(ConstantInt::getTrue(M.getContext()));
2589 CI->eraseFromParent();
2590 Changed = true;
2591 }
2592 }
2593 }
2594 }
2595
2596 if (!Changed)
2597 return PreservedAnalyses::all();
2598 PreservedAnalyses PA;
2599 PA.preserve<DominatorTreeAnalysis>();
2600 PA.preserve<PostDominatorTreeAnalysis>();
2601 PA.preserve<LoopAnalysis>();
2602 return PA;
2603}
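// Usage sketch (pass names assumed from the pass registration conventions;
// check PassRegistry.def before relying on them): both passes can be
// exercised in isolation with opt, e.g.
//   opt -passes=lowertypetests in.ll -S
//   opt -passes=simplify-type-tests in.ll -S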
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Prepare AGPR Alloc
AMDGPU Register Bank Select
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file defines the BumpPtrAllocator interface.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Finalize Linkage
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
Generic implementation of equivalence classes through the use Tarjan's efficient union-find algorithm...
Hexagon Common GEP
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
static const unsigned kARMJumpTableEntrySize
static const unsigned kLOONGARCH64JumpTableEntrySize
static bool isKnownTypeIdMember(Metadata *TypeId, const DataLayout &DL, Value *V, uint64_t COffset)
static const unsigned kX86IBTJumpTableEntrySize
static cl::opt< std::string > ClReadSummary("lowertypetests-read-summary", cl::desc("Read summary from given YAML file before running pass"), cl::Hidden)
static const unsigned kRISCVJumpTableEntrySize
static auto buildBitSets(ArrayRef< Metadata * > TypeIds, const DenseMap< GlobalTypeMember *, uint64_t > &GlobalLayout)
static void dropTypeTests(Module &M, Function &TypeTestFunc, bool ShouldDropAll)
static Value * createMaskedBitTest(IRBuilder<> &B, Value *Bits, Value *BitOffset)
Build a test that bit BitOffset mod sizeof(Bits)*8 is set in Bits.
static bool isThumbFunction(Function *F, Triple::ArchType ModuleArch)
static const unsigned kX86JumpTableEntrySize
static cl::opt< bool > AvoidReuse("lowertypetests-avoid-reuse", cl::desc("Try to avoid reuse of byte array addresses using aliases"), cl::Hidden, cl::init(true))
static cl::opt< PassSummaryAction > ClSummaryAction("lowertypetests-summary-action", cl::desc("What to do with the summary when running this pass"), cl::values(clEnumValN(PassSummaryAction::None, "none", "Do nothing"), clEnumValN(PassSummaryAction::Import, "import", "Import typeid resolutions from summary and globals"), clEnumValN(PassSummaryAction::Export, "export", "Export typeid resolutions to summary and globals")), cl::Hidden)
static const unsigned kARMBTIJumpTableEntrySize
static cl::opt< std::string > ClWriteSummary("lowertypetests-write-summary", cl::desc("Write summary to given YAML file after running pass"), cl::Hidden)
static BitSetInfo buildBitSet(ArrayRef< uint64_t > Offsets)
Build a bit set for list of offsets.
static bool isDirectCall(Use &U)
static const unsigned kARMv6MJumpTableEntrySize
static cl::opt< DropTestKind > ClDropTypeTests("lowertypetests-drop-type-tests", cl::desc("Simply drop type test sequences"), cl::values(clEnumValN(DropTestKind::None, "none", "Do not drop any type tests"), clEnumValN(DropTestKind::Assume, "assume", "Drop type test assume sequences"), clEnumValN(DropTestKind::All, "all", "Drop all type test sequences")), cl::Hidden, cl::init(DropTestKind::None))
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define G(x, y, z)
Definition MD5.cpp:55
Machine Check Debug Module
This file contains the declarations for metadata subclasses.
#define T
ModuleSummaryIndex.h This file contains the declarations the classes that hold the module index and s...
#define P(N)
FunctionAnalysisManager FAM
This file defines the PointerUnion class, which is a discriminated union of pointer types.
static StringRef getName(Value *V)
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
This pass exposes codegen information to IR-level passes.
This header defines support for implementing classes that have some trailing object (or arrays of obj...
Class for arbitrary precision integers.
Definition APInt.h:78
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1541
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
size_t count(StringRef S) const
@ ICMP_NE
not equal
Definition InstrTypes.h:698
static LLVM_ABI ConstantAggregateZero * get(Type *Ty)
ConstantArray - Constant Array Declarations.
Definition Constants.h:433
static ConstantAsMetadata * get(Constant *C)
Definition Metadata.h:536
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
Definition Constants.h:715
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getInBoundsGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList)
Create an "inbounds" getelementptr.
Definition Constants.h:1301
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition Constants.h:1274
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static Constant * getAnon(ArrayRef< Constant * > V, bool Packed=false)
Return an anonymous struct that has the specified elements.
Definition Constants.h:486
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
iterator end()
Definition DenseMap.h:81
Analysis pass which computes a DominatorTree.
Definition Dominators.h:283
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:166
void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition Function.cpp:451
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition Globals.cpp:598
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set a particular kind of metadata attachment.
LLVM_ABI void setComdat(Comdat *C)
Definition Globals.cpp:214
const Comdat * getComdat() const
LLVM_ABI bool eraseMetadata(unsigned KindID)
Erase all metadata attachments with the given kind.
bool hasSection() const
Check if this global has a custom object file section.
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
static LLVM_ABI GUID getGUIDAssumingExternalLinkage(StringRef GlobalName)
Return a 64-bit global unique ID constructed from the name of a global symbol.
Definition Globals.cpp:77
bool isDSOLocal() const
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
VisibilityTypes getVisibility() const
static bool isLocalLinkage(LinkageTypes Linkage)
LinkageTypes getLinkage() const
uint64_t GUID
Declare a type to represent a global unique identifier for a global value.
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool isDeclarationForLinker() const
PointerType * getType() const
Global values are always pointers.
VisibilityTypes
An enumeration for the kinds of visibility of global values.
Definition GlobalValue.h:67
@ HiddenVisibility
The GV is hidden.
Definition GlobalValue.h:69
void setVisibility(VisibilityTypes V)
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition GlobalValue.h:52
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
@ ExternalLinkage
Externally visible function.
Definition GlobalValue.h:53
@ ExternalWeakLinkage
ExternalWeak linkage description.
Definition GlobalValue.h:62
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
LLVM_ABI void setInitializer(Constant *InitVal)
setInitializer - Sets the initializer for this global variable, removing any existing initializer if ...
Definition Globals.cpp:524
MaybeAlign getAlign() const
Returns the alignment of the given variable.
void setConstant(bool Val)
LLVM_ABI void setCodeModel(CodeModel::Model CM)
Change the code model for this global.
Definition Globals.cpp:566
LLVM_ABI void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition Globals.cpp:520
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2788
static LLVM_ABI InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition InlineAsm.cpp:43
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:569
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
Metadata * get() const
Definition Metadata.h:929
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Definition Metadata.cpp:608
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition MapVector.h:119
static ErrorOr< std::unique_ptr< MemoryBuffer > > getFile(const Twine &Filename, bool IsText=false, bool RequiresNullTerminator=true, bool IsVolatile=false, std::optional< Align > Alignment=std::nullopt)
Open the specified file as a MemoryBuffer, returning a new MemoryBuffer if successful,...
Root of the metadata hierarchy.
Definition Metadata.h:64
TypeIdSummary & getOrInsertTypeIdSummary(StringRef TypeId)
Return an existing or new TypeIdSummary entry for TypeId.
const TypeIdSummary * getTypeIdSummary(StringRef TypeId) const
This returns either a pointer to the type id summary (if present in the summary map) or null (if not ...
CfiFunctionIndex & cfiFunctionDecls()
CfiFunctionIndex & cfiFunctionDefs()
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
iterator_range< op_iterator > operands()
Definition Metadata.h:1853
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Analysis pass which computes a PostDominatorTree.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:149
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:702
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition StringRef.h:573
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:146
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition StringRef.h:273
Type * getElementType(unsigned N) const
Analysis pass providing the TargetTransformInfo.
See the file comment for details on the usage of the TrailingObjects type.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
@ loongarch64
Definition Triple.h:65
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:280
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:292
Value * getOperand(unsigned i) const
Definition User.h:232
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
user_iterator user_begin()
Definition Value.h:402
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:546
iterator_range< user_iterator > users()
Definition Value.h:426
use_iterator use_begin()
Definition Value.h:364
LLVM_ABI void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition Value.cpp:554
bool use_empty() const
Definition Value.h:346
iterator_range< use_iterator > uses()
Definition Value.h:380
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:396
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition DenseSet.h:180
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
Changed
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char SymbolName[]
Key for Kernel::Metadata::mSymbolName.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI Function * getDeclarationIfExists(const Module *M, ID id)
Look up the Function declaration of the intrinsic id in the Module M and return it if it exists.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
DropTestKind
Specifies how to drop type tests.
@ Assume
Do not drop type tests (default).
LLVM_ABI bool isJumpTableCanonical(Function *F)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:682
SmallVector< unsigned char, 0 > ByteArray
Definition PropertySet.h:25
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
@ OF_TextWithCRLF
The file should be opened in text mode and use a carriage linefeed '\r\n'.
Definition FileSystem.h:764
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
@ Offset
Definition DWP.cpp:477
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
void stable_sort(R &&Range)
Definition STLExtras.h:2058
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1725
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
@ Export
Export information to summary.
Definition IPO.h:57
@ None
Do nothing.
Definition IPO.h:55
@ Import
Import information from summary.
Definition IPO.h:56
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2136
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
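For reference, a minimal use of GetPointerBaseWithConstantOffset under the signature above; the pointer and DataLayout are assumed to come from the enclosing module, and stripConstantOffset is a hypothetical wrapper.

  // Hedged sketch: walk constant-index GEPs below Ptr, returning the
  // underlying base and accumulating the byte offset into ByteOffset.
  #include "llvm/Analysis/ValueTracking.h"
  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Value.h"

  static llvm::Value *stripConstantOffset(llvm::Value *Ptr,
                                          const llvm::DataLayout &DL,
                                          int64_t &ByteOffset) {
    ByteOffset = 0; // must start at zero; the callee accumulates into it
    return llvm::GetPointerBaseWithConstantOffset(Ptr, ByteOffset, DL);
  }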
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition STLExtras.h:632
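make_early_inc_range is what lets loops erase users while iterating over them. A hedged sketch, assuming F is only ever used as a call's callee here; eraseCallsTo is a hypothetical name.

  // Hedged sketch: erase every dead call to F without invalidating the
  // use-list iteration; the iterator is incremented before the body runs.
  #include "llvm/ADT/STLExtras.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/Instructions.h"
  #include "llvm/Support/Casting.h"

  static void eraseCallsTo(llvm::Function *F) {
    for (llvm::User *U : llvm::make_early_inc_range(F->users()))
      if (auto *CI = llvm::dyn_cast<llvm::CallInst>(U))
        if (CI->getCalledOperand() == F && CI->use_empty())
          CI->eraseFromParent();
  }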
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant, stopping at the first 1.
Definition bit.h:202
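countr_zero is the usual way to turn a power-of-two value into its log2, e.g. for bit-set stride computations. A small worked example; the values are chosen purely for illustration.

  // Hedged sketch: countr_zero counts trailing zero bits.
  #include "llvm/ADT/bit.h"
  #include <cassert>

  static void countrZeroExamples() {
    assert(llvm::countr_zero(8u) == 3);  // 0b1000: three trailing zeros
    assert(llvm::countr_zero(1u) == 0);  // 0b0001: none
    assert(llvm::countr_zero(12u) == 2); // 0b1100: two trailing zeros
  }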
unsigned M1(unsigned Val)
Definition VE.h:377
LLVM_ABI bool convertUsersOfConstantsToInstructions(ArrayRef< Constant * > Consts, Function *RestrictToFunc=nullptr, bool RemoveDeadConstants=true, bool IncludeSelf=false)
Replace constant expressions users of the given constants with instructions.
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1622
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
TargetTransformInfo TTI
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
LLVM_ABI void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
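alignTo rounds a size up to the given alignment, which is how padded layouts such as a combined global are computed. A worked example under the declared signature:

  // Hedged sketch: alignTo rounds Size up to a multiple of the alignment.
  #include "llvm/Support/Alignment.h"
  #include <cassert>

  static void alignToExamples() {
    assert(llvm::alignTo(10, llvm::Align(8)) == 16); // next multiple of 8
    assert(llvm::alignTo(16, llvm::Align(8)) == 16); // already aligned
    assert(llvm::alignTo(1, llvm::Align(4)) == 4);
  }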
DWARFExpression::Operation Op
Expected< T > errorOrToExpected(ErrorOr< T > &&EO)
Convert an ErrorOr<T> to an Expected<T>.
Definition Error.h:1245
ArrayRef(const T &OneElt) -> ArrayRef< T >
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1835
constexpr unsigned BitWidth
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
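appendToGlobalCtors registers a function in llvm.global_ctors so it runs at load time. A minimal sketch; the function name "my_init" and the helper addModuleCtor are illustrative only.

  // Hedged sketch: create a void() function and register it as a module
  // constructor at default priority (65535).
  #include "llvm/IR/Function.h"
  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Transforms/Utils/ModuleUtils.h"

  static void addModuleCtor(llvm::Module &M) {
    llvm::LLVMContext &Ctx = M.getContext();
    auto *FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), false);
    auto *F = llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
                                     "my_init", &M);
    llvm::IRBuilder<> B(llvm::BasicBlock::Create(Ctx, "entry", F));
    B.CreateRetVoid();
    llvm::appendToGlobalCtors(M, F, /*Priority=*/65535);
  }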
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI Error errorCodeToError(std::error_code EC)
Helper for converting an std::error_code to a Error.
Definition Error.cpp:111
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the old basic block, and the rest of the instructions in the BB are moved to a new block.
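SplitBlockAndInsertIfThen is the helper this kind of lowering uses to materialize a guarded slow path (e.g. a byte-array load behind a range check). A hedged sketch, assuming Cond is an i1 value and I the instruction at which to split; insertGuardedPath is a hypothetical name.

  // Hedged sketch: split the block at instruction I and branch on Cond
  // into a new "then" block; ThenTerm is that block's terminator.
  #include "llvm/IR/IRBuilder.h"
  #include "llvm/IR/Instruction.h"
  #include "llvm/Transforms/Utils/BasicBlockUtils.h"

  static void insertGuardedPath(llvm::Value *Cond, llvm::Instruction *I) {
    llvm::Instruction *ThenTerm =
        llvm::SplitBlockAndInsertIfThen(Cond, I->getIterator(),
                                        /*Unreachable=*/false);
    llvm::IRBuilder<> B(ThenTerm);
    // ... emit the guarded slow-path checks before ThenTerm here ...
  }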
BumpPtrAllocatorImpl<> BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
Definition Allocator.h:383
LLVM_ABI void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
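appendToUsed, like appendToCompilerUsed above, keeps globals alive against optimizer and linker GC. A minimal sketch, assuming GV is a global that must survive; keepAlive is a hypothetical wrapper.

  // Hedged sketch: mark GV as used so GlobalDCE and the linker keep it.
  #include "llvm/IR/GlobalValue.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Transforms/Utils/ModuleUtils.h"

  static void keepAlive(llvm::Module &M, llvm::GlobalValue *GV) {
    llvm::appendToUsed(M, {GV});
  }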
CfiFunctionLinkage
The type of CFI jumptable needed for a function.
@ CFL_WeakDeclaration
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition MathExtras.h:373
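Note the strictness: NextPowerOf2 returns a power of two strictly greater than its argument, so rounding a value up to a power of two is typically spelled with a decrement first. A worked example:

  // Hedged sketch: NextPowerOf2 is strictly greater than its input.
  #include "llvm/Support/MathExtras.h"
  #include <cassert>

  static void nextPowerOf2Examples() {
    assert(llvm::NextPowerOf2(5) == 8);
    assert(llvm::NextPowerOf2(8) == 16);    // strictly greater, not equal
    assert(llvm::NextPowerOf2(8 - 1) == 8); // round-up idiom: NextPowerOf2(N - 1)
  }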
LLVM_ABI GlobalVariable * collectUsedGlobalVariables(const Module &M, SmallVectorImpl< GlobalValue * > &Vec, bool CompilerUsed)
Given "llvm.used" or "llvm.compiler.used" as a global name, collect the initializer elements of that ...
Definition Module.cpp:870
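collectUsedGlobalVariables is the read-side counterpart to appendToUsed. A hedged sketch of scanning llvm.used; dumpUsedList is a hypothetical name.

  // Hedged sketch: gather the members of llvm.used into Vec.
  #include "llvm/ADT/SmallVector.h"
  #include "llvm/IR/GlobalValue.h"
  #include "llvm/IR/Module.h"

  static void dumpUsedList(const llvm::Module &M) {
    llvm::SmallVector<llvm::GlobalValue *, 8> Vec;
    llvm::collectUsedGlobalVariables(M, Vec, /*CompilerUsed=*/false);
    for (llvm::GlobalValue *GV : Vec)
      (void)GV; // inspect GV->getName() etc. here
  }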
TypeTestResolution TTRes
Kind
Specifies which kind of type check we should emit for this byte array.
@ Unknown
Unknown (analysis not performed, don't lower)
@ Single
Single element (last example in "Short Inline Bit Vectors")
@ Inline
Inlined bit vector ("Short Inline Bit Vectors")
@ Unsat
Unsatisfiable type (i.e. no global has this type metadata)
@ AllOnes
All-ones bit vector ("Eliminating Bit Vector Checks for All-Ones Bit Vectors")
@ ByteArray
Test a byte array (first example)
unsigned SizeM1BitWidth
Range of size-1 expressed as a bit width.
enum llvm::TypeTestResolution::Kind TheKind
SmallVector< uint64_t, 16 > Offsets
LLVM_ABI bool containsGlobalOffset(uint64_t Offset) const
LLVM_ABI void print(raw_ostream &OS) const
This class is used to build a byte array containing overlapping bit sets.
uint64_t BitAllocs[BitsPerByte]
The number of bytes allocated so far for each of the bits.
std::vector< uint8_t > Bytes
The byte array built so far.
LLVM_ABI void allocate(const std::set< uint64_t > &Bits, uint64_t BitSize, uint64_t &AllocByteOffset, uint8_t &AllocMask)
Allocate BitSize bits in the byte array where Bits contains the bits to set.
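A hedged sketch of how ByteArrayBuilder::allocate is driven, based only on the signature above: the caller passes the set of bit offsets for one type's bit set and receives the byte offset and mask with which to test membership. The bit values and the helper placeBitSet are illustrative only.

  // Hedged sketch, assuming only the allocate() signature above: place a
  // small bit set into the shared byte array and report where it landed.
  #include "llvm/Transforms/IPO/LowerTypeTests.h"
  #include <cstdint>
  #include <set>

  static void placeBitSet(llvm::lowertypetests::ByteArrayBuilder &BAB) {
    std::set<uint64_t> Bits = {0, 2, 3}; // illustrative bit offsets
    uint64_t AllocByteOffset = 0;
    uint8_t AllocMask = 0;
    BAB.allocate(Bits, /*BitSize=*/4, AllocByteOffset, AllocMask);
    // Membership test for offset O: Bytes[AllocByteOffset + O] & AllocMask.
  }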
This class implements a layout algorithm for globals referenced by bit sets that tries to keep members of small bit sets together.
std::vector< std::vector< uint64_t > > Fragments
The computed layout.
LLVM_ABI void addFragment(const std::set< uint64_t > &F)
Add F to the layout while trying to keep its indices contiguous.
std::vector< uint64_t > FragmentMap
Mapping from object index to fragment index.
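A hedged sketch of the GlobalLayoutBuilder interface above: each addFragment call supplies the set of object indices belonging to one bit set, and Fragments afterwards gives a grouping that keeps each set's members close together. The indices and the helper buildLayout are illustrative only.

  // Hedged sketch, assuming only the interface listed above: lay out six
  // objects so that members of each bit set stay contiguous.
  #include "llvm/Transforms/IPO/LowerTypeTests.h"
  #include <cstdint>
  #include <set>

  static void buildLayout() {
    llvm::lowertypetests::GlobalLayoutBuilder GLB(/*NumObjects=*/6);
    GLB.addFragment({0, 3});    // first bit set's object indices
    GLB.addFragment({3, 4, 5}); // overlaps index 3, so the fragments merge
    // GLB.Fragments now holds the computed grouping; flattening it in
    // order yields the final object layout.
  }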