//===- LowerTypeTests.cpp - type metadata lowering pass -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass lowers type metadata and calls to the llvm.type.test intrinsic.
// It also ensures that globals are properly laid out for the
// llvm.icall.branch.funnel intrinsic.
// See http://llvm.org/docs/TypeMetadata.html for more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/LowerTypeTests.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/ReplaceConstant.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TrailingObjects.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <set>
#include <string>
#include <system_error>
#include <utility>
#include <vector>

using namespace llvm;
using namespace lowertypetests;

#define DEBUG_TYPE "lowertypetests"

STATISTIC(ByteArraySizeBits, "Byte array size in bits");
STATISTIC(ByteArraySizeBytes, "Byte array size in bytes");
STATISTIC(NumByteArraysCreated, "Number of byte arrays created");
STATISTIC(NumTypeTestCallsLowered, "Number of type test calls lowered");
STATISTIC(NumTypeIdDisjointSets, "Number of disjoint sets of type identifiers");

101 "lowertypetests-avoid-reuse",
102 cl::desc("Try to avoid reuse of byte array addresses using aliases"),
103 cl::Hidden, cl::init(true));
104
106 "lowertypetests-summary-action",
107 cl::desc("What to do with the summary when running this pass"),
108 cl::values(clEnumValN(PassSummaryAction::None, "none", "Do nothing"),
110 "Import typeid resolutions from summary and globals"),
112 "Export typeid resolutions to summary and globals")),
113 cl::Hidden);
114
116 "lowertypetests-read-summary",
117 cl::desc("Read summary from given YAML file before running pass"),
118 cl::Hidden);
119
121 "lowertypetests-write-summary",
122 cl::desc("Write summary to given YAML file after running pass"),
123 cl::Hidden);
124
126 ClDropTypeTests("lowertypetests-drop-type-tests",
127 cl::desc("Simply drop type test sequences"),
128 cl::values(clEnumValN(DropTestKind::None, "none",
129 "Do not drop any type tests"),
130 clEnumValN(DropTestKind::Assume, "assume",
131 "Drop type test assume sequences"),
132 clEnumValN(DropTestKind::All, "all",
133 "Drop all type test sequences")),
134 cl::Hidden, cl::init(DropTestKind::None));
135
bool BitSetInfo::containsGlobalOffset(uint64_t Offset) const {
  if (Offset < ByteOffset)
    return false;

  if ((Offset - ByteOffset) % (uint64_t(1) << AlignLog2) != 0)
    return false;

  uint64_t BitOffset = (Offset - ByteOffset) >> AlignLog2;
  if (BitOffset >= BitSize)
    return false;

  return Bits.count(BitSize - 1 - BitOffset);
}

void BitSetInfo::print(raw_ostream &OS) const {
  OS << "offset " << ByteOffset << " size " << BitSize << " align "
     << (1 << AlignLog2);

  if (isAllOnes()) {
    OS << " all-ones\n";
    return;
  }

  OS << " { ";
  for (uint64_t B : Bits)
    OS << B << ' ';
  OS << "}\n";
}

BitSetInfo BitSetBuilder::build() {
  if (Min > Max)
    Min = 0;

  // Normalize each offset against the minimum observed offset, and compute
  // the bitwise OR of each of the offsets. The number of trailing zeros
  // in the mask gives us the log2 of the alignment of all offsets, which
  // allows us to compress the bitset by only storing one bit per aligned
  // address.
  uint64_t Mask = 0;
  for (uint64_t &Offset : Offsets) {
    Offset -= Min;
    Mask |= Offset;
  }

  BitSetInfo BSI;
  BSI.ByteOffset = Min;

  BSI.AlignLog2 = 0;
  if (Mask != 0)
    BSI.AlignLog2 = llvm::countr_zero(Mask);

  // Build the compressed bitset while normalizing the offsets against the
  // computed alignment.
  BSI.BitSize = ((Max - Min) >> BSI.AlignLog2) + 1;
  for (uint64_t Offset : Offsets) {
    Offset >>= BSI.AlignLog2;
    // We invert the order of bits when adding them to the bitset. This is
    // because the offset that we test against is computed by subtracting the
    // address that we are testing from the global's address, which means that
    // the offset increases as the tested address decreases.
    BSI.Bits.insert(BSI.BitSize - 1 - Offset);
  }

  return BSI;
}
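
// Illustrative example (not from the source): offsets {8, 12, 20} give
// Min = 8 and normalized offsets {0, 4, 12}, so Mask = 0b1100 and
// AlignLog2 = 2. Then BitSize = ((20 - 8) >> 2) + 1 = 4, and the inverted
// bit positions {3, 2, 0} are inserted, leaving only bit 1 clear.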

void GlobalLayoutBuilder::addFragment(const std::set<uint64_t> &F) {
  // Create a new fragment to hold the layout for F.
  Fragments.emplace_back();
  std::vector<uint64_t> &Fragment = Fragments.back();
  uint64_t FragmentIndex = Fragments.size() - 1;

  for (auto ObjIndex : F) {
    uint64_t OldFragmentIndex = FragmentMap[ObjIndex];
    if (OldFragmentIndex == 0) {
      // We haven't seen this object index before, so just add it to the current
      // fragment.
      Fragment.push_back(ObjIndex);
    } else {
      // This index belongs to an existing fragment. Copy the elements of the
      // old fragment into this one and clear the old fragment. We don't update
      // the fragment map just yet, this ensures that any further references to
      // indices from the old fragment in this fragment do not insert any more
      // indices.
      std::vector<uint64_t> &OldFragment = Fragments[OldFragmentIndex];
      llvm::append_range(Fragment, OldFragment);
      OldFragment.clear();
    }
  }

  // Update the fragment map to point our object indices to this fragment.
  for (uint64_t ObjIndex : Fragment)
    FragmentMap[ObjIndex] = FragmentIndex;
}
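
// Illustrative example: addFragment({1, 2}) creates fragment [1, 2]; a later
// addFragment({2, 3}) finds that index 2 already lives in that fragment, so
// it copies [1, 2] into the new fragment, clears the old one, and then
// appends 3, yielding a single fragment [1, 2, 3].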

void ByteArrayBuilder::allocate(const std::set<uint64_t> &Bits,
                                uint64_t BitSize, uint64_t &AllocByteOffset,
                                uint8_t &AllocMask) {
  // Find the smallest current allocation.
  unsigned Bit = 0;
  for (unsigned I = 1; I != BitsPerByte; ++I)
    if (BitAllocs[I] < BitAllocs[Bit])
      Bit = I;

  AllocByteOffset = BitAllocs[Bit];

  // Add our size to it.
  unsigned ReqSize = AllocByteOffset + BitSize;
  BitAllocs[Bit] = ReqSize;
  if (Bytes.size() < ReqSize)
    Bytes.resize(ReqSize);

  // Set our bits.
  AllocMask = 1 << Bit;
  for (uint64_t B : Bits)
    Bytes[AllocByteOffset + B] |= AllocMask;
}
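
// Illustrative example: if bit plane 3 is the smallest allocation with
// BitAllocs[3] == 10, allocating a 16-bit set yields AllocByteOffset == 10
// and AllocMask == 1 << 3; bytes [10, 26) then get bit 3 or'ed in wherever
// the set contains a one, so up to eight small bit sets can share each byte
// of the array.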

static bool isJumpTableCanonical(Function *F) {
  if (F->isDeclarationForLinker())
    return false;
  auto *CI = mdconst::extract_or_null<ConstantInt>(
      F->getParent()->getModuleFlag("CFI Canonical Jump Tables"));
  if (!CI || !CI->isZero())
    return true;
  return F->hasFnAttribute("cfi-canonical-jump-table");
}

namespace {

struct ByteArrayInfo {
  std::set<uint64_t> Bits;
  uint64_t BitSize;
  GlobalVariable *ByteArray;
  GlobalVariable *MaskGlobal;
  uint8_t *MaskPtr = nullptr;
};

/// A POD-like structure that we use to store a global reference together with
/// its metadata types. In this pass we frequently need to query the set of
/// metadata types referenced by a global, which at the IR level is an expensive
/// operation involving a map lookup; this data structure helps to reduce the
/// number of times we need to do this lookup.
class GlobalTypeMember final : TrailingObjects<GlobalTypeMember, MDNode *> {
  friend TrailingObjects;

  GlobalObject *GO;
  size_t NTypes;

  // For functions: true if the jump table is canonical. This essentially means
  // whether the canonical address (i.e. the symbol table entry) of the function
  // is provided by the local jump table. This is normally the same as whether
  // the function is defined locally, but if canonical jump tables are disabled
  // by the user then the jump table never provides a canonical definition.
  bool IsJumpTableCanonical;

  // For functions: true if this function is either defined or used in a ThinLTO
  // module and its jumptable entry needs to be exported to ThinLTO backends.
  bool IsExported;

public:
  static GlobalTypeMember *create(BumpPtrAllocator &Alloc, GlobalObject *GO,
                                  bool IsJumpTableCanonical, bool IsExported,
                                  ArrayRef<MDNode *> Types) {
    auto *GTM = static_cast<GlobalTypeMember *>(Alloc.Allocate(
        totalSizeToAlloc<MDNode *>(Types.size()), alignof(GlobalTypeMember)));
    GTM->GO = GO;
    GTM->NTypes = Types.size();
    GTM->IsJumpTableCanonical = IsJumpTableCanonical;
    GTM->IsExported = IsExported;
    llvm::copy(Types, GTM->getTrailingObjects());
    return GTM;
  }

  GlobalObject *getGlobal() const {
    return GO;
  }

  bool isJumpTableCanonical() const {
    return IsJumpTableCanonical;
  }

  bool isExported() const {
    return IsExported;
  }

  ArrayRef<MDNode *> types() const { return getTrailingObjects(NTypes); }
};

struct ICallBranchFunnel final
    : TrailingObjects<ICallBranchFunnel, GlobalTypeMember *> {
  static ICallBranchFunnel *create(BumpPtrAllocator &Alloc, CallInst *CI,
                                   ArrayRef<GlobalTypeMember *> Targets,
                                   unsigned UniqueId) {
    auto *Call = static_cast<ICallBranchFunnel *>(
        Alloc.Allocate(totalSizeToAlloc<GlobalTypeMember *>(Targets.size()),
                       alignof(ICallBranchFunnel)));
    Call->CI = CI;
    Call->UniqueId = UniqueId;
    Call->NTargets = Targets.size();
    llvm::copy(Targets, Call->getTrailingObjects());
    return Call;
  }

  CallInst *CI;
  ArrayRef<GlobalTypeMember *> targets() const {
    return getTrailingObjects(NTargets);
  }

  unsigned UniqueId;

private:
  size_t NTargets;
};

struct ScopedSaveAliaseesAndUsed {
  Module &M;
  SmallVector<GlobalValue *, 4> Used, CompilerUsed;
  std::vector<std::pair<GlobalAlias *, Function *>> FunctionAliases;
  std::vector<std::pair<GlobalIFunc *, Function *>> ResolverIFuncs;

  // This function only removes functions from llvm.used and llvm.compiler.used.
  // We cannot remove global variables because they need to follow RAUW, as
  // they may be deleted by buildBitSetsFromGlobalVariables.
  void collectAndEraseUsedFunctions(Module &M,
                                    SmallVectorImpl<GlobalValue *> &Vec,
                                    bool CompilerUsed) {
    auto *GV = collectUsedGlobalVariables(M, Vec, CompilerUsed);
    if (!GV)
      return;
    // There's no API to only remove certain array elements from
    // llvm.used/llvm.compiler.used, so we remove all of them and add back only
    // the non-functions.
    GV->eraseFromParent();
    auto NonFuncBegin =
        std::stable_partition(Vec.begin(), Vec.end(), [](GlobalValue *GV) {
          return isa<Function>(GV);
        });
    if (CompilerUsed)
      appendToCompilerUsed(M, {NonFuncBegin, Vec.end()});
    else
      appendToUsed(M, {NonFuncBegin, Vec.end()});
    Vec.resize(NonFuncBegin - Vec.begin());
  }

  ScopedSaveAliaseesAndUsed(Module &M) : M(M) {
    // The users of this class want to replace all function references except
    // for aliases and llvm.used/llvm.compiler.used with references to a jump
    // table. We avoid replacing aliases in order to avoid introducing a double
    // indirection (or an alias pointing to a declaration in ThinLTO mode), and
    // we avoid replacing llvm.used/llvm.compiler.used because these global
    // variables describe properties of the global, not the jump table (besides,
    // offset references to the jump table in llvm.used are invalid).
    // Unfortunately, LLVM doesn't have a "RAUW except for these (possibly
    // indirect) users", so what we do is save the list of globals referenced by
    // llvm.used/llvm.compiler.used and aliases, erase the used lists, let RAUW
    // replace the aliasees and then set them back to their original values at
    // the end.
    collectAndEraseUsedFunctions(M, Used, false);
    collectAndEraseUsedFunctions(M, CompilerUsed, true);

    for (auto &GA : M.aliases()) {
      // FIXME: This should look past all aliases not just interposable ones,
      // see discussion on D65118.
      if (auto *F = dyn_cast<Function>(GA.getAliasee()->stripPointerCasts()))
        FunctionAliases.push_back({&GA, F});
    }

    for (auto &GI : M.ifuncs())
      if (auto *F = dyn_cast<Function>(GI.getResolver()->stripPointerCasts()))
        ResolverIFuncs.push_back({&GI, F});
  }

  ~ScopedSaveAliaseesAndUsed() {
    appendToUsed(M, Used);
    appendToCompilerUsed(M, CompilerUsed);

    for (auto P : FunctionAliases)
      P.first->setAliasee(P.second);

    for (auto P : ResolverIFuncs) {
      // This does not preserve pointer casts that may have been stripped by the
      // constructor, but the resolver's type is different from that of the
      // ifunc anyway.
      P.first->setResolver(P.second);
    }
  }
};

class LowerTypeTestsModule {
  Module &M;

  ModuleSummaryIndex *ExportSummary;
  const ModuleSummaryIndex *ImportSummary;
  // Set when the client has invoked this to simply drop all type test assume
  // sequences.
  DropTestKind DropTypeTests;

  Triple::ArchType Arch;
  Triple::OSType OS;
  Triple::ObjectFormatType ObjectFormat;

  // Determines which kind of Thumb jump table we generate. If arch is
  // either 'arm' or 'thumb' we need to find this out, because
  // selectJumpTableArmEncoding may decide to use Thumb in either case.
  bool CanUseArmJumpTable = false, CanUseThumbBWJumpTable = false;

  // Cache variable used by hasBranchTargetEnforcement().
  int HasBranchTargetEnforcement = -1;

  IntegerType *Int1Ty = Type::getInt1Ty(M.getContext());
  IntegerType *Int8Ty = Type::getInt8Ty(M.getContext());
  PointerType *PtrTy = PointerType::getUnqual(M.getContext());
  ArrayType *Int8Arr0Ty = ArrayType::get(Type::getInt8Ty(M.getContext()), 0);
  IntegerType *Int32Ty = Type::getInt32Ty(M.getContext());
  IntegerType *Int64Ty = Type::getInt64Ty(M.getContext());
  IntegerType *IntPtrTy = M.getDataLayout().getIntPtrType(M.getContext(), 0);

  // Indirect function call index assignment counter for WebAssembly
  uint64_t IndirectIndex = 1;

  // Mapping from type identifiers to the call sites that test them, as well as
  // whether the type identifier needs to be exported to ThinLTO backends as
  // part of the regular LTO phase of the ThinLTO pipeline (see exportTypeId).
  struct TypeIdUserInfo {
    std::vector<CallInst *> CallSites;
    bool IsExported = false;
  };
  DenseMap<Metadata *, TypeIdUserInfo> TypeIdUsers;

  /// This structure describes how to lower type tests for a particular type
  /// identifier. It is either built directly from the global analysis (during
  /// regular LTO or the regular LTO phase of ThinLTO), or indirectly using type
  /// identifier summaries and external symbol references (in ThinLTO backends).
  struct TypeIdLowering {
    TypeTestResolution::Kind TheKind = TypeTestResolution::Unsat;

    /// All except Unsat: the address of the last element within the combined
    /// global.
    Constant *OffsetedGlobal;

    /// ByteArray, Inline, AllOnes: log2 of the required global alignment
    /// relative to the start address.
    Constant *AlignLog2;

    /// ByteArray, Inline, AllOnes: one less than the size of the memory region
    /// covering members of this type identifier as a multiple of 2^AlignLog2.
    Constant *SizeM1;

    /// ByteArray: the byte array to test the address against.
    Constant *TheByteArray;

    /// ByteArray: the bit mask to apply to bytes loaded from the byte array.
    Constant *BitMask;

    /// Inline: the bit mask to test the address against.
    Constant *InlineBits;
  };

  std::vector<ByteArrayInfo> ByteArrayInfos;

  Function *WeakInitializerFn = nullptr;

  GlobalVariable *GlobalAnnotation;
  DenseSet<Value *> FunctionAnnotations;

  bool shouldExportConstantsAsAbsoluteSymbols();
  uint8_t *exportTypeId(StringRef TypeId, const TypeIdLowering &TIL);
  TypeIdLowering importTypeId(StringRef TypeId);
  void importTypeTest(CallInst *CI);
  void importFunction(Function *F, bool isJumpTableCanonical);

  ByteArrayInfo *createByteArray(const BitSetInfo &BSI);
  void allocateByteArrays();
  Value *createBitSetTest(IRBuilder<> &B, const TypeIdLowering &TIL,
                          Value *BitOffset);
  void lowerTypeTestCalls(
      ArrayRef<Metadata *> TypeIds, Constant *CombinedGlobalAddr,
      const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout);
  Value *lowerTypeTestCall(Metadata *TypeId, CallInst *CI,
                           const TypeIdLowering &TIL);

  void buildBitSetsFromGlobalVariables(ArrayRef<Metadata *> TypeIds,
                                       ArrayRef<GlobalTypeMember *> Globals);
  Triple::ArchType
  selectJumpTableArmEncoding(ArrayRef<GlobalTypeMember *> Functions);
  bool hasBranchTargetEnforcement();
  unsigned getJumpTableEntrySize(Triple::ArchType JumpTableArch);
  InlineAsm *createJumpTableEntryAsm(Triple::ArchType JumpTableArch);
  void verifyTypeMDNode(GlobalObject *GO, MDNode *Type);
  void buildBitSetsFromFunctions(ArrayRef<Metadata *> TypeIds,
                                 ArrayRef<GlobalTypeMember *> Functions);
  void buildBitSetsFromFunctionsNative(ArrayRef<Metadata *> TypeIds,
                                       ArrayRef<GlobalTypeMember *> Functions);
  void buildBitSetsFromFunctionsWASM(ArrayRef<Metadata *> TypeIds,
                                     ArrayRef<GlobalTypeMember *> Functions);
  void
  buildBitSetsFromDisjointSet(ArrayRef<Metadata *> TypeIds,
                              ArrayRef<GlobalTypeMember *> Globals,
                              ArrayRef<ICallBranchFunnel *> ICallBranchFunnels);

  void replaceWeakDeclarationWithJumpTablePtr(Function *F, Constant *JT,
                                              bool IsJumpTableCanonical);
  void moveInitializerToModuleConstructor(GlobalVariable *GV);
  void findGlobalVariableUsersOf(Constant *C,
                                 SmallSetVector<GlobalVariable *, 8> &Out);

  void createJumpTable(Function *F, ArrayRef<GlobalTypeMember *> Functions,
                       Triple::ArchType JumpTableArch);

  /// replaceCfiUses - Go through the uses list for this definition
  /// and make each use point to "V" instead of "this" when the use is outside
  /// the block. 'This's use list is expected to have at least one element.
  /// Unlike replaceAllUsesWith this function skips blockaddr and direct call
  /// uses.
  void replaceCfiUses(Function *Old, Value *New, bool IsJumpTableCanonical);

  /// replaceDirectCalls - Go through the uses list for this definition and
  /// replace each use, which is a direct function call.
  void replaceDirectCalls(Value *Old, Value *New);

  bool isFunctionAnnotation(Value *V) const {
    return FunctionAnnotations.contains(V);
  }

  void maybeReplaceComdat(Function *F, StringRef OriginalName);

public:
  LowerTypeTestsModule(Module &M, ModuleAnalysisManager &AM,
                       ModuleSummaryIndex *ExportSummary,
                       const ModuleSummaryIndex *ImportSummary,
                       DropTestKind DropTypeTests);

  bool lower();

  // Lower the module using the action and summary passed as command line
  // arguments. For testing purposes only.
  static bool runForTesting(Module &M, ModuleAnalysisManager &AM);
};
} // end anonymous namespace

/// Build a bit set for a list of offsets.
static BitSetInfo buildBitSet(ArrayRef<uint64_t> Offsets) {
  // Compute the byte offset of each address associated with this type
  // identifier.
  return BitSetBuilder(Offsets).build();
}

/// Build a test that bit BitOffset mod sizeof(Bits)*8 is set in
/// Bits. This pattern matches to the bt instruction on x86.
static Value *createMaskedBitTest(IRBuilder<> &B, Value *Bits,
                                  Value *BitOffset) {
  auto BitsType = cast<IntegerType>(Bits->getType());
  unsigned BitWidth = BitsType->getBitWidth();

  BitOffset = B.CreateZExtOrTrunc(BitOffset, BitsType);
  Value *BitIndex =
      B.CreateAnd(BitOffset, ConstantInt::get(BitsType, BitWidth - 1));
  Value *BitMask = B.CreateShl(ConstantInt::get(BitsType, 1), BitIndex);
  Value *MaskedBits = B.CreateAnd(Bits, BitMask);
  return B.CreateICmpNE(MaskedBits, ConstantInt::get(BitsType, 0));
}
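
// Sketch of the IR this emits for a 64-bit Bits value (names illustrative):
//   %index = and i64 %offset, 63
//   %mask  = shl i64 1, %index
//   %bits  = and i64 %Bits, %mask
//   %test  = icmp ne i64 %bits, 0
// which instruction selection can match to a single bt on x86.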

ByteArrayInfo *LowerTypeTestsModule::createByteArray(const BitSetInfo &BSI) {
  // Create globals to stand in for byte arrays and masks. These never actually
  // get initialized, we RAUW and erase them later in allocateByteArrays() once
  // we know the offset and mask to use.
  auto ByteArrayGlobal = new GlobalVariable(
      M, Int8Ty, /*isConstant=*/true, GlobalValue::PrivateLinkage, nullptr);
  auto MaskGlobal = new GlobalVariable(M, Int8Ty, /*isConstant=*/true,
                                       GlobalValue::PrivateLinkage, nullptr);

  ByteArrayInfos.emplace_back();
  ByteArrayInfo *BAI = &ByteArrayInfos.back();

  BAI->Bits = BSI.Bits;
  BAI->BitSize = BSI.BitSize;
  BAI->ByteArray = ByteArrayGlobal;
  BAI->MaskGlobal = MaskGlobal;
  return BAI;
}

void LowerTypeTestsModule::allocateByteArrays() {
  llvm::stable_sort(ByteArrayInfos,
                    [](const ByteArrayInfo &BAI1, const ByteArrayInfo &BAI2) {
                      return BAI1.BitSize > BAI2.BitSize;
                    });

  std::vector<uint64_t> ByteArrayOffsets(ByteArrayInfos.size());

  ByteArrayBuilder BAB;
  for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
    ByteArrayInfo *BAI = &ByteArrayInfos[I];

    uint8_t Mask;
    BAB.allocate(BAI->Bits, BAI->BitSize, ByteArrayOffsets[I], Mask);

    BAI->MaskGlobal->replaceAllUsesWith(
        ConstantExpr::getIntToPtr(ConstantInt::get(Int8Ty, Mask), PtrTy));
    BAI->MaskGlobal->eraseFromParent();
    if (BAI->MaskPtr)
      *BAI->MaskPtr = Mask;
  }

  Constant *ByteArrayConst = ConstantDataArray::get(M.getContext(), BAB.Bytes);
  auto ByteArray =
      new GlobalVariable(M, ByteArrayConst->getType(), /*isConstant=*/true,
                         GlobalValue::PrivateLinkage, ByteArrayConst);

  for (unsigned I = 0; I != ByteArrayInfos.size(); ++I) {
    ByteArrayInfo *BAI = &ByteArrayInfos[I];

    Constant *Idxs[] = {ConstantInt::get(IntPtrTy, 0),
                        ConstantInt::get(IntPtrTy, ByteArrayOffsets[I])};
    Constant *GEP = ConstantExpr::getInBoundsGetElementPtr(
        ByteArrayConst->getType(), ByteArray, Idxs);

    // Create an alias instead of RAUW'ing the gep directly. On x86 this ensures
    // that the pc-relative displacement is folded into the lea instead of the
    // test instruction getting another displacement.
    GlobalAlias *Alias = GlobalAlias::create(
        Int8Ty, 0, GlobalValue::PrivateLinkage, "bits", GEP, &M);
    BAI->ByteArray->replaceAllUsesWith(Alias);
    BAI->ByteArray->eraseFromParent();
  }

  ByteArraySizeBits = BAB.BitAllocs[0] + BAB.BitAllocs[1] + BAB.BitAllocs[2] +
                      BAB.BitAllocs[3] + BAB.BitAllocs[4] + BAB.BitAllocs[5] +
                      BAB.BitAllocs[6] + BAB.BitAllocs[7];
  ByteArraySizeBytes = BAB.Bytes.size();
}

/// Build a test that bit BitOffset is set in the type identifier that was
/// lowered to TIL, which must be either an Inline or a ByteArray.
Value *LowerTypeTestsModule::createBitSetTest(IRBuilder<> &B,
                                              const TypeIdLowering &TIL,
                                              Value *BitOffset) {
  if (TIL.TheKind == TypeTestResolution::Inline) {
    // If the bit set is sufficiently small, we can avoid a load by bit testing
    // a constant.
    return createMaskedBitTest(B, TIL.InlineBits, BitOffset);
  } else {
    Constant *ByteArray = TIL.TheByteArray;
    if (AvoidReuse && !ImportSummary) {
      // Each use of the byte array uses a different alias. This makes the
      // backend less likely to reuse previously computed byte array addresses,
      // improving the security of the CFI mechanism based on this pass.
      // This won't work when importing because TheByteArray is external.
      ByteArray = GlobalAlias::create(Int8Ty, 0, GlobalValue::PrivateLinkage,
                                      "bits_use", ByteArray, &M);
    }

    Value *ByteAddr = B.CreateGEP(Int8Ty, ByteArray, BitOffset);
    Value *Byte = B.CreateLoad(Int8Ty, ByteAddr);

    Value *ByteAndMask =
        B.CreateAnd(Byte, ConstantExpr::getPtrToInt(TIL.BitMask, Int8Ty));
    return B.CreateICmpNE(ByteAndMask, ConstantInt::get(Int8Ty, 0));
  }
}

static bool isKnownTypeIdMember(Metadata *TypeId, const DataLayout &DL,
                                Value *V, uint64_t COffset) {
  if (auto GV = dyn_cast<GlobalObject>(V)) {
    SmallVector<MDNode *, 2> Types;
    GV->getMetadata(LLVMContext::MD_type, Types);
    for (MDNode *Type : Types) {
      if (Type->getOperand(1) != TypeId)
        continue;
      uint64_t Offset =
          cast<ConstantInt>(
              cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
              ->getZExtValue();
      if (COffset == Offset)
        return true;
    }
    return false;
  }

  if (auto GEP = dyn_cast<GEPOperator>(V)) {
    APInt APOffset(DL.getIndexSizeInBits(0), 0);
    bool Result = GEP->accumulateConstantOffset(DL, APOffset);
    if (!Result)
      return false;
    COffset += APOffset.getZExtValue();
    return isKnownTypeIdMember(TypeId, DL, GEP->getPointerOperand(), COffset);
  }

  if (auto Op = dyn_cast<Operator>(V)) {
    if (Op->getOpcode() == Instruction::BitCast)
      return isKnownTypeIdMember(TypeId, DL, Op->getOperand(0), COffset);

    if (Op->getOpcode() == Instruction::Select)
      return isKnownTypeIdMember(TypeId, DL, Op->getOperand(1), COffset) &&
             isKnownTypeIdMember(TypeId, DL, Op->getOperand(2), COffset);
  }

  return false;
}

/// Lower an llvm.type.test call to its implementation. Returns the value to
/// replace the call with.
Value *LowerTypeTestsModule::lowerTypeTestCall(Metadata *TypeId, CallInst *CI,
                                               const TypeIdLowering &TIL) {
  // Delay lowering if the resolution is currently unknown.
  if (TIL.TheKind == TypeTestResolution::Unknown)
    return nullptr;
  if (TIL.TheKind == TypeTestResolution::Unsat)
    return ConstantInt::getFalse(M.getContext());

  Value *Ptr = CI->getArgOperand(0);
  const DataLayout &DL = M.getDataLayout();
  if (isKnownTypeIdMember(TypeId, DL, Ptr, 0))
    return ConstantInt::getTrue(M.getContext());

  BasicBlock *InitialBB = CI->getParent();

  IRBuilder<> B(CI);

  Value *PtrAsInt = B.CreatePtrToInt(Ptr, IntPtrTy);

  Constant *OffsetedGlobalAsInt =
      ConstantExpr::getPtrToInt(TIL.OffsetedGlobal, IntPtrTy);
  if (TIL.TheKind == TypeTestResolution::Single)
    return B.CreateICmpEQ(PtrAsInt, OffsetedGlobalAsInt);

  // Here we compute `last element - address`. The reason why we do this
  // instead of computing `address - first element` is that it leads to a
  // slightly shorter instruction sequence on x86. Since it doesn't matter how
  // we do the subtraction on other architectures, we do so unconditionally.
  Value *PtrOffset = B.CreateSub(OffsetedGlobalAsInt, PtrAsInt);

  // We need to check that the offset both falls within our range and is
  // suitably aligned. We can check both properties at the same time by
  // performing a right rotate by log2(alignment) followed by an integer
  // comparison against the bitset size. The rotate will move the lower
  // order bits that need to be zero into the higher order bits of the
  // result, causing the comparison to fail if they are nonzero. The rotate
  // also conveniently gives us a bit offset to use during the load from
  // the bitset.
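  //
  // For example, with AlignLog2 == 3 and SizeM1 == 7 on a 64-bit target, an
  // offset of 40 rotates to 5 and passes the check, while the misaligned
  // offset 41 rotates to 0x2000000000000005 and fails it.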
  Value *BitOffset = B.CreateIntrinsic(IntPtrTy, Intrinsic::fshr,
                                       {PtrOffset, PtrOffset, TIL.AlignLog2});

  Value *OffsetInRange = B.CreateICmpULE(BitOffset, TIL.SizeM1);

  // If the bit set is all ones, testing against it is unnecessary.
  if (TIL.TheKind == TypeTestResolution::AllOnes)
    return OffsetInRange;

  // See if the intrinsic is used in the following common pattern:
  //   br(llvm.type.test(...), thenbb, elsebb)
  // where nothing happens between the type test and the br.
  // If so, create slightly simpler IR.
  if (CI->hasOneUse())
    if (auto *Br = dyn_cast<BranchInst>(*CI->user_begin()))
      if (CI->getNextNode() == Br) {
        BasicBlock *Then = InitialBB->splitBasicBlock(CI->getIterator());
        BasicBlock *Else = Br->getSuccessor(1);
        BranchInst *NewBr = BranchInst::Create(Then, Else, OffsetInRange);
        NewBr->setMetadata(LLVMContext::MD_prof,
                           Br->getMetadata(LLVMContext::MD_prof));
        ReplaceInstWithInst(InitialBB->getTerminator(), NewBr);

        // Update phis in Else resulting from InitialBB being split
        for (auto &Phi : Else->phis())
          Phi.addIncoming(Phi.getIncomingValueForBlock(Then), InitialBB);

        IRBuilder<> ThenB(CI);
        return createBitSetTest(ThenB, TIL, BitOffset);
      }

  MDBuilder MDB(M.getContext());
  IRBuilder<> ThenB(SplitBlockAndInsertIfThen(OffsetInRange, CI, false,
                                              MDB.createLikelyBranchWeights()));

  // Now that we know that the offset is in range and aligned, load the
  // appropriate bit from the bitset.
  Value *Bit = createBitSetTest(ThenB, TIL, BitOffset);

  // The value we want is 0 if we came directly from the initial block
  // (having failed the range or alignment checks), or the loaded bit if
  // we came from the block in which we loaded it.
  B.SetInsertPoint(CI);
  PHINode *P = B.CreatePHI(Int1Ty, 2);
  P->addIncoming(ConstantInt::get(Int1Ty, 0), InitialBB);
  P->addIncoming(Bit, ThenB.GetInsertBlock());
  return P;
}

/// Given a disjoint set of type identifiers and globals, lay out the globals,
/// build the bit sets and lower the llvm.type.test calls.
void LowerTypeTestsModule::buildBitSetsFromGlobalVariables(
    ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Globals) {
  // Build a new global with the combined contents of the referenced globals.
  // This global is a struct whose even-indexed elements contain the original
  // contents of the referenced globals and whose odd-indexed elements contain
  // any padding required to align the next element to the next power of 2 plus
  // any additional padding required to meet its alignment requirements.
  std::vector<Constant *> GlobalInits;
  const DataLayout &DL = M.getDataLayout();
  DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
  Align MaxAlign;
  uint64_t CurOffset = 0;
  uint64_t DesiredPadding = 0;
  for (GlobalTypeMember *G : Globals) {
    auto *GV = cast<GlobalVariable>(G->getGlobal());
    Align Alignment =
        DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
    MaxAlign = std::max(MaxAlign, Alignment);
    uint64_t GVOffset = alignTo(CurOffset + DesiredPadding, Alignment);
    GlobalLayout[G] = GVOffset;
    if (GVOffset != 0) {
      uint64_t Padding = GVOffset - CurOffset;
      GlobalInits.push_back(
          ConstantAggregateZero::get(ArrayType::get(Int8Ty, Padding)));
    }

    GlobalInits.push_back(GV->getInitializer());
    uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
    CurOffset = GVOffset + InitSize;

    // Compute the amount of padding that we'd like for the next element.
    DesiredPadding = NextPowerOf2(InitSize - 1) - InitSize;

    // Experiments of different caps with Chromium on both x64 and ARM64
    // have shown that the 32-byte cap generates the smallest binary on
    // both platforms while different caps yield similar performance.
    // (see https://lists.llvm.org/pipermail/llvm-dev/2018-July/124694.html)
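    //
    // For example, a 20-byte global asks for NextPowerOf2(19) - 20 = 12 bytes
    // of padding, while a 70-byte global would ask for 58 and is instead
    // capped to alignTo(70, 32) - 70 = 26.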
    if (DesiredPadding > 32)
      DesiredPadding = alignTo(InitSize, 32) - InitSize;
  }

  Constant *NewInit = ConstantStruct::getAnon(M.getContext(), GlobalInits);
  auto *CombinedGlobal =
      new GlobalVariable(M, NewInit->getType(), /*isConstant=*/true,
                         GlobalValue::PrivateLinkage, NewInit);
  CombinedGlobal->setAlignment(MaxAlign);

  StructType *NewTy = cast<StructType>(NewInit->getType());
  lowerTypeTestCalls(TypeIds, CombinedGlobal, GlobalLayout);

  // Build aliases pointing to offsets into the combined global for each
  // global from which we built the combined global, and replace references
  // to the original globals with references to the aliases.
  for (unsigned I = 0; I != Globals.size(); ++I) {
    GlobalVariable *GV = cast<GlobalVariable>(Globals[I]->getGlobal());

    // Multiply by 2 to account for padding elements.
    Constant *CombinedGlobalIdxs[] = {ConstantInt::get(Int32Ty, 0),
                                      ConstantInt::get(Int32Ty, I * 2)};
    Constant *CombinedGlobalElemPtr = ConstantExpr::getInBoundsGetElementPtr(
        NewInit->getType(), CombinedGlobal, CombinedGlobalIdxs);
    assert(GV->getType()->getAddressSpace() == 0);
    GlobalAlias *GAlias =
        GlobalAlias::create(NewTy->getElementType(I * 2), 0, GV->getLinkage(),
                            "", CombinedGlobalElemPtr, &M);
    GAlias->setVisibility(GV->getVisibility());
    GAlias->takeName(GV);
    GV->replaceAllUsesWith(GAlias);
    GV->eraseFromParent();
  }
}

bool LowerTypeTestsModule::shouldExportConstantsAsAbsoluteSymbols() {
  return (Arch == Triple::x86 || Arch == Triple::x86_64) &&
         ObjectFormat == Triple::ELF;
}

/// Export the given type identifier so that ThinLTO backends may import it.
/// Type identifiers are exported by adding coarse-grained information about how
/// to test the type identifier to the summary, and creating symbols in the
/// object file (aliases and absolute symbols) containing fine-grained
/// information about the type identifier.
///
/// Returns a pointer to the location in which to store the bitmask, if
/// applicable.
uint8_t *LowerTypeTestsModule::exportTypeId(StringRef TypeId,
                                            const TypeIdLowering &TIL) {
  TypeTestResolution &TTRes =
      ExportSummary->getOrInsertTypeIdSummary(TypeId).TTRes;
  TTRes.TheKind = TIL.TheKind;

  auto ExportGlobal = [&](StringRef Name, Constant *C) {
    GlobalAlias *GA =
        GlobalAlias::create(Int8Arr0Ty, 0, GlobalValue::ExternalLinkage,
                            "__typeid_" + TypeId + "_" + Name, C, &M);
    GA->setVisibility(GlobalValue::HiddenVisibility);
  };

  auto ExportConstant = [&](StringRef Name, uint64_t &Storage, Constant *C) {
    if (shouldExportConstantsAsAbsoluteSymbols())
      ExportGlobal(Name, ConstantExpr::getIntToPtr(C, PtrTy));
    else
      Storage = cast<ConstantInt>(C)->getZExtValue();
  };

  if (TIL.TheKind != TypeTestResolution::Unsat)
    ExportGlobal("global_addr", TIL.OffsetedGlobal);

  if (TIL.TheKind == TypeTestResolution::ByteArray ||
      TIL.TheKind == TypeTestResolution::Inline ||
      TIL.TheKind == TypeTestResolution::AllOnes) {
    ExportConstant("align", TTRes.AlignLog2, TIL.AlignLog2);
    ExportConstant("size_m1", TTRes.SizeM1, TIL.SizeM1);

    uint64_t BitSize = cast<ConstantInt>(TIL.SizeM1)->getZExtValue() + 1;
    if (TIL.TheKind == TypeTestResolution::Inline)
      TTRes.SizeM1BitWidth = (BitSize <= 32) ? 5 : 6;
    else
      TTRes.SizeM1BitWidth = (BitSize <= 128) ? 7 : 32;
  }

  if (TIL.TheKind == TypeTestResolution::ByteArray) {
    ExportGlobal("byte_array", TIL.TheByteArray);
    if (shouldExportConstantsAsAbsoluteSymbols())
      ExportGlobal("bit_mask", TIL.BitMask);
    else
      return &TTRes.BitMask;
  }

  if (TIL.TheKind == TypeTestResolution::Inline)
    ExportConstant("inline_bits", TTRes.InlineBits, TIL.InlineBits);

  return nullptr;
}
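
// For a type id "typeid1" with a ByteArray resolution, for example, this
// creates the hidden aliases __typeid_typeid1_global_addr and
// __typeid_typeid1_byte_array, with align/size_m1/bit_mask exported either
// as absolute symbols (x86 ELF) or as fields of the summary. (Illustrative;
// the exact set of symbols depends on the resolution kind.)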

LowerTypeTestsModule::TypeIdLowering
LowerTypeTestsModule::importTypeId(StringRef TypeId) {
  const TypeIdSummary *TidSummary = ImportSummary->getTypeIdSummary(TypeId);
  if (!TidSummary)
    return {}; // Unsat: no globals match this type id.
  const TypeTestResolution &TTRes = TidSummary->TTRes;

  TypeIdLowering TIL;
  TIL.TheKind = TTRes.TheKind;

  auto ImportGlobal = [&](StringRef Name) {
    // Give the global a type of length 0 so that it is not assumed not to alias
    // with any other global.
    GlobalVariable *GV = M.getOrInsertGlobal(
        ("__typeid_" + TypeId + "_" + Name).str(), Int8Arr0Ty);
    GV->setVisibility(GlobalValue::HiddenVisibility);
    return GV;
  };

  auto ImportConstant = [&](StringRef Name, uint64_t Const, unsigned AbsWidth,
                            Type *Ty) {
    if (!shouldExportConstantsAsAbsoluteSymbols()) {
      Constant *C =
          ConstantInt::get(isa<IntegerType>(Ty) ? Ty : Int64Ty, Const);
      if (!isa<IntegerType>(Ty))
        C = ConstantExpr::getIntToPtr(C, PtrTy);
      return C;
    }

    Constant *C = ImportGlobal(Name);
    auto *GV = cast<GlobalVariable>(C->stripPointerCasts());
    if (isa<IntegerType>(Ty))
      C = ConstantExpr::getPtrToInt(C, Ty);
    if (GV->getMetadata(LLVMContext::MD_absolute_symbol))
      return C;

    auto SetAbsRange = [&](uint64_t Min, uint64_t Max) {
      auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Min));
      auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntPtrTy, Max));
      GV->setMetadata(LLVMContext::MD_absolute_symbol,
                      MDNode::get(M.getContext(), {MinC, MaxC}));
    };
    if (AbsWidth == IntPtrTy->getBitWidth()) {
      uint64_t AllOnes = IntPtrTy->getBitMask();
      SetAbsRange(AllOnes, AllOnes); // Full set.
    } else {
      SetAbsRange(0, 1ull << AbsWidth);
    }
    return C;
  };

  if (TIL.TheKind != TypeTestResolution::Unsat) {
    auto *GV = ImportGlobal("global_addr");
    // This is either a vtable (in .data.rel.ro) or a jump table (in .text).
    // Either way it's expected to be in the low 2 GiB, so set the small code
    // model.
    //
    // For .data.rel.ro, we currently place all such sections in the low 2 GiB
    // [1], and for .text the sections are expected to be in the low 2 GiB under
    // the small and medium code models [2] and this pass only supports those
    // code models (e.g. jump tables use jmp instead of movabs/jmp).
    //
    // [1] https://github.com/llvm/llvm-project/pull/137742
    // [2] https://maskray.me/blog/2023-05-14-relocation-overflow-and-code-models
    GV->setCodeModel(CodeModel::Small);
    TIL.OffsetedGlobal = GV;
  }

  if (TIL.TheKind == TypeTestResolution::ByteArray ||
      TIL.TheKind == TypeTestResolution::Inline ||
      TIL.TheKind == TypeTestResolution::AllOnes) {
    TIL.AlignLog2 = ImportConstant("align", TTRes.AlignLog2, 8, IntPtrTy);
    TIL.SizeM1 =
        ImportConstant("size_m1", TTRes.SizeM1, TTRes.SizeM1BitWidth, IntPtrTy);
  }

  if (TIL.TheKind == TypeTestResolution::ByteArray) {
    TIL.TheByteArray = ImportGlobal("byte_array");
    TIL.BitMask = ImportConstant("bit_mask", TTRes.BitMask, 8, PtrTy);
  }

  if (TIL.TheKind == TypeTestResolution::Inline)
    TIL.InlineBits = ImportConstant(
        "inline_bits", TTRes.InlineBits, 1 << TTRes.SizeM1BitWidth,
        TTRes.SizeM1BitWidth <= 5 ? Int32Ty : Int64Ty);

  return TIL;
}
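
// For example, importing "align" with AbsWidth == 8 references the symbol
// __typeid_<id>_align and attaches !absolute_symbol metadata for the range
// [0, 2^8), which lets the backend fold the symbol's value into an
// immediate. (Illustrative sketch of the absolute-symbol path on x86 ELF.)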

void LowerTypeTestsModule::importTypeTest(CallInst *CI) {
  auto TypeIdMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));
  if (!TypeIdMDVal)
    report_fatal_error("Second argument of llvm.type.test must be metadata");

  auto TypeIdStr = dyn_cast<MDString>(TypeIdMDVal->getMetadata());
  // If this is a local unpromoted type, which doesn't have a metadata string,
  // treat it as Unknown and delay lowering, so that we can still utilize it for
  // later optimizations.
  if (!TypeIdStr)
    return;

  TypeIdLowering TIL = importTypeId(TypeIdStr->getString());
  Value *Lowered = lowerTypeTestCall(TypeIdStr, CI, TIL);
  if (Lowered) {
    CI->replaceAllUsesWith(Lowered);
    CI->eraseFromParent();
  }
}

void LowerTypeTestsModule::maybeReplaceComdat(Function *F,
                                              StringRef OriginalName) {
  // For COFF we should also rename the comdat if this function also
  // happens to be the key function. Even if the comdat name changes, this
  // should still be fine since comdat and symbol resolution happens
  // before LTO, so all symbols which would prevail have been selected.
  if (F->hasComdat() && ObjectFormat == Triple::COFF &&
      F->getComdat()->getName() == OriginalName) {
    Comdat *OldComdat = F->getComdat();
    Comdat *NewComdat = M.getOrInsertComdat(F->getName());
    for (GlobalObject &GO : M.global_objects()) {
      if (GO.getComdat() == OldComdat)
        GO.setComdat(NewComdat);
    }
  }
}

// ThinLTO backend: the function F has a jump table entry; update this module
// accordingly. isJumpTableCanonical describes the type of the jump table entry.
void LowerTypeTestsModule::importFunction(Function *F,
                                          bool isJumpTableCanonical) {
  assert(F->getType()->getAddressSpace() == 0);

  GlobalValue::VisibilityTypes Visibility = F->getVisibility();
  std::string Name = std::string(F->getName());

  if (F->isDeclarationForLinker() && isJumpTableCanonical) {
    // Non-dso_local functions may be overridden at run time,
    // don't short-circuit them.
    if (F->isDSOLocal()) {
      Function *RealF = Function::Create(F->getFunctionType(),
                                         GlobalValue::ExternalLinkage,
                                         F->getAddressSpace(),
                                         Name + ".cfi", &M);
      RealF->setVisibility(GlobalValue::HiddenVisibility);
      replaceDirectCalls(F, RealF);
    }
    return;
  }

  Function *FDecl;
  if (!isJumpTableCanonical) {
    // Either a declaration of an external function or a reference to a locally
    // defined jump table.
    FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
                             F->getAddressSpace(), Name + ".cfi_jt", &M);
    FDecl->setVisibility(GlobalValue::HiddenVisibility);
  } else {
    F->setName(Name + ".cfi");
    maybeReplaceComdat(F, Name);
    FDecl = Function::Create(F->getFunctionType(), GlobalValue::ExternalLinkage,
                             F->getAddressSpace(), Name, &M);
    FDecl->setVisibility(Visibility);
    Visibility = GlobalValue::HiddenVisibility;

    // Update aliases pointing to this function to also include the ".cfi"
    // suffix. We expect the jump table entry to either point to the real
    // function or an alias. Redirect all other users to the jump table entry.
    for (auto &U : F->uses()) {
      if (auto *A = dyn_cast<GlobalAlias>(U.getUser())) {
        std::string AliasName = A->getName().str() + ".cfi";
        Function *AliasDecl = Function::Create(
            F->getFunctionType(), GlobalValue::ExternalLinkage,
            F->getAddressSpace(), "", &M);
        AliasDecl->takeName(A);
        A->replaceAllUsesWith(AliasDecl);
        A->setName(AliasName);
      }
    }
  }

  if (F->hasExternalWeakLinkage())
    replaceWeakDeclarationWithJumpTablePtr(F, FDecl, isJumpTableCanonical);
  else
    replaceCfiUses(F, FDecl, isJumpTableCanonical);

  // Set visibility late because it's used in replaceCfiUses() to determine
  // whether uses need to be replaced.
  F->setVisibility(Visibility);
}

static auto
buildBitSets(ArrayRef<Metadata *> TypeIds,
             const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout) {
  DenseMap<Metadata *, std::vector<uint64_t>> OffsetsByTypeID;
  // Pre-populate the map with interesting type identifiers.
  for (Metadata *TypeId : TypeIds)
    OffsetsByTypeID[TypeId];
  for (const auto &[Mem, MemOff] : GlobalLayout) {
    for (MDNode *Type : Mem->types()) {
      auto It = OffsetsByTypeID.find(Type->getOperand(1));
      if (It == OffsetsByTypeID.end())
        continue;
      uint64_t Offset =
          cast<ConstantInt>(
              cast<ConstantAsMetadata>(Type->getOperand(0))->getValue())
              ->getZExtValue();
      It->second.push_back(MemOff + Offset);
    }
  }

  std::vector<std::pair<Metadata *, BitSetInfo>> BitSets;
  BitSets.reserve(TypeIds.size());
  for (Metadata *TypeId : TypeIds) {
    BitSets.emplace_back(TypeId, buildBitSet(OffsetsByTypeID[TypeId]));
    LLVM_DEBUG({
      if (auto MDS = dyn_cast<MDString>(TypeId))
        dbgs() << MDS->getString() << ": ";
      else
        dbgs() << "<unnamed>: ";
      BitSets.back().second.print(dbgs());
    });
  }

  return BitSets;
}

void LowerTypeTestsModule::lowerTypeTestCalls(
    ArrayRef<Metadata *> TypeIds, Constant *CombinedGlobalAddr,
    const DenseMap<GlobalTypeMember *, uint64_t> &GlobalLayout) {
  // For each type identifier in this disjoint set...
  for (const auto &[TypeId, BSI] : buildBitSets(TypeIds, GlobalLayout)) {
    ByteArrayInfo *BAI = nullptr;
    TypeIdLowering TIL;

    uint64_t GlobalOffset =
        BSI.ByteOffset + ((BSI.BitSize - 1) << BSI.AlignLog2);
    TIL.OffsetedGlobal = ConstantExpr::getGetElementPtr(
        Int8Ty, CombinedGlobalAddr, ConstantInt::get(IntPtrTy, GlobalOffset));
    TIL.AlignLog2 = ConstantInt::get(IntPtrTy, BSI.AlignLog2);
    TIL.SizeM1 = ConstantInt::get(IntPtrTy, BSI.BitSize - 1);
    if (BSI.isAllOnes()) {
      TIL.TheKind = (BSI.BitSize == 1) ? TypeTestResolution::Single
                                       : TypeTestResolution::AllOnes;
    } else if (BSI.BitSize <= IntPtrTy->getBitWidth()) {
      TIL.TheKind = TypeTestResolution::Inline;
      uint64_t InlineBits = 0;
      for (auto Bit : BSI.Bits)
        InlineBits |= uint64_t(1) << Bit;
      if (InlineBits == 0)
        TIL.TheKind = TypeTestResolution::Unsat;
      else
        TIL.InlineBits = ConstantInt::get(
            (BSI.BitSize <= 32) ? Int32Ty : Int64Ty, InlineBits);
    } else {
      TIL.TheKind = TypeTestResolution::ByteArray;
      ++NumByteArraysCreated;
      BAI = createByteArray(BSI);
      TIL.TheByteArray = BAI->ByteArray;
      TIL.BitMask = BAI->MaskGlobal;
    }

    TypeIdUserInfo &TIUI = TypeIdUsers[TypeId];

    if (TIUI.IsExported) {
      uint8_t *MaskPtr = exportTypeId(cast<MDString>(TypeId)->getString(), TIL);
      if (BAI)
        BAI->MaskPtr = MaskPtr;
    }

    // Lower each call to llvm.type.test for this type identifier.
    for (CallInst *CI : TIUI.CallSites) {
      ++NumTypeTestCallsLowered;
      Value *Lowered = lowerTypeTestCall(TypeId, CI, TIL);
      if (Lowered) {
        CI->replaceAllUsesWith(Lowered);
        CI->eraseFromParent();
      }
    }
  }
}
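
// Illustrative examples of the resolutions chosen above: BitSize == 1 with
// all bits set lowers to Single (a plain pointer comparison); Bits == {0, 2}
// with BitSize == 3 lowers to Inline with InlineBits == 0b101; and a set
// wider than the pointer size falls back to a ByteArray resolution.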

void LowerTypeTestsModule::verifyTypeMDNode(GlobalObject *GO, MDNode *Type) {
  if (Type->getNumOperands() != 2)
    report_fatal_error("All operands of type metadata must have 2 elements");

  if (GO->isThreadLocal())
    report_fatal_error("Bit set element may not be thread-local");
  if (isa<GlobalVariable>(GO) && GO->hasSection())
    report_fatal_error(
        "A member of a type identifier may not have an explicit section");

  // FIXME: We previously checked that a global var member of a type identifier
  // must be a definition, but the IR linker may leave type metadata on
  // declarations. We should restore this check after fixing PR31759.

  auto OffsetConstMD = dyn_cast<ConstantAsMetadata>(Type->getOperand(0));
  if (!OffsetConstMD)
    report_fatal_error("Type offset must be a constant");
  auto OffsetInt = dyn_cast<ConstantInt>(OffsetConstMD->getValue());
  if (!OffsetInt)
    report_fatal_error("Type offset must be an integer constant");
}

static const unsigned kX86JumpTableEntrySize = 8;
static const unsigned kX86IBTJumpTableEntrySize = 16;
static const unsigned kARMJumpTableEntrySize = 4;
static const unsigned kARMBTIJumpTableEntrySize = 8;
static const unsigned kARMv6MJumpTableEntrySize = 16;
static const unsigned kRISCVJumpTableEntrySize = 8;
static const unsigned kLOONGARCH64JumpTableEntrySize = 8;

bool LowerTypeTestsModule::hasBranchTargetEnforcement() {
  if (HasBranchTargetEnforcement == -1) {
    // First time this query has been called. Find out the answer by checking
    // the module flags.
    if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
            M.getModuleFlag("branch-target-enforcement")))
      HasBranchTargetEnforcement = !BTE->isZero();
    else
      HasBranchTargetEnforcement = 0;
  }
  return HasBranchTargetEnforcement;
}

unsigned
LowerTypeTestsModule::getJumpTableEntrySize(Triple::ArchType JumpTableArch) {
  switch (JumpTableArch) {
  case Triple::x86:
  case Triple::x86_64:
    if (const auto *MD = mdconst::extract_or_null<ConstantInt>(
            M.getModuleFlag("cf-protection-branch")))
      if (MD->getZExtValue())
        return kX86IBTJumpTableEntrySize;
    return kX86JumpTableEntrySize;
  case Triple::arm:
    return kARMJumpTableEntrySize;
  case Triple::thumb:
    if (CanUseThumbBWJumpTable) {
      if (hasBranchTargetEnforcement())
        return kARMBTIJumpTableEntrySize;
      return kARMJumpTableEntrySize;
    } else {
      return kARMv6MJumpTableEntrySize;
    }
  case Triple::aarch64:
    if (hasBranchTargetEnforcement())
      return kARMBTIJumpTableEntrySize;
    return kARMJumpTableEntrySize;
  case Triple::riscv32:
  case Triple::riscv64:
    return kRISCVJumpTableEntrySize;
  case Triple::loongarch64:
    return kLOONGARCH64JumpTableEntrySize;
  default:
    report_fatal_error("Unsupported architecture for jump tables");
  }
}

// Create an inline asm constant representing a jump table entry for the target.
// This consists of an instruction sequence containing a relative branch to
// Dest.
InlineAsm *
LowerTypeTestsModule::createJumpTableEntryAsm(Triple::ArchType JumpTableArch) {
  std::string Asm;
  raw_string_ostream AsmOS(Asm);

  if (JumpTableArch == Triple::x86 || JumpTableArch == Triple::x86_64) {
    bool Endbr = false;
    if (const auto *MD = mdconst::extract_or_null<ConstantInt>(
            M.getModuleFlag("cf-protection-branch")))
      Endbr = !MD->isZero();
    if (Endbr)
      AsmOS << (JumpTableArch == Triple::x86 ? "endbr32\n" : "endbr64\n");
    AsmOS << "jmp ${0:c}@plt\n";
    if (Endbr)
      AsmOS << ".balign 16, 0xcc\n";
    else
      AsmOS << "int3\nint3\nint3\n";
  } else if (JumpTableArch == Triple::arm) {
    AsmOS << "b $0\n";
  } else if (JumpTableArch == Triple::aarch64) {
    if (hasBranchTargetEnforcement())
      AsmOS << "bti c\n";
    AsmOS << "b $0\n";
  } else if (JumpTableArch == Triple::thumb) {
    if (!CanUseThumbBWJumpTable) {
      // In Armv6-M, this sequence will generate a branch without corrupting
      // any registers. We use two stack words; in the second, we construct the
      // address we'll pop into pc, and the first is used to save and restore
      // r0 which we use as a temporary register.
      //
      // To support position-independent use cases, the offset of the target
      // function is stored as a relative offset (which will expand into an
      // R_ARM_REL32 relocation in ELF, and presumably the equivalent in other
      // object file types), and added to pc after we load it. (The alternative
      // B.W is automatically pc-relative.)
      //
      // There are five 16-bit Thumb instructions here, so the .balign 4 adds a
      // sixth halfword of padding, and then the offset consumes a further 4
      // bytes, for a total of 16, which is very convenient since entries in
      // this jump table need to have power-of-two size.
      AsmOS << "push {r0,r1}\n"
            << "ldr r0, 1f\n"
            << "0: add r0, r0, pc\n"
            << "str r0, [sp, #4]\n"
            << "pop {r0,pc}\n"
            << ".balign 4\n"
            << "1: .word $0 - (0b + 4)\n";
    } else {
      if (hasBranchTargetEnforcement())
        AsmOS << "bti\n";
      AsmOS << "b.w $0\n";
    }
  } else if (JumpTableArch == Triple::riscv32 ||
             JumpTableArch == Triple::riscv64) {
    AsmOS << "tail $0@plt\n";
  } else if (JumpTableArch == Triple::loongarch64) {
    AsmOS << "pcalau12i $$t0, %pc_hi20($0)\n"
          << "jirl $$r0, $$t0, %pc_lo12($0)\n";
  } else {
    report_fatal_error("Unsupported architecture for jump tables");
  }

  return InlineAsm::get(
      FunctionType::get(Type::getVoidTy(M.getContext()), PtrTy, false),
      AsmOS.str(), "s",
      /*hasSideEffects=*/true);
}
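
// On x86-64 without IBT, for example, each 8-byte entry expands to
// (illustrative):
//   jmp f.cfi@plt     # 5-byte pc-relative jump
//   int3; int3; int3  # pad to kX86JumpTableEntrySize
// so the address of entry N is the jump table address plus N * 8.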

/// Given a disjoint set of type identifiers and functions, build the bit sets
/// and lower the llvm.type.test calls, architecture dependently.
void LowerTypeTestsModule::buildBitSetsFromFunctions(
    ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
  if (Arch == Triple::x86 || Arch == Triple::x86_64 || Arch == Triple::arm ||
      Arch == Triple::thumb || Arch == Triple::aarch64 ||
      Arch == Triple::riscv32 || Arch == Triple::riscv64 ||
      Arch == Triple::loongarch64)
    buildBitSetsFromFunctionsNative(TypeIds, Functions);
  else if (Arch == Triple::wasm32 || Arch == Triple::wasm64)
    buildBitSetsFromFunctionsWASM(TypeIds, Functions);
  else
    report_fatal_error("Unsupported architecture for jump tables");
}

void LowerTypeTestsModule::moveInitializerToModuleConstructor(
    GlobalVariable *GV) {
  if (WeakInitializerFn == nullptr) {
    WeakInitializerFn = Function::Create(
        FunctionType::get(Type::getVoidTy(M.getContext()),
                          /* IsVarArg */ false),
        GlobalValue::InternalLinkage,
        M.getDataLayout().getProgramAddressSpace(),
        "__cfi_global_var_init", &M);
    BasicBlock *BB =
        BasicBlock::Create(M.getContext(), "entry", WeakInitializerFn);
    ReturnInst::Create(M.getContext(), BB);
    WeakInitializerFn->setSection(
        ObjectFormat == Triple::MachO
            ? "__TEXT,__StaticInit,regular,pure_instructions"
            : ".text.startup");
    // This code is equivalent to relocation application, and should run at the
    // earliest possible time (i.e. with the highest priority).
    appendToGlobalCtors(M, WeakInitializerFn, /* Priority */ 0);
  }

  IRBuilder<> IRB(WeakInitializerFn->getEntryBlock().getTerminator());
  GV->setConstant(false);
  IRB.CreateAlignedStore(GV->getInitializer(), GV, GV->getAlign());
  GV->setInitializer(Constant::getNullValue(GV->getValueType()));
}

void LowerTypeTestsModule::findGlobalVariableUsersOf(
    Constant *C, SmallSetVector<GlobalVariable *, 8> &Out) {
  for (auto *U : C->users()) {
    if (auto *GV = dyn_cast<GlobalVariable>(U))
      Out.insert(GV);
    else if (auto *C2 = dyn_cast<Constant>(U))
      findGlobalVariableUsersOf(C2, Out);
  }
}

// Replace all uses of F with (F ? JT : 0).
void LowerTypeTestsModule::replaceWeakDeclarationWithJumpTablePtr(
    Function *F, Constant *JT, bool IsJumpTableCanonical) {
  // The target expression cannot appear in a constant initializer on most
  // (all?) targets. Switch to a runtime initializer.
  SmallSetVector<GlobalVariable *, 8> GlobalVarUsers;
  findGlobalVariableUsersOf(F, GlobalVarUsers);
  for (auto *GV : GlobalVarUsers) {
    if (GV == GlobalAnnotation)
      continue;
    moveInitializerToModuleConstructor(GV);
  }

  // Cannot RAUW F with an expression that uses F. Replace with a temporary
  // placeholder first.
  Function *PlaceholderFn =
      Function::Create(cast<FunctionType>(F->getValueType()),
                       GlobalValue::ExternalWeakLinkage,
                       F->getAddressSpace(), "", &M);
  replaceCfiUses(F, PlaceholderFn, IsJumpTableCanonical);

  convertUsersOfConstantsToInstructions(PlaceholderFn);
  // Don't use a range-based loop, because the use list will be modified.
  while (!PlaceholderFn->use_empty()) {
    Use &U = *PlaceholderFn->use_begin();
    auto *InsertPt = dyn_cast<Instruction>(U.getUser());
    assert(InsertPt && "Non-instruction users should have been eliminated");
    auto *PN = dyn_cast<PHINode>(InsertPt);
    if (PN)
      InsertPt = PN->getIncomingBlock(U)->getTerminator();
    IRBuilder Builder(InsertPt);
    Value *ICmp = Builder.CreateICmp(CmpInst::ICMP_NE, F,
                                     Constant::getNullValue(F->getType()));
    Value *Select = Builder.CreateSelect(ICmp, JT,
                                         Constant::getNullValue(F->getType()));

    if (auto *SI = dyn_cast<SelectInst>(Select))
      SI->setDebugLoc(DebugLoc::getCompilerGenerated());
    // For phi nodes, we need to update the incoming value for all operands
    // with the same predecessor.
    if (PN)
      PN->setIncomingValueForBlock(InsertPt->getParent(), Select);
    else
      U.set(Select);
  }
  PlaceholderFn->eraseFromParent();
}

static bool isThumbFunction(Function *F, Triple::ArchType ModuleArch) {
  Attribute TFAttr = F->getFnAttribute("target-features");
  if (TFAttr.isValid()) {
    SmallVector<StringRef, 6> Features;
    TFAttr.getValueAsString().split(Features, ',');
    for (StringRef Feature : Features) {
      if (Feature == "-thumb-mode")
        return false;
      else if (Feature == "+thumb-mode")
        return true;
    }
  }

  return ModuleArch == Triple::thumb;
}

// Each jump table must be either ARM or Thumb as a whole for the bit-test math
// to work. Pick one that matches the majority of members to minimize interop
// veneers inserted by the linker.
Triple::ArchType LowerTypeTestsModule::selectJumpTableArmEncoding(
    ArrayRef<GlobalTypeMember *> Functions) {
  if (Arch != Triple::arm && Arch != Triple::thumb)
    return Arch;

  if (!CanUseThumbBWJumpTable && CanUseArmJumpTable) {
    // In architectures that provide Arm and Thumb-1 but not Thumb-2,
    // we should always prefer the Arm jump table format, because the
    // Thumb-1 one is larger and slower.
    return Triple::arm;
  }

  // Otherwise, go with majority vote.
  unsigned ArmCount = 0, ThumbCount = 0;
  for (const auto GTM : Functions) {
    if (!GTM->isJumpTableCanonical()) {
      // PLT stubs are always ARM.
      // FIXME: This is the wrong heuristic for non-canonical jump tables.
      ++ArmCount;
      continue;
    }

    Function *F = cast<Function>(GTM->getGlobal());
    ++(isThumbFunction(F, Arch) ? ThumbCount : ArmCount);
  }

  return ArmCount > ThumbCount ? Triple::arm : Triple::thumb;
}
1538
1539void LowerTypeTestsModule::createJumpTable(
1540 Function *F, ArrayRef<GlobalTypeMember *> Functions,
1541 Triple::ArchType JumpTableArch) {
1542 BasicBlock *BB = BasicBlock::Create(M.getContext(), "entry", F);
1543 IRBuilder<> IRB(BB);
1544
1545 InlineAsm *JumpTableAsm = createJumpTableEntryAsm(JumpTableArch);
1546
1547 // Check if all entries have the NoUnwind attribute.
1548 // If all entries have it, we can safely mark the
1549 // cfi.jumptable as NoUnwind, otherwise, direct calls
1550 // to the jump table will not handle exceptions properly
1551 bool areAllEntriesNounwind = true;
1552 for (GlobalTypeMember *GTM : Functions) {
1553 if (!llvm::cast<llvm::Function>(GTM->getGlobal())
1554 ->hasFnAttribute(llvm::Attribute::NoUnwind)) {
1555 areAllEntriesNounwind = false;
1556 }
1557 IRB.CreateCall(JumpTableAsm, GTM->getGlobal());
1558 }
1559 IRB.CreateUnreachable();
1560
1561 // Align the whole table by entry size.
1562 F->setAlignment(Align(getJumpTableEntrySize(JumpTableArch)));
1563 F->addFnAttr(Attribute::Naked);
1564 if (JumpTableArch == Triple::arm)
1565 F->addFnAttr("target-features", "-thumb-mode");
1566 if (JumpTableArch == Triple::thumb) {
1567 if (hasBranchTargetEnforcement()) {
1568 // If we're generating a Thumb jump table with BTI, add a target-features
1569 // setting to ensure BTI can be assembled.
1570 F->addFnAttr("target-features", "+thumb-mode,+pacbti");
1571 } else {
1572 F->addFnAttr("target-features", "+thumb-mode");
1573 if (CanUseThumbBWJumpTable) {
1574 // Thumb jump table assembly needs Thumb2. The following attribute is
1575 // added by Clang for -march=armv7.
1576 F->addFnAttr("target-cpu", "cortex-a8");
1577 }
1578 }
1579 }
1580 // When -mbranch-protection= is used, the inline asm adds a BTI. Suppress BTI
1581 // for the function to avoid double BTI. This is a no-op without
1582 // -mbranch-protection=.
1583 if (JumpTableArch == Triple::aarch64 || JumpTableArch == Triple::thumb) {
1584 if (F->hasFnAttribute("branch-target-enforcement"))
1585 F->removeFnAttr("branch-target-enforcement");
1586 if (F->hasFnAttribute("sign-return-address"))
1587 F->removeFnAttr("sign-return-address");
1588 }
1589 if (JumpTableArch == Triple::riscv32 || JumpTableArch == Triple::riscv64) {
1590 // Make sure the jump table assembly is not modified by the assembler or
1591 // the linker.
1592 F->addFnAttr("target-features", "-c,-relax");
1593 }
1594 // When -fcf-protection= is used, the inline asm adds an ENDBR. Suppress ENDBR
1595 // for the function to avoid double ENDBR. This is a no-op without
1596 // -fcf-protection=.
1597 if (JumpTableArch == Triple::x86 || JumpTableArch == Triple::x86_64)
1598 F->addFnAttr(Attribute::NoCfCheck);
1599
1600 // Make sure we don't emit .eh_frame for this function if it isn't needed.
1601 if (areAllEntriesNounwind)
1602 F->addFnAttr(Attribute::NoUnwind);
1603
1604 // Make sure we do not inline any calls to the cfi.jumptable.
1605 F->addFnAttr(Attribute::NoInline);
1606}
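// For illustration (a sketch, not verbatim output): on x86-64 each
// IRB.CreateCall above becomes one fixed-size asm entry, conceptually
//   jmp f.cfi ; int3 ; int3 ; int3   (8 bytes)
// so entry I lands at .cfi.jumptable + 8*I, matching the GlobalLayout the
// caller computed from getJumpTableEntrySize().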
1607
1608/// Given a disjoint set of type identifiers and functions, build a jump table
1609/// for the functions, build the bit sets and lower the llvm.type.test calls.
1610 void LowerTypeTestsModule::buildBitSetsFromFunctionsNative(
1611 ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
1612 // Unlike the global bitset builder, the function bitset builder cannot
1613 // re-arrange functions in a particular order and base its calculations on the
1614 // layout of the functions' entry points, as we have no idea how large a
1615 // particular function will end up being (the size could even depend on what
1616 // this pass does!) Instead, we build a jump table, which is a block of code
1617 // consisting of one branch instruction for each of the functions in the bit
1618 // set that branches to the target function, and redirect any taken function
1619 // addresses to the corresponding jump table entry. In the object file's
1620 // symbol table, the symbols for the target functions also refer to the jump
1621 // table entries, so that addresses taken outside the module will pass any
1622 // verification done inside the module.
1623 //
1624 // In more concrete terms, suppose we have three functions f, g, h which are
1625 // of the same type, and a function foo that returns their addresses:
1626 //
1627 // f:
1628 // mov 0, %eax
1629 // ret
1630 //
1631 // g:
1632 // mov 1, %eax
1633 // ret
1634 //
1635 // h:
1636 // mov 2, %eax
1637 // ret
1638 //
1639 // foo:
1640 // mov f, %eax
1641 // mov g, %edx
1642 // mov h, %ecx
1643 // ret
1644 //
1645 // We output the jump table as module-level inline asm string. The end result
1646 // will (conceptually) look like this:
1647 //
1648 // f = .cfi.jumptable
1649 // g = .cfi.jumptable + 4
1650 // h = .cfi.jumptable + 8
1651 // .cfi.jumptable:
1652 // jmp f.cfi ; 5 bytes
1653 // int3 ; 1 byte
1654 // int3 ; 1 byte
1655 // int3 ; 1 byte
1656 // jmp g.cfi ; 5 bytes
1657 // int3 ; 1 byte
1658 // int3 ; 1 byte
1659 // int3 ; 1 byte
1660 // jmp h.cfi ; 5 bytes
1661 // int3 ; 1 byte
1662 // int3 ; 1 byte
1663 // int3 ; 1 byte
1664 //
1665 // f.cfi:
1666 // mov 0, %eax
1667 // ret
1668 //
1669 // g.cfi:
1670 // mov 1, %eax
1671 // ret
1672 //
1673 // h.cfi:
1674 // mov 2, %eax
1675 // ret
1676 //
1677 // foo:
1678 // mov f, %eax
1679 // mov g, %edx
1680 // mov h, %ecx
1681 // ret
1682 //
1683 // Because the addresses of f, g, h are evenly spaced at a power of 2, in the
1684 // normal case the check can be carried out using the same kind of simple
1685 // arithmetic that we normally use for globals.
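// For illustration (a sketch of the lowering, assuming the 8-byte entries
// shown above): a test of whether p is one of {f, g, h} becomes roughly
//   x = ptrtoint(p) - ptrtoint(.cfi.jumptable)
//   ok = rotr(x, 3) < 3   ; the rotate folds in the 8-byte alignment check
// which is the same offset/alignment/range arithmetic lowerTypeTestCalls
// uses for global variable bitsets.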
1686
1687 // FIXME: find a better way to represent the jumptable in the IR.
1688 assert(!Functions.empty());
1689
1690 // Decide on the jump table encoding, so that we know how big the
1691 // entries will be.
1692 Triple::ArchType JumpTableArch = selectJumpTableArmEncoding(Functions);
1693
1694 // Build a simple layout based on the regular layout of jump tables.
1695 DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
1696 unsigned EntrySize = getJumpTableEntrySize(JumpTableArch);
1697 for (unsigned I = 0; I != Functions.size(); ++I)
1698 GlobalLayout[Functions[I]] = I * EntrySize;
1699
1700 Function *JumpTableFn =
1701 Function::Create(FunctionType::get(Type::getVoidTy(M.getContext()),
1702 /* IsVarArg */ false),
1703 GlobalValue::PrivateLinkage,
1704 M.getDataLayout().getProgramAddressSpace(),
1705 ".cfi.jumptable", &M);
1706 ArrayType *JumpTableEntryType = ArrayType::get(Int8Ty, EntrySize);
1707 ArrayType *JumpTableType =
1708 ArrayType::get(JumpTableEntryType, Functions.size());
1709 auto JumpTable = ConstantExpr::getPointerCast(
1710 JumpTableFn, PointerType::getUnqual(M.getContext()));
1711
1712 lowerTypeTestCalls(TypeIds, JumpTable, GlobalLayout);
1713
1714 // Build aliases pointing to offsets into the jump table, and replace
1715 // references to the original functions with references to the aliases.
1716 for (unsigned I = 0; I != Functions.size(); ++I) {
1717 Function *F = cast<Function>(Functions[I]->getGlobal());
1718 bool IsJumpTableCanonical = Functions[I]->isJumpTableCanonical();
1719
1720 Constant *CombinedGlobalElemPtr = ConstantExpr::getInBoundsGetElementPtr(
1721 JumpTableType, JumpTable,
1722 ArrayRef<Constant *>{ConstantInt::get(IntPtrTy, 0),
1723 ConstantInt::get(IntPtrTy, I)});
1724
1725 const bool IsExported = Functions[I]->isExported();
1726 if (!IsJumpTableCanonical) {
1727 GlobalValue::LinkageTypes LT = IsExported ? GlobalValue::ExternalLinkage
1728 : GlobalValue::InternalLinkage;
1729 GlobalAlias *JtAlias = GlobalAlias::create(JumpTableEntryType, 0, LT,
1730 F->getName() + ".cfi_jt",
1731 CombinedGlobalElemPtr, &M);
1732 if (IsExported)
1733 JtAlias->setVisibility(GlobalValue::HiddenVisibility);
1734 else
1735 appendToUsed(M, {JtAlias});
1736 }
1737
1738 if (IsExported) {
1739 if (IsJumpTableCanonical)
1740 ExportSummary->cfiFunctionDefs().emplace(F->getName());
1741 else
1742 ExportSummary->cfiFunctionDecls().emplace(F->getName());
1743 }
1744
1745 if (!IsJumpTableCanonical) {
1746 if (F->hasExternalWeakLinkage())
1747 replaceWeakDeclarationWithJumpTablePtr(F, CombinedGlobalElemPtr,
1748 IsJumpTableCanonical);
1749 else
1750 replaceCfiUses(F, CombinedGlobalElemPtr, IsJumpTableCanonical);
1751 } else {
1752 assert(F->getType()->getAddressSpace() == 0);
1753
1754 GlobalAlias *FAlias =
1755 GlobalAlias::create(JumpTableEntryType, 0, F->getLinkage(), "",
1756 CombinedGlobalElemPtr, &M);
1757 FAlias->setVisibility(F->getVisibility());
1758 FAlias->takeName(F);
1759 if (FAlias->hasName()) {
1760 F->setName(FAlias->getName() + ".cfi");
1761 maybeReplaceComdat(F, FAlias->getName());
1762 }
1763 replaceCfiUses(F, FAlias, IsJumpTableCanonical);
1764 if (!F->hasLocalLinkage())
1765 F->setVisibility(GlobalVariable::HiddenVisibility);
1766 }
1767 }
1768
1769 createJumpTable(JumpTableFn, Functions, JumpTableArch);
1770}
1771
1772/// Assign a dummy layout using an incrementing counter, tag each function
1773/// with its index represented as metadata, and lower each type test to an
1774/// integer range comparison. During generation of the indirect function call
1775/// table in the backend, it will assign the given indexes.
1776/// Note: Dynamic linking is not supported, as the WebAssembly ABI has not yet
1777/// been finalized.
1778 void LowerTypeTestsModule::buildBitSetsFromFunctionsWASM(
1779 ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Functions) {
1780 assert(!Functions.empty());
1781
1782 // Build consecutive monotonic integer ranges for each call target set
1783 DenseMap<GlobalTypeMember *, uint64_t> GlobalLayout;
1784
1785 for (GlobalTypeMember *GTM : Functions) {
1786 Function *F = cast<Function>(GTM->getGlobal());
1787
1788 // Skip functions that are not address taken, to avoid bloating the table
1789 if (!F->hasAddressTaken())
1790 continue;
1791
1792 // Store metadata with the index for each function
1793 MDNode *MD = MDNode::get(F->getContext(),
1794 ArrayRef<Metadata *>(ConstantAsMetadata::get(
1795 ConstantInt::get(Int64Ty, IndirectIndex))));
1796 F->setMetadata("wasm.index", MD);
1797
1798 // Assign the counter value
1799 GlobalLayout[GTM] = IndirectIndex++;
1800 }
1801
1802 // The indirect function table index space starts at zero, so pass a NULL
1803 // pointer as the subtracted "jump table" offset.
1804 lowerTypeTestCalls(TypeIds, ConstantPointerNull::get(PtrTy),
1805 GlobalLayout);
1806}
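// For illustration (a sketch, not verbatim output): after this runs, a
// tagged function looks roughly like
//   define void @f() !wasm.index !0 { ... }
//   !0 = !{i64 42}
// and each type test lowers to a range check on the raw table index, since
// the "jump table" base passed to lowerTypeTestCalls here is null.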
1807
1808 void LowerTypeTestsModule::buildBitSetsFromDisjointSet(
1809 ArrayRef<Metadata *> TypeIds, ArrayRef<GlobalTypeMember *> Globals,
1810 ArrayRef<ICallBranchFunnel *> ICallBranchFunnels) {
1811 DenseMap<Metadata *, uint64_t> TypeIdIndices;
1812 for (unsigned I = 0; I != TypeIds.size(); ++I)
1813 TypeIdIndices[TypeIds[I]] = I;
1814
1815 // For each type identifier, build a set of indices that refer to members of
1816 // the type identifier.
1817 std::vector<std::set<uint64_t>> TypeMembers(TypeIds.size());
1818 unsigned GlobalIndex = 0;
1819 DenseMap<GlobalTypeMember *, uint64_t> GlobalIndices;
1820 for (GlobalTypeMember *GTM : Globals) {
1821 for (MDNode *Type : GTM->types()) {
1822 // Type = { offset, type identifier }
1823 auto I = TypeIdIndices.find(Type->getOperand(1));
1824 if (I != TypeIdIndices.end())
1825 TypeMembers[I->second].insert(GlobalIndex);
1826 }
1827 GlobalIndices[GTM] = GlobalIndex;
1828 GlobalIndex++;
1829 }
1830
1831 for (ICallBranchFunnel *JT : ICallBranchFunnels) {
1832 TypeMembers.emplace_back();
1833 std::set<uint64_t> &TMSet = TypeMembers.back();
1834 for (GlobalTypeMember *T : JT->targets())
1835 TMSet.insert(GlobalIndices[T]);
1836 }
1837
1838 // Order the sets of indices by size. The GlobalLayoutBuilder works best
1839 // when given small index sets first.
1840 llvm::stable_sort(TypeMembers, [](const std::set<uint64_t> &O1,
1841 const std::set<uint64_t> &O2) {
1842 return O1.size() < O2.size();
1843 });
1844
1845 // Create a GlobalLayoutBuilder and provide it with index sets as layout
1846 // fragments. The GlobalLayoutBuilder tries to lay out members of fragments as
1847 // close together as possible.
1848 GlobalLayoutBuilder GLB(Globals.size());
1849 for (auto &&MemSet : TypeMembers)
1850 GLB.addFragment(MemSet);
1851
1852 // Build a vector of globals with the computed layout.
1853 bool IsGlobalSet =
1854 Globals.empty() || isa<GlobalVariable>(Globals[0]->getGlobal());
1855 std::vector<GlobalTypeMember *> OrderedGTMs(Globals.size());
1856 auto OGTMI = OrderedGTMs.begin();
1857 for (auto &&F : GLB.Fragments) {
1858 for (auto &&Offset : F) {
1859 if (IsGlobalSet != isa<GlobalVariable>(Globals[Offset]->getGlobal()))
1860 report_fatal_error("Type identifier may not contain both global "
1861 "variables and functions");
1862 *OGTMI++ = Globals[Offset];
1863 }
1864 }
1865
1866 // Build the bitsets from this disjoint set.
1867 if (IsGlobalSet)
1868 buildBitSetsFromGlobalVariables(TypeIds, OrderedGTMs);
1869 else
1870 buildBitSetsFromFunctions(TypeIds, OrderedGTMs);
1871}
1872
1873/// Lower all type tests in this module.
1874LowerTypeTestsModule::LowerTypeTestsModule(
1875 Module &M, ModuleAnalysisManager &AM, ModuleSummaryIndex *ExportSummary,
1876 const ModuleSummaryIndex *ImportSummary, DropTestKind DropTypeTests)
1877 : M(M), ExportSummary(ExportSummary), ImportSummary(ImportSummary),
1878 DropTypeTests(ClDropTypeTests > DropTypeTests ? ClDropTypeTests
1879 : DropTypeTests) {
1880 assert(!(ExportSummary && ImportSummary));
1881 Triple TargetTriple(M.getTargetTriple());
1882 Arch = TargetTriple.getArch();
1883 if (Arch == Triple::arm)
1884 CanUseArmJumpTable = true;
1885 if (Arch == Triple::arm || Arch == Triple::thumb) {
1886 auto &FAM =
1887 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
1888 for (Function &F : M) {
1889 // Skip declarations since we should not query the TTI for them.
1890 if (F.isDeclaration())
1891 continue;
1892 auto &TTI = FAM.getResult<TargetIRAnalysis>(F);
1893 if (TTI.hasArmWideBranch(false))
1894 CanUseArmJumpTable = true;
1895 if (TTI.hasArmWideBranch(true))
1896 CanUseThumbBWJumpTable = true;
1897 }
1898 }
1899 OS = TargetTriple.getOS();
1900 ObjectFormat = TargetTriple.getObjectFormat();
1901
1902 // Function annotation describes or applies to function itself, and
1903 // shouldn't be associated with jump table thunk generated for CFI.
1904 GlobalAnnotation = M.getGlobalVariable("llvm.global.annotations");
1905 if (GlobalAnnotation && GlobalAnnotation->hasInitializer()) {
1906 const ConstantArray *CA =
1907 cast<ConstantArray>(GlobalAnnotation->getInitializer());
1908 FunctionAnnotations.insert_range(CA->operands());
1909 }
1910}
1911
1912bool LowerTypeTestsModule::runForTesting(Module &M, ModuleAnalysisManager &AM) {
1913 ModuleSummaryIndex Summary(/*HaveGVs=*/false);
1914
1915 // Handle the command-line summary arguments. This code is for testing
1916 // purposes only, so we handle errors directly.
1917 if (!ClReadSummary.empty()) {
1918 ExitOnError ExitOnErr("-lowertypetests-read-summary: " + ClReadSummary +
1919 ": ");
1920 auto ReadSummaryFile = ExitOnErr(errorOrToExpected(
1921 MemoryBuffer::getFile(ClReadSummary, /*IsText=*/true)));
1922
1923 yaml::Input In(ReadSummaryFile->getBuffer());
1924 In >> Summary;
1925 ExitOnErr(errorCodeToError(In.error()));
1926 }
1927
1928 bool Changed =
1929 LowerTypeTestsModule(
1930 M, AM,
1931 ClSummaryAction == PassSummaryAction::Export ? &Summary : nullptr,
1932 ClSummaryAction == PassSummaryAction::Import ? &Summary : nullptr,
1933 /*DropTypeTests=*/DropTestKind::None)
1934 .lower();
1935
1936 if (!ClWriteSummary.empty()) {
1937 ExitOnError ExitOnErr("-lowertypetests-write-summary: " + ClWriteSummary +
1938 ": ");
1939 std::error_code EC;
1940 raw_fd_ostream OS(ClWriteSummary, EC, sys::fs::OF_TextWithCRLF);
1941 ExitOnErr(errorCodeToError(EC));
1942
1943 yaml::Output Out(OS);
1944 Out << Summary;
1945 }
1946
1947 return Changed;
1948}
1949
1950static bool isDirectCall(Use& U) {
1951 auto *Usr = dyn_cast<CallInst>(U.getUser());
1952 if (Usr) {
1953 auto *CB = dyn_cast<CallBase>(Usr);
1954 if (CB && CB->isCallee(&U))
1955 return true;
1956 }
1957 return false;
1958}
1959
1960void LowerTypeTestsModule::replaceCfiUses(Function *Old, Value *New,
1961 bool IsJumpTableCanonical) {
1962 SmallSetVector<Constant *, 4> Constants;
1963 for (Use &U : llvm::make_early_inc_range(Old->uses())) {
1964 // Skip no_cfi values, which refer to the function body instead of the jump
1965 // table.
1966 if (isa<NoCFIValue>(U.getUser()))
1967 continue;
1968
1969 // Skip direct calls to externally defined or non-dso_local functions.
1970 if (isDirectCall(U) && (Old->isDSOLocal() || !IsJumpTableCanonical))
1971 continue;
1972
1973 // Skip function annotation.
1974 if (isFunctionAnnotation(U.getUser()))
1975 continue;
1976
1977 // Must handle Constants specially, we cannot call replaceUsesOfWith on a
1978 // constant because they are uniqued.
1979 if (auto *C = dyn_cast<Constant>(U.getUser())) {
1980 if (!isa<GlobalValue>(C)) {
1981 // Save unique users to avoid processing operand replacement
1982 // more than once.
1983 Constants.insert(C);
1984 continue;
1985 }
1986 }
1987
1988 U.set(New);
1989 }
1990
1991 // Process operand replacement of saved constants.
1992 for (auto *C : Constants)
1993 C->handleOperandChange(Old, New);
1994}
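// For illustration (hypothetical IR, not from this file): a use inside a
// uniqued constant such as
//   @vt = constant [1 x ptr] [ptr @f]
// cannot be rewritten through U.set(); handleOperandChange() above re-uniques
// the initializer so that it refers to the jump table entry instead.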
1995
1996 void LowerTypeTestsModule::replaceDirectCalls(Value *Old, Value *New) {
1997 Old->replaceUsesWithIf(New, isDirectCall);
1998}
1999
2000static void dropTypeTests(Module &M, Function &TypeTestFunc,
2001 bool ShouldDropAll) {
2002 for (Use &U : llvm::make_early_inc_range(TypeTestFunc.uses())) {
2003 auto *CI = cast<CallInst>(U.getUser());
2004 // Find and erase llvm.assume intrinsics for this llvm.type.test call.
2005 for (Use &CIU : llvm::make_early_inc_range(CI->uses()))
2006 if (auto *Assume = dyn_cast<AssumeInst>(CIU.getUser()))
2007 Assume->eraseFromParent();
2008 // If the assume was merged with another assume, we might have a use on a
2009 // phi (which will feed the assume). Simply replace the use on the phi
2010 // with "true" and leave the merged assume.
2011 //
2012 // If ShouldDropAll is set, then we need to update any remaining uses,
2013 // regardless of the instruction type.
2014 if (!CI->use_empty()) {
2015 assert(ShouldDropAll || all_of(CI->users(), [](User *U) -> bool {
2016 return isa<PHINode>(U);
2017 }));
2018 CI->replaceAllUsesWith(ConstantInt::getTrue(M.getContext()));
2019 }
2020 CI->eraseFromParent();
2021 }
2022}
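// For illustration (hypothetical IR): given the common sequence
//   %t = call i1 @llvm.type.test(ptr %p, metadata !"_ZTS1A")
//   call void @llvm.assume(i1 %t)
// the assume is erased first, any remaining (e.g. phi) uses of %t are folded
// to true, and the llvm.type.test call itself is then deleted.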
2023
2024bool LowerTypeTestsModule::lower() {
2025 Function *TypeTestFunc =
2026 Intrinsic::getDeclarationIfExists(&M, Intrinsic::type_test);
2027
2028 if (DropTypeTests != DropTestKind::None) {
2029 bool ShouldDropAll = DropTypeTests == DropTestKind::All;
2030 if (TypeTestFunc)
2031 dropTypeTests(M, *TypeTestFunc, ShouldDropAll);
2032 // Normally we'd have already removed all @llvm.public.type.test calls,
2033 // except for in the case where we originally were performing ThinLTO but
2034 // decided not to in the backend.
2035 Function *PublicTypeTestFunc =
2036 Intrinsic::getDeclarationIfExists(&M, Intrinsic::public_type_test);
2037 if (PublicTypeTestFunc)
2038 dropTypeTests(M, *PublicTypeTestFunc, ShouldDropAll);
2039 if (TypeTestFunc || PublicTypeTestFunc) {
2040 // We have deleted the type intrinsics, so we no longer have enough
2041 // information to reason about the liveness of virtual function pointers
2042 // in GlobalDCE.
2043 for (GlobalVariable &GV : M.globals())
2044 GV.eraseMetadata(LLVMContext::MD_vcall_visibility);
2045 return true;
2046 }
2047 return false;
2048 }
2049
2050 // If only some of the modules were split, we cannot correctly perform
2051 // this transformation. We already checked for the presence of type tests
2052 // with partially split modules during the thin link, and would have emitted
2053 // an error if any were found, so here we can simply return.
2054 if ((ExportSummary && ExportSummary->partiallySplitLTOUnits()) ||
2055 (ImportSummary && ImportSummary->partiallySplitLTOUnits()))
2056 return false;
2057
2058 Function *ICallBranchFunnelFunc =
2059 Intrinsic::getDeclarationIfExists(&M, Intrinsic::icall_branch_funnel);
2060 if ((!TypeTestFunc || TypeTestFunc->use_empty()) &&
2061 (!ICallBranchFunnelFunc || ICallBranchFunnelFunc->use_empty()) &&
2062 !ExportSummary && !ImportSummary)
2063 return false;
2064
2065 if (ImportSummary) {
2066 if (TypeTestFunc)
2067 for (Use &U : llvm::make_early_inc_range(TypeTestFunc->uses()))
2068 importTypeTest(cast<CallInst>(U.getUser()));
2069
2070 if (ICallBranchFunnelFunc && !ICallBranchFunnelFunc->use_empty())
2071 report_fatal_error(
2072 "unexpected call to llvm.icall.branch.funnel during import phase");
2073
2074 SmallVector<Function *> Defs;
2075 SmallVector<Function *> Decls;
2076 for (auto &F : M) {
2077 // CFI functions are either external, or promoted. A local function may
2078 // have the same name, but it's not the one we are looking for.
2079 if (F.hasLocalLinkage())
2080 continue;
2081 if (ImportSummary->cfiFunctionDefs().count(F.getName()))
2082 Defs.push_back(&F);
2083 else if (ImportSummary->cfiFunctionDecls().count(F.getName()))
2084 Decls.push_back(&F);
2085 }
2086
2087 {
2088 ScopedSaveAliaseesAndUsed S(M);
2089 for (auto *F : Defs)
2090 importFunction(F, /*isJumpTableCanonical*/ true);
2091 for (auto *F : Decls)
2092 importFunction(F, /*isJumpTableCanonical*/ false);
2093 }
2094
2095 return true;
2096 }
2097
2098 // Equivalence class set containing type identifiers and the globals that
2099 // reference them. This is used to partition the set of type identifiers in
2100 // the module into disjoint sets.
2101 using GlobalClassesTy = EquivalenceClasses<
2102 PointerUnion<GlobalTypeMember *, Metadata *, ICallBranchFunnel *>>;
2103 GlobalClassesTy GlobalClasses;
2104
2105 // Verify the type metadata and build a few data structures to let us
2106 // efficiently enumerate the type identifiers associated with a global:
2107 // a list of GlobalTypeMembers (a GlobalObject stored alongside a vector
2108 // of associated type metadata) and a mapping from type identifiers to their
2109 // list of GlobalTypeMembers and last observed index in the list of globals.
2110 // The indices will be used later to deterministically order the list of type
2111 // identifiers.
2112 BumpPtrAllocator Alloc;
2113 struct TIInfo {
2114 unsigned UniqueId;
2115 std::vector<GlobalTypeMember *> RefGlobals;
2116 };
2117 DenseMap<Metadata *, TIInfo> TypeIdInfo;
2118 unsigned CurUniqueId = 0;
2119 SmallVector<MDNode *, 2> Types;
2120
2121 // Cross-DSO CFI emits jumptable entries for exported functions as well as
2122 // address taken functions in case they are address taken in other modules.
2123 const bool CrossDsoCfi = M.getModuleFlag("Cross-DSO CFI") != nullptr;
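// For illustration (an assumption about the producer, not this file): the
// flag is set by clang under -fsanitize-cfi-cross-dso, e.g.
//   !llvm.module.flags = !{..., !{i32 4, !"Cross-DSO CFI", i32 1}}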
2124
2125 struct ExportedFunctionInfo {
2126 CfiFunctionLinkage Linkage;
2127 MDNode *FuncMD; // {name, linkage, type[, type...]}
2128 };
2129 MapVector<StringRef, ExportedFunctionInfo> ExportedFunctions;
2130 if (ExportSummary) {
2131 NamedMDNode *CfiFunctionsMD = M.getNamedMetadata("cfi.functions");
2132 if (CfiFunctionsMD) {
2133 // A set of all functions that are address taken by a live global object.
2134 DenseSet<GlobalValue::GUID> AddressTaken;
2135 for (auto &I : *ExportSummary)
2136 for (auto &GVS : I.second.getSummaryList())
2137 if (GVS->isLive())
2138 for (const auto &Ref : GVS->refs()) {
2139 AddressTaken.insert(Ref.getGUID());
2140 for (auto &RefGVS : Ref.getSummaryList())
2141 if (auto Alias = dyn_cast<AliasSummary>(RefGVS.get()))
2142 AddressTaken.insert(Alias->getAliaseeGUID());
2143 }
2144 auto IsAddressTaken = [&](GlobalValue::GUID GUID) {
2145 if (AddressTaken.count(GUID))
2146 return true;
2147 auto VI = ExportSummary->getValueInfo(GUID);
2148 if (!VI)
2149 return false;
2150 for (auto &I : VI.getSummaryList())
2151 if (auto Alias = dyn_cast<AliasSummary>(I.get()))
2152 if (AddressTaken.count(Alias->getAliaseeGUID()))
2153 return true;
2154 return false;
2155 };
2156 for (auto *FuncMD : CfiFunctionsMD->operands()) {
2157 assert(FuncMD->getNumOperands() >= 2);
2158 StringRef FunctionName =
2159 cast<MDString>(FuncMD->getOperand(0))->getString();
2160 CfiFunctionLinkage Linkage = static_cast<CfiFunctionLinkage>(
2161 cast<ConstantAsMetadata>(FuncMD->getOperand(1))
2162 ->getValue()
2163 ->getUniqueInteger()
2164 .getZExtValue());
2165 const GlobalValue::GUID GUID =
2166 GlobalValue::getGUIDAssumingExternalLinkage(
2167 GlobalValue::dropLLVMManglingEscape(FunctionName));
2168 // Do not emit jumptable entries for functions that are not live and
2169 // have no live references (and are not exported with cross-DSO CFI).
2170 if (!ExportSummary->isGUIDLive(GUID))
2171 continue;
2172 if (!IsAddressTaken(GUID)) {
2173 if (!CrossDsoCfi || Linkage != CFL_Definition)
2174 continue;
2175
2176 bool Exported = false;
2177 if (auto VI = ExportSummary->getValueInfo(GUID))
2178 for (const auto &GVS : VI.getSummaryList())
2179 if (GVS->isLive() && !GlobalValue::isLocalLinkage(GVS->linkage()))
2180 Exported = true;
2181
2182 if (!Exported)
2183 continue;
2184 }
2185 auto P = ExportedFunctions.insert({FunctionName, {Linkage, FuncMD}});
2186 if (!P.second && P.first->second.Linkage != CFL_Definition)
2187 P.first->second = {Linkage, FuncMD};
2188 }
2189
2190 for (const auto &P : ExportedFunctions) {
2191 StringRef FunctionName = P.first;
2192 CfiFunctionLinkage Linkage = P.second.Linkage;
2193 MDNode *FuncMD = P.second.FuncMD;
2194 Function *F = M.getFunction(FunctionName);
2195 if (F && F->hasLocalLinkage()) {
2196 // Locally defined function that happens to have the same name as a
2197 // function defined in a ThinLTO module. Rename it to move it out of
2198 // the way of the external reference that we're about to create.
2199 // Note that setName will find a unique name for the function, so even
2200 // if there is an existing function with the suffix there won't be a
2201 // name collision.
2202 F->setName(F->getName() + ".1");
2203 F = nullptr;
2204 }
2205
2206 if (!F)
2207 F = Function::Create(
2208 FunctionType::get(Type::getVoidTy(M.getContext()), false),
2209 GlobalVariable::ExternalLinkage,
2210 M.getDataLayout().getProgramAddressSpace(), FunctionName, &M);
2211
2212 // If the function is available_externally, remove its definition so
2213 // that it is handled the same way as a declaration. Later we will try
2214 // to create an alias using this function's linkage, which will fail if
2215 // the linkage is available_externally. This will also result in us
2216 // following the code path below to replace the type metadata.
2217 if (F->hasAvailableExternallyLinkage()) {
2218 F->setLinkage(GlobalValue::ExternalLinkage);
2219 F->deleteBody();
2220 F->setComdat(nullptr);
2221 F->clearMetadata();
2222 }
2223
2224 // Update the linkage for extern_weak declarations when a definition
2225 // exists.
2226 if (Linkage == CFL_Definition && F->hasExternalWeakLinkage())
2227 F->setLinkage(GlobalValue::ExternalLinkage);
2228
2229 // If the function in the full LTO module is a declaration, replace its
2230 // type metadata with the type metadata we found in cfi.functions. That
2231 // metadata is presumed to be more accurate than the metadata attached
2232 // to the declaration.
2233 if (F->isDeclaration()) {
2234 if (Linkage == CFL_WeakDeclaration)
2235 F->setLinkage(GlobalValue::ExternalWeakLinkage);
2236
2237 F->eraseMetadata(LLVMContext::MD_type);
2238 for (unsigned I = 2; I < FuncMD->getNumOperands(); ++I)
2239 F->addMetadata(LLVMContext::MD_type,
2240 *cast<MDNode>(FuncMD->getOperand(I).get()));
2241 }
2242 }
2243 }
2244 }
2245
2246 struct AliasToCreate {
2247 Function *Alias;
2248 std::string TargetName;
2249 };
2250 std::vector<AliasToCreate> AliasesToCreate;
2251
2252 // Parse alias data to replace stand-in function declarations for aliases
2253 // with an alias to the intended target.
2254 if (ExportSummary) {
2255 if (NamedMDNode *AliasesMD = M.getNamedMetadata("aliases")) {
2256 for (auto *AliasMD : AliasesMD->operands()) {
2257 SmallVector<Function *> Aliases;
2258 for (Metadata *MD : AliasMD->operands()) {
2259 auto *MDS = dyn_cast<MDString>(MD);
2260 if (!MDS)
2261 continue;
2262 StringRef AliasName = MDS->getString();
2263 if (!ExportedFunctions.count(AliasName))
2264 continue;
2265 auto *AliasF = M.getFunction(AliasName);
2266 if (AliasF)
2267 Aliases.push_back(AliasF);
2268 }
2269
2270 if (Aliases.empty())
2271 continue;
2272
2273 for (unsigned I = 1; I != Aliases.size(); ++I) {
2274 auto *AliasF = Aliases[I];
2275 ExportedFunctions.erase(AliasF->getName());
2276 AliasesToCreate.push_back(
2277 {AliasF, std::string(Aliases[0]->getName())});
2278 }
2279 }
2280 }
2281 }
2282
2283 DenseMap<GlobalObject *, GlobalTypeMember *> GlobalTypeMembers;
2284 for (GlobalObject &GO : M.global_objects()) {
2285 if (isa<GlobalVariable>(GO) && GO.isDeclarationForLinker())
2286 continue;
2287
2288 Types.clear();
2289 GO.getMetadata(LLVMContext::MD_type, Types);
2290
2291 bool IsJumpTableCanonical = false;
2292 bool IsExported = false;
2293 if (Function *F = dyn_cast<Function>(&GO)) {
2294 IsJumpTableCanonical = isJumpTableCanonical(F);
2295 if (auto It = ExportedFunctions.find(F->getName());
2296 It != ExportedFunctions.end()) {
2297 IsJumpTableCanonical |= It->second.Linkage == CFL_Definition;
2298 IsExported = true;
2299 // TODO: The logic here checks only that the function is address taken,
2300 // not that the address takers are live. This can be updated to check
2301 // their liveness and emit fewer jumptable entries once monolithic LTO
2302 // builds also emit summaries.
2303 } else if (!F->hasAddressTaken()) {
2304 if (!CrossDsoCfi || !IsJumpTableCanonical || F->hasLocalLinkage())
2305 continue;
2306 }
2307 }
2308
2309 auto *GTM = GlobalTypeMember::create(Alloc, &GO, IsJumpTableCanonical,
2310 IsExported, Types);
2311 GlobalTypeMembers[&GO] = GTM;
2312 for (MDNode *Type : Types) {
2313 verifyTypeMDNode(&GO, Type);
2314 auto &Info = TypeIdInfo[Type->getOperand(1)];
2315 Info.UniqueId = ++CurUniqueId;
2316 Info.RefGlobals.push_back(GTM);
2317 }
2318 }
2319
2320 auto AddTypeIdUse = [&](Metadata *TypeId) -> TypeIdUserInfo & {
2321 // Add the call site to the list of call sites for this type identifier. We
2322 // also use TypeIdUsers to keep track of whether we have seen this type
2323 // identifier before. If we have, we don't need to re-add the referenced
2324 // globals to the equivalence class.
2325 auto Ins = TypeIdUsers.insert({TypeId, {}});
2326 if (Ins.second) {
2327 // Add the type identifier to the equivalence class.
2328 auto &GCI = GlobalClasses.insert(TypeId);
2329 GlobalClassesTy::member_iterator CurSet = GlobalClasses.findLeader(GCI);
2330
2331 // Add the referenced globals to the type identifier's equivalence class.
2332 for (GlobalTypeMember *GTM : TypeIdInfo[TypeId].RefGlobals)
2333 CurSet = GlobalClasses.unionSets(
2334 CurSet, GlobalClasses.findLeader(GlobalClasses.insert(GTM)));
2335 }
2336
2337 return Ins.first->second;
2338 };
2339
2340 if (TypeTestFunc) {
2341 for (const Use &U : TypeTestFunc->uses()) {
2342 auto CI = cast<CallInst>(U.getUser());
2343 // If this type test is only used by llvm.assume instructions, it
2344 // was used for whole program devirtualization, and is being kept
2345 // for use by other optimization passes. We do not need or want to
2346 // lower it here. We also don't want to rewrite any associated globals
2347 // unnecessarily. These will be removed by a subsequent LTT invocation
2348 // with the DropTypeTests flag set.
2349 bool OnlyAssumeUses = !CI->use_empty();
2350 for (const Use &CIU : CI->uses()) {
2351 if (isa<AssumeInst>(CIU.getUser()))
2352 continue;
2353 OnlyAssumeUses = false;
2354 break;
2355 }
2356 if (OnlyAssumeUses)
2357 continue;
2358
2359 auto TypeIdMDVal = dyn_cast<MetadataAsValue>(CI->getArgOperand(1));
2360 if (!TypeIdMDVal)
2361 report_fatal_error("Second argument of llvm.type.test must be metadata");
2362 auto TypeId = TypeIdMDVal->getMetadata();
2363 AddTypeIdUse(TypeId).CallSites.push_back(CI);
2364 }
2365 }
2366
2367 if (ICallBranchFunnelFunc) {
2368 for (const Use &U : ICallBranchFunnelFunc->uses()) {
2369 if (Arch != Triple::x86_64)
2370 report_fatal_error(
2371 "llvm.icall.branch.funnel not supported on this target");
2372
2373 auto CI = cast<CallInst>(U.getUser());
2374
2375 std::vector<GlobalTypeMember *> Targets;
2376 if (CI->arg_size() % 2 != 1)
2377 report_fatal_error("number of arguments should be odd");
2378
2379 GlobalClassesTy::member_iterator CurSet;
2380 for (unsigned I = 1; I != CI->arg_size(); I += 2) {
2381 int64_t Offset;
2382 auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
2383 CI->getOperand(I), Offset, M.getDataLayout()));
2384 if (!Base)
2385 report_fatal_error(
2386 "Expected branch funnel operand to be global value");
2387
2388 GlobalTypeMember *GTM = GlobalTypeMembers[Base];
2389 Targets.push_back(GTM);
2390 GlobalClassesTy::member_iterator NewSet =
2391 GlobalClasses.findLeader(GlobalClasses.insert(GTM));
2392 if (I == 1)
2393 CurSet = NewSet;
2394 else
2395 CurSet = GlobalClasses.unionSets(CurSet, NewSet);
2396 }
2397
2398 GlobalClasses.unionSets(
2399 CurSet, GlobalClasses.findLeader(
2400 GlobalClasses.insert(ICallBranchFunnel::create(
2401 Alloc, CI, Targets, ++CurUniqueId))));
2402 }
2403 }
2404
2405 if (ExportSummary) {
2406 DenseMap<GlobalValue::GUID, TinyPtrVector<Metadata *>> MetadataByGUID;
2407 for (auto &P : TypeIdInfo) {
2408 if (auto *TypeId = dyn_cast<MDString>(P.first))
2409 MetadataByGUID[GlobalValue::getGUIDAssumingExternalLinkage(
2410 TypeId->getString())]
2411 .push_back(TypeId);
2412 }
2413
2414 for (auto &P : *ExportSummary) {
2415 for (auto &S : P.second.getSummaryList()) {
2416 if (!ExportSummary->isGlobalValueLive(S.get()))
2417 continue;
2418 if (auto *FS = dyn_cast<FunctionSummary>(S->getBaseObject()))
2419 for (GlobalValue::GUID G : FS->type_tests())
2420 for (Metadata *MD : MetadataByGUID[G])
2421 AddTypeIdUse(MD).IsExported = true;
2422 }
2423 }
2424 }
2425
2426 if (GlobalClasses.empty())
2427 return false;
2428
2429 {
2430 ScopedSaveAliaseesAndUsed S(M);
2431 // For each disjoint set we found...
2432 for (const auto &C : GlobalClasses) {
2433 if (!C->isLeader())
2434 continue;
2435
2436 ++NumTypeIdDisjointSets;
2437 // Build the list of type identifiers in this disjoint set.
2438 std::vector<Metadata *> TypeIds;
2439 std::vector<GlobalTypeMember *> Globals;
2440 std::vector<ICallBranchFunnel *> ICallBranchFunnels;
2441 for (auto M : GlobalClasses.members(*C)) {
2442 if (isa<Metadata *>(M))
2443 TypeIds.push_back(cast<Metadata *>(M));
2444 else if (isa<GlobalTypeMember *>(M))
2445 Globals.push_back(cast<GlobalTypeMember *>(M));
2446 else
2447 ICallBranchFunnels.push_back(cast<ICallBranchFunnel *>(M));
2448 }
2449
2450 // Order type identifiers by unique ID for determinism. This ordering is
2451 // stable as there is a one-to-one mapping between metadata and unique
2452 // IDs.
2453 llvm::sort(TypeIds, [&](Metadata *M1, Metadata *M2) {
2454 return TypeIdInfo[M1].UniqueId < TypeIdInfo[M2].UniqueId;
2455 });
2456
2457 // Same for the branch funnels.
2458 llvm::sort(ICallBranchFunnels,
2459 [&](ICallBranchFunnel *F1, ICallBranchFunnel *F2) {
2460 return F1->UniqueId < F2->UniqueId;
2461 });
2462
2463 // Build bitsets for this disjoint set.
2464 buildBitSetsFromDisjointSet(TypeIds, Globals, ICallBranchFunnels);
2465 }
2466 }
2467
2468 allocateByteArrays();
2469
2470 for (auto A : AliasesToCreate) {
2471 auto *Target = M.getNamedValue(A.TargetName);
2472 if (!isa<GlobalAlias>(Target))
2473 continue;
2474 auto *AliasGA = GlobalAlias::create("", Target);
2475 AliasGA->setVisibility(A.Alias->getVisibility());
2476 AliasGA->setLinkage(A.Alias->getLinkage());
2477 AliasGA->takeName(A.Alias);
2478 A.Alias->replaceAllUsesWith(AliasGA);
2479 A.Alias->eraseFromParent();
2480 }
2481
2482 // Emit .symver directives for exported functions, if they exist.
2483 if (ExportSummary) {
2484 if (NamedMDNode *SymversMD = M.getNamedMetadata("symvers")) {
2485 for (auto *Symver : SymversMD->operands()) {
2486 assert(Symver->getNumOperands() >= 2);
2487 StringRef SymbolName =
2488 cast<MDString>(Symver->getOperand(0))->getString();
2489 StringRef Alias = cast<MDString>(Symver->getOperand(1))->getString();
2490
2491 if (!ExportedFunctions.count(SymbolName))
2492 continue;
2493
2494 M.appendModuleInlineAsm(
2495 (llvm::Twine(".symver ") + SymbolName + ", " + Alias).str());
2496 }
2497 }
2498 }
2499
2500 return true;
2501}
2502
2503 PreservedAnalyses LowerTypeTestsPass::run(Module &M,
2504 ModuleAnalysisManager &AM) {
2505 bool Changed;
2506 if (UseCommandLine)
2507 Changed = LowerTypeTestsModule::runForTesting(M, AM);
2508 else
2509 Changed =
2510 LowerTypeTestsModule(M, AM, ExportSummary, ImportSummary, DropTypeTests)
2511 .lower();
2512 if (!Changed)
2513 return PreservedAnalyses::all();
2514 return PreservedAnalyses::none();
2515}
2516
2517 PreservedAnalyses SimplifyTypeTestsPass::run(Module &M,
2518 ModuleAnalysisManager &AM) {
2519 bool Changed = false;
2520 // Figure out whether inlining has exposed a constant address to a lowered
2521 // type test, and remove the test if so and the address is known to pass the
2522 // test. Unfortunately this pass ends up needing to reverse engineer what
2523 // LowerTypeTests did; this is currently inherent to the design of ThinLTO
2524 // importing where LowerTypeTests needs to run at the start.
2525 //
2526 // We look for things like:
2527 //
2528 // sub (i64 ptrtoint (ptr @_Z2fpv to i64), i64 ptrtoint (ptr
2529 // @__typeid__ZTSFvvE_global_addr to i64))
2530 //
2531 // which gets replaced with 0 if _Z2fpv (more specifically _Z2fpv.cfi, the
2532 // function referred to by the jump table) is a member of the type _ZTSFvv, as
2533 // well as things like
2534 //
2535 // icmp eq ptr @_Z2fpv, @__typeid__ZTSFvvE_global_addr
2536 //
2537 // which gets replaced with true if _Z2fpv is a member.
2538 for (auto &GV : M.globals()) {
2539 if (!GV.getName().starts_with("__typeid_") ||
2540 !GV.getName().ends_with("_global_addr"))
2541 continue;
2542 // __typeid_foo_global_addr -> foo
2543 auto *MD = MDString::get(M.getContext(),
2544 GV.getName().substr(9, GV.getName().size() - 21));
2545 auto MaySimplifyPtr = [&](Value *Ptr) {
2546 if (auto *GV = dyn_cast<GlobalValue>(Ptr))
2547 if (auto *CFIGV = M.getNamedValue((GV->getName() + ".cfi").str()))
2548 Ptr = CFIGV;
2549 return isKnownTypeIdMember(MD, M.getDataLayout(), Ptr, 0);
2550 };
2551 auto MaySimplifyInt = [&](Value *Op) {
2552 auto *PtrAsInt = dyn_cast<ConstantExpr>(Op);
2553 if (!PtrAsInt || PtrAsInt->getOpcode() != Instruction::PtrToInt)
2554 return false;
2555 return MaySimplifyPtr(PtrAsInt->getOperand(0));
2556 };
2557 for (User *U : make_early_inc_range(GV.users())) {
2558 if (auto *CI = dyn_cast<ICmpInst>(U)) {
2559 if (CI->getPredicate() == CmpInst::ICMP_EQ &&
2560 MaySimplifyPtr(CI->getOperand(0))) {
2561 // This is an equality comparison (TypeTestResolution::Single case in
2562 // lowerTypeTestCall). In this case we just replace the comparison
2563 // with true.
2564 CI->replaceAllUsesWith(ConstantInt::getTrue(M.getContext()));
2565 CI->eraseFromParent();
2566 Changed = true;
2567 continue;
2568 }
2569 }
2570 auto *CE = dyn_cast<ConstantExpr>(U);
2571 if (!CE || CE->getOpcode() != Instruction::PtrToInt)
2572 continue;
2573 for (Use &U : make_early_inc_range(CE->uses())) {
2574 auto *CE = dyn_cast<ConstantExpr>(U.getUser());
2575 if (U.getOperandNo() == 0 && CE &&
2576 CE->getOpcode() == Instruction::Sub &&
2577 MaySimplifyInt(CE->getOperand(1))) {
2578 // This is a computation of PtrOffset as generated by
2579 // LowerTypeTestsModule::lowerTypeTestCall above. If
2580 // isKnownTypeIdMember passes we just pretend it evaluated to 0. This
2581 // should cause later passes to remove the range and alignment checks.
2582 // The bitset checks won't be removed but those are uncommon.
2583 CE->replaceAllUsesWith(ConstantInt::get(CE->getType(), 0));
2584 Changed = true;
2585 }
2586 auto *CI = dyn_cast<ICmpInst>(U.getUser());
2587 if (U.getOperandNo() == 1 && CI &&
2588 CI->getPredicate() == CmpInst::ICMP_EQ &&
2589 MaySimplifyInt(CI->getOperand(0))) {
2590 // This is an equality comparison. Unlike in the case above it
2591 // remained as an integer compare.
2592 CI->replaceAllUsesWith(ConstantInt::getTrue(M.getContext()));
2593 CI->eraseFromParent();
2594 Changed = true;
2595 }
2596 }
2597 }
2598 }
2599
2600 if (!Changed)
2601 return PreservedAnalyses::all();
2602 PreservedAnalyses PA;
2603 PA.preserve<DominatorTreeAnalysis>();
2604 PA.preserve<PostDominatorTreeAnalysis>();
2605 PA.preserve<LoopAnalysis>();
2606 return PA;
2607}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Prepare AGPR Alloc
AMDGPU Register Bank Select
This file implements a class to represent arbitrary precision integral constant values and operations...
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file defines the BumpPtrAllocator interface.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
Analysis containing CSE Info
Definition CSEInfo.cpp:27
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
This file contains the declarations for the subclasses of Constant, which represent the different fla...
DXIL Finalize Linkage
dxil translate DXIL Translate Metadata
This file defines the DenseMap class.
Generic implementation of equivalence classes through the use Tarjan's efficient union-find algorithm...
#define DEBUG_TYPE
Hexagon Common GEP
Module.h This file contains the declarations for the Module class.
This header defines various interfaces for pass management in LLVM.
This defines the Use class.
static const unsigned kARMJumpTableEntrySize
static const unsigned kLOONGARCH64JumpTableEntrySize
static bool isKnownTypeIdMember(Metadata *TypeId, const DataLayout &DL, Value *V, uint64_t COffset)
static const unsigned kX86IBTJumpTableEntrySize
static cl::opt< std::string > ClReadSummary("lowertypetests-read-summary", cl::desc("Read summary from given YAML file before running pass"), cl::Hidden)
static const unsigned kRISCVJumpTableEntrySize
static auto buildBitSets(ArrayRef< Metadata * > TypeIds, const DenseMap< GlobalTypeMember *, uint64_t > &GlobalLayout)
static void dropTypeTests(Module &M, Function &TypeTestFunc, bool ShouldDropAll)
static Value * createMaskedBitTest(IRBuilder<> &B, Value *Bits, Value *BitOffset)
Build a test that bit BitOffset mod sizeof(Bits)*8 is set in Bits.
static bool isThumbFunction(Function *F, Triple::ArchType ModuleArch)
static const unsigned kX86JumpTableEntrySize
static cl::opt< bool > AvoidReuse("lowertypetests-avoid-reuse", cl::desc("Try to avoid reuse of byte array addresses using aliases"), cl::Hidden, cl::init(true))
static cl::opt< PassSummaryAction > ClSummaryAction("lowertypetests-summary-action", cl::desc("What to do with the summary when running this pass"), cl::values(clEnumValN(PassSummaryAction::None, "none", "Do nothing"), clEnumValN(PassSummaryAction::Import, "import", "Import typeid resolutions from summary and globals"), clEnumValN(PassSummaryAction::Export, "export", "Export typeid resolutions to summary and globals")), cl::Hidden)
static const unsigned kARMBTIJumpTableEntrySize
static cl::opt< std::string > ClWriteSummary("lowertypetests-write-summary", cl::desc("Write summary to given YAML file after running pass"), cl::Hidden)
static BitSetInfo buildBitSet(ArrayRef< uint64_t > Offsets)
Build a bit set for list of offsets.
static bool isDirectCall(Use &U)
static const unsigned kARMv6MJumpTableEntrySize
static cl::opt< DropTestKind > ClDropTypeTests("lowertypetests-drop-type-tests", cl::desc("Simply drop type test sequences"), cl::values(clEnumValN(DropTestKind::None, "none", "Do not drop any type tests"), clEnumValN(DropTestKind::Assume, "assume", "Drop type test assume sequences"), clEnumValN(DropTestKind::All, "all", "Drop all type test sequences")), cl::Hidden, cl::init(DropTestKind::None))
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
#define G(x, y, z)
Definition MD5.cpp:55
Machine Check Debug Module
This file contains the declarations for metadata subclasses.
#define T
ModuleSummaryIndex.h This file contains the declarations the classes that hold the module index and s...
#define P(N)
FunctionAnalysisManager FAM
This file defines the PointerUnion class, which is a discriminated union of pointer types.
This file contains the declarations for profiling metadata utility functions.
static StringRef getName(Value *V)
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
#define LLVM_DEBUG(...)
Definition Debug.h:114
This pass exposes codegen information to IR-level passes.
This header defines support for implementing classes that have some trailing object (or arrays of obj...
Class for arbitrary precision integers.
Definition APInt.h:78
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1541
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:137
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
Definition BasicBlock.h:206
LLVM_ABI BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
static BranchInst * Create(BasicBlock *IfTrue, InsertPosition InsertBefore=nullptr)
Value * getArgOperand(unsigned i) const
unsigned arg_size() const
size_t count(StringRef S) const
@ ICMP_NE
not equal
Definition InstrTypes.h:698
static LLVM_ABI ConstantAggregateZero * get(Type *Ty)
ConstantArray - Constant Array Declarations.
Definition Constants.h:438
static ConstantAsMetadata * get(Constant *C)
Definition Metadata.h:536
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
Definition Constants.h:720
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getInBoundsGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList)
Create an "inbounds" getelementptr.
Definition Constants.h:1311
static LLVM_ABI Constant * getPointerCast(Constant *C, Type *Ty)
Create a BitCast, AddrSpaceCast, or a PtrToInt cast constant expression.
static LLVM_ABI Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition Constants.h:1284
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
static Constant * getAnon(ArrayRef< Constant * > V, bool Packed=false)
Return an anonymous struct that has the specified elements.
Definition Constants.h:491
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
iterator find(const_arg_type_t< KeyT > Val)
Definition DenseMap.h:178
iterator end()
Definition DenseMap.h:81
Analysis pass which computes a DominatorTree.
Definition Dominators.h:283
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
Definition Function.h:166
void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition Function.cpp:451
static LLVM_ABI GlobalAlias * create(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage, const Twine &Name, Constant *Aliasee, Module *Parent)
If a parent module is specified, the alias is automatically inserted into the end of the specified mo...
Definition Globals.cpp:598
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set a particular kind of metadata attachment.
LLVM_ABI void setComdat(Comdat *C)
Definition Globals.cpp:214
const Comdat * getComdat() const
LLVM_ABI bool eraseMetadata(unsigned KindID)
Erase all metadata attachments with the given kind.
bool hasSection() const
Check if this global has a custom object file section.
MDNode * getMetadata(unsigned KindID) const
Get the current metadata attachments for the given kind, if any.
Definition Value.h:576
static LLVM_ABI GUID getGUIDAssumingExternalLinkage(StringRef GlobalName)
Return a 64-bit global unique ID constructed from the name of a global symbol.
Definition Globals.cpp:77
bool isDSOLocal() const
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
VisibilityTypes getVisibility() const
static bool isLocalLinkage(LinkageTypes Linkage)
LinkageTypes getLinkage() const
uint64_t GUID
Declare a type to represent a global unique identifier for a global value.
static StringRef dropLLVMManglingEscape(StringRef Name)
If the given string begins with the GlobalValue name mangling escape character '\1',...
bool isDeclarationForLinker() const
PointerType * getType() const
Global values are always pointers.
VisibilityTypes
An enumeration for the kinds of visibility of global values.
Definition GlobalValue.h:67
@ HiddenVisibility
The GV is hidden.
Definition GlobalValue.h:69
void setVisibility(VisibilityTypes V)
LinkageTypes
An enumeration for the kinds of linkage for global values.
Definition GlobalValue.h:52
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
@ InternalLinkage
Rename collisions when linking (static functions).
Definition GlobalValue.h:60
@ ExternalLinkage
Externally visible function.
Definition GlobalValue.h:53
@ ExternalWeakLinkage
ExternalWeak linkage description.
Definition GlobalValue.h:62
Type * getValueType() const
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
LLVM_ABI void setInitializer(Constant *InitVal)
setInitializer - Sets the initializer for this global variable, removing any existing initializer if ...
Definition Globals.cpp:524
MaybeAlign getAlign() const
Returns the alignment of the given variable.
void setConstant(bool Val)
LLVM_ABI void setCodeModel(CodeModel::Model CM)
Change the code model for this global.
Definition Globals.cpp:566
LLVM_ABI void eraseFromParent()
eraseFromParent - This method unlinks 'this' from the containing module and deletes it.
Definition Globals.cpp:520
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Definition IRBuilder.h:2794
static LLVM_ABI InlineAsm * get(FunctionType *Ty, StringRef AsmString, StringRef Constraints, bool hasSideEffects, bool isAlignStack=false, AsmDialect asmDialect=AD_ATT, bool canThrow=false)
InlineAsm::get - Return the specified uniqued inline asm string.
Definition InlineAsm.cpp:43
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
uint64_t getBitMask() const
Return a bitmask with ones set for all of the bits that can be set by an unsigned version of this typ...
Analysis pass that exposes the LoopInfo for a function.
Definition LoopInfo.h:569
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
Metadata node.
Definition Metadata.h:1078
const MDOperand & getOperand(unsigned I) const
Definition Metadata.h:1442
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Definition Metadata.h:1569
unsigned getNumOperands() const
Return number of MDNode operands.
Definition Metadata.h:1448
Metadata * get() const
Definition Metadata.h:929
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Definition Metadata.cpp:608
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition MapVector.h:124
static ErrorOr< std::unique_ptr< MemoryBuffer > > getFile(const Twine &Filename, bool IsText=false, bool RequiresNullTerminator=true, bool IsVolatile=false, std::optional< Align > Alignment=std::nullopt)
Open the specified file as a MemoryBuffer, returning a new MemoryBuffer if successful,...
Root of the metadata hierarchy.
Definition Metadata.h:64
TypeIdSummary & getOrInsertTypeIdSummary(StringRef TypeId)
Return an existing or new TypeIdSummary entry for TypeId.
const TypeIdSummary * getTypeIdSummary(StringRef TypeId) const
This returns either a pointer to the type id summary (if present in the summary map) or null (if not ...
CfiFunctionIndex & cfiFunctionDecls()
CfiFunctionIndex & cfiFunctionDefs()
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
iterator_range< op_iterator > operands()
Definition Metadata.h:1853
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Analysis pass which computes a PostDominatorTree.
A set of analyses that are preserved following a run of a transformation pass.
Definition Analysis.h:112
static PreservedAnalyses none()
Convenience factory function for the empty preserved set.
Definition Analysis.h:115
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
Definition Analysis.h:118
PreservedAnalyses & preserve()
Mark an analysis as preserved.
Definition Analysis.h:132
static ReturnInst * Create(LLVMContext &C, Value *retVal=nullptr, InsertPosition InsertBefore=nullptr)
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:151
LLVM_ABI PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM)
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:712
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition StringRef.h:573
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:146
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition StringRef.h:273
Type * getElementType(unsigned N) const
Analysis pass providing the TargetTransformInfo.
See the file comment for details on the usage of the TrailingObjects type.
Triple - Helper class for working with autoconf configuration names.
Definition Triple.h:47
@ loongarch64
Definition Triple.h:65
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:280
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
op_range operands()
Definition User.h:293
Value * getOperand(unsigned i) const
Definition User.h:233
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
user_iterator user_begin()
Definition Value.h:402
bool hasOneUse() const
Return true if there is exactly one use of this value.
Definition Value.h:439
LLVM_ABI void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition Value.cpp:553
iterator_range< user_iterator > users()
Definition Value.h:426
use_iterator use_begin()
Definition Value.h:364
LLVM_ABI void replaceUsesWithIf(Value *New, llvm::function_ref< bool(Use &U)> ShouldReplace)
Go through the uses list for this definition and make each use point to "V" if the callback ShouldRep...
Definition Value.cpp:561
bool use_empty() const
Definition Value.h:346
iterator_range< use_iterator > uses()
Definition Value.h:380
bool hasName() const
Definition Value.h:262
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
Definition Value.cpp:322
LLVM_ABI void takeName(Value *V)
Transfer the name from V to this value.
Definition Value.cpp:403
std::pair< iterator, bool > insert(const ValueT &V)
Definition DenseSet.h:202
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition DenseSet.h:175
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition DenseSet.h:180
const ParentTy * getParent() const
Definition ilist_node.h:34
self_iterator getIterator()
Definition ilist_node.h:123
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition raw_ostream.h:53
CallInst * Call
Changed
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr char SymbolName[]
Key for Kernel::Metadata::mSymbolName.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
@ BasicBlock
Various leaf nodes.
Definition ISDOpcodes.h:81
LLVM_ABI Function * getDeclarationIfExists(const Module *M, ID id)
Look up the Function declaration of the intrinsic id in the Module M and return it if it exists.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
DropTestKind
Specifies how to drop type tests.
@ Assume
Drop only llvm.assumes using the type test value.
LLVM_ABI bool isJumpTableCanonical(Function *F)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract_or_null(Y &&MD)
Extract a Value from Metadata, allowing null.
Definition Metadata.h:682
SmallVector< unsigned char, 0 > ByteArray
Definition PropertySet.h:25
NodeAddr< PhiNode * > Phi
Definition RDFGraph.h:390
NodeAddr< UseNode * > Use
Definition RDFGraph.h:385
@ OF_TextWithCRLF
The file should be opened in text mode and use a carriage linefeed '\r\n'.
Definition FileSystem.h:764
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
LLVM_ABI void ReplaceInstWithInst(BasicBlock *BB, BasicBlock::iterator &BI, Instruction *I)
Replace the instruction specified by BI with the instruction specified by I.
@ Offset
Definition DWP.cpp:532
FunctionAddr VTableAddr Value
Definition InstrProf.h:137
void stable_sort(R &&Range)
Definition STLExtras.h:2106
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1737
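A small sketch of the range form (container contents are illustrative):

    // Minimal sketch: range-based all_of instead of begin()/end() pairs.
    #include "llvm/ADT/STLExtras.h"
    #include "llvm/ADT/SmallVector.h"

    static bool allPositive(const llvm::SmallVector<int, 8> &V) {
      return llvm::all_of(V, [](int X) { return X > 0; });
    }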
LLVM_ABI void setExplicitlyUnknownBranchWeightsIfProfiled(Instruction &I, StringRef PassName, const Function *F=nullptr)
Like setExplicitlyUnknownBranchWeights(...), but only sets unknown branch weights in the new instruction if the enclosing function has profile data.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
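For example (a hedged sketch, not code from this pass): dyn_cast yields null on a type mismatch, which makes the test-and-use pattern below idiomatic.

    // Minimal sketch: is this Use the callee operand of a call?
    #include "llvm/IR/InstrTypes.h"
    using namespace llvm;

    static bool isDirectCallee(const Use &U) {
      if (auto *CB = dyn_cast<CallBase>(U.getUser())) // null if not a call
        return CB->isCallee(&U);                      // used as the callee?
      return false;
    }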
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
@ Export
Export information to summary.
Definition IPO.h:57
@ None
Do nothing.
Definition IPO.h:55
@ Import
Import information from summary.
Definition IPO.h:56
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2184
Value * GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, const DataLayout &DL, bool AllowNonInbounds=true)
Analyze the specified pointer to see if it can be expressed as a base pointer plus a constant offset.
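A sketch of typical usage (the helper name is illustrative): decompose a pointer into base plus constant byte offset, falling back to (Ptr, 0) when nothing can be stripped.

    // Minimal sketch: peel constant GEP offsets off Ptr.
    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/IR/DataLayout.h"
    #include <cstdint>
    #include <utility>
    using namespace llvm;

    static std::pair<Value *, int64_t> splitPointer(Value *Ptr,
                                                    const DataLayout &DL) {
      int64_t Offset = 0;
      Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, DL);
      return {Base, Offset}; // Base == Ptr and Offset == 0 on failure
    }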
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting iteration.
Definition STLExtras.h:632
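A hedged sketch of the usual application: deleting users while iterating, which would invalidate a plain range-for.

    // Minimal sketch: erase every (already otherwise-unused) call to F
    // while walking its use list; the iterator advances past each user
    // before that user is removed.
    #include "llvm/ADT/STLExtras.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    static void eraseCallsTo(Function *F) {
      for (User *U : make_early_inc_range(F->users()))
        if (auto *CI = dyn_cast<CallInst>(U))
          CI->eraseFromParent(); // safe under make_early_inc_range
    }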
InnerAnalysisManagerProxy< FunctionAnalysisManager, Module > FunctionAnalysisManagerModuleProxy
Provide the FunctionAnalysisManager to Module proxy.
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant bit, stopping at the first 1.
Definition bit.h:202
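For a power of two, the trailing-zero count is exactly log2; a minimal sketch:

    // Minimal sketch: log2 of a power of two via countr_zero.
    #include "llvm/ADT/bit.h"
    #include <cassert>
    #include <cstdint>

    static int log2PowerOfTwo(uint64_t V) {
      assert(V != 0 && (V & (V - 1)) == 0 && "V must be a power of two");
      return llvm::countr_zero(V); // e.g. 64 -> 6
    }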
unsigned M1(unsigned Val)
Definition VE.h:377
LLVM_ABI bool convertUsersOfConstantsToInstructions(ArrayRef< Constant * > Consts, Function *RestrictToFunc=nullptr, bool RemoveDeadConstants=true, bool IncludeSelf=false)
Replace constant-expression users of the given constants with instructions.
void sort(IteratorTy Start, IteratorTy End)
Definition STLExtras.h:1634
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:547
@ Ref
The access may reference the value stored in memory.
Definition ModRef.h:32
TargetTransformInfo TTI
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
LLVM_ABI void appendToCompilerUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.compiler.used list.
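A sketch (helper name illustrative): pinning a synthesized global so linker GC and IPO keep it alive.

    // Minimal sketch: add GV to @llvm.compiler.used so it is not discarded.
    #include "llvm/IR/GlobalVariable.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Transforms/Utils/ModuleUtils.h"
    using namespace llvm;

    static void pinGlobal(Module &M, GlobalVariable *GV) {
      appendToCompilerUsed(M, {GV});
    }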
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
Definition Alignment.h:144
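alignTo rounds up using the classic (Size + A - 1) & ~(A - 1) arithmetic; a one-line sketch:

    // Minimal sketch: pad a byte count to an 8-byte boundary.
    #include "llvm/Support/Alignment.h"
    #include <cstdint>

    static uint64_t padTo8(uint64_t Size) {
      return llvm::alignTo(Size, llvm::Align(8)); // 13 -> 16, 16 -> 16
    }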
DWARFExpression::Operation Op
Expected< T > errorOrToExpected(ErrorOr< T > &&EO)
Convert an ErrorOr<T> to an Expected<T>.
Definition Error.h:1245
ArrayRef(const T &OneElt) -> ArrayRef< T >
OutputIt copy(R &&Range, OutputIt Out)
Definition STLExtras.h:1883
constexpr unsigned BitWidth
LLVM_ABI void appendToGlobalCtors(Module &M, Function *F, int Priority, Constant *Data=nullptr)
Append F to the list of global ctors of module M with the given Priority.
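A sketch of typical usage (65535 is LLVM's conventional default ctor priority, i.e. "run last"):

    // Minimal sketch: run Init at program startup via llvm.global_ctors.
    #include "llvm/IR/Function.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Transforms/Utils/ModuleUtils.h"
    using namespace llvm;

    static void registerCtor(Module &M, Function *Init) {
      appendToGlobalCtors(M, Init, /*Priority=*/65535);
    }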
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI Error errorCodeToError(std::error_code EC)
Helper for converting an std::error_code to a Error.
Definition Error.cpp:111
LLVM_ABI Instruction * SplitBlockAndInsertIfThen(Value *Cond, BasicBlock::iterator SplitBefore, bool Unreachable, MDNode *BranchWeights=nullptr, DomTreeUpdater *DTU=nullptr, LoopInfo *LI=nullptr, BasicBlock *ThenBlock=nullptr)
Split the containing block at the specified instruction - everything before SplitBefore stays in the old basic block, and the rest of the instructions in the BB are moved to a new block.
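A hedged sketch of the common CFI-style shape (names are illustrative): split at the check, branch to an unreachable "then" block on failure, and trap there.

    // Minimal sketch: emit "if (!Cond) { trap; unreachable; }" before IP.
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/Transforms/Utils/BasicBlockUtils.h"
    using namespace llvm;

    static void trapUnless(Value *Cond, Instruction *IP) {
      IRBuilder<> B(IP);
      Value *Failed = B.CreateNot(Cond, "failed");
      Instruction *ThenTerm = SplitBlockAndInsertIfThen(
          Failed, IP->getIterator(), /*Unreachable=*/true);
      B.SetInsertPoint(ThenTerm);                 // before the unreachable
      B.CreateIntrinsic(Intrinsic::trap, {}, {}); // abort on failed check
    }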
BumpPtrAllocatorImpl<> BumpPtrAllocator
The standard BumpPtrAllocator which just uses the default template parameters.
Definition Allocator.h:383
LLVM_ABI void appendToUsed(Module &M, ArrayRef< GlobalValue * > Values)
Adds global values to the llvm.used list.
CfiFunctionLinkage
The type of CFI jumptable needed for a function.
@ CFL_WeakDeclaration
AnalysisManager< Module > ModuleAnalysisManager
Convenience typedef for the Module analysis manager.
Definition MIRParser.h:39
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
Definition MathExtras.h:373
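Because the result is strictly greater than the argument, rounding up to a power of two uses the A - 1 idiom; a sketch:

    // Minimal sketch: smallest power of two >= A (A must be nonzero).
    #include "llvm/Support/MathExtras.h"
    #include <cassert>
    #include <cstdint>

    static uint64_t roundUpPowerOf2(uint64_t A) {
      assert(A != 0 && "A - 1 would wrap");
      return llvm::NextPowerOf2(A - 1); // 8 -> 8, 9 -> 16
    }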
LLVM_ABI GlobalVariable * collectUsedGlobalVariables(const Module &M, SmallVectorImpl< GlobalValue * > &Vec, bool CompilerUsed)
Given "llvm.used" or "llvm.compiler.used" as a global name, collect the initializer elements of that ...
Definition Module.cpp:870
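A sketch of usage (the output container name is illustrative):

    // Minimal sketch: gather the members of @llvm.used.
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    static void gatherUsed(const Module &M,
                           SmallVectorImpl<GlobalValue *> &Out) {
      collectUsedGlobalVariables(M, Out, /*CompilerUsed=*/false);
    }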
TypeTestResolution TTRes
Kind
Specifies which kind of type check we should emit for this byte array.
@ Unknown
Unknown (analysis not performed, don't lower)
@ Single
Single element (last example in "Short Inline Bit Vectors")
@ Inline
Inlined bit vector ("Short Inline Bit Vectors")
@ Unsat
Unsatisfiable type (i.e. no global has this type metadata)
@ AllOnes
All-ones bit vector ("Eliminating Bit Vector Checks for All-Ones Bit Vectors")
@ ByteArray
Test a byte array (first example)
unsigned SizeM1BitWidth
Range of size-1 expressed as a bit width.
enum llvm::TypeTestResolution::Kind TheKind
SmallVector< uint64_t, 16 > Offsets
LLVM_ABI bool containsGlobalOffset(uint64_t Offset) const
LLVM_ABI void print(raw_ostream &OS) const
This class is used to build a byte array containing overlapping bit sets.
uint64_t BitAllocs[BitsPerByte]
The number of bytes allocated so far for each of the bits.
std::vector< uint8_t > Bytes
The byte array built so far.
LLVM_ABI void allocate(const std::set< uint64_t > &Bits, uint64_t BitSize, uint64_t &AllocByteOffset, uint8_t &AllocMask)
Allocate BitSize bits in the byte array where Bits contains the bits to set.
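A hedged sketch of driving the builder (the concrete offsets and masks are whatever allocate assigns; the builder may pack both sets into shared bytes):

    // Minimal sketch: pack two 4-bit sets into one shared byte array.
    #include "llvm/Transforms/IPO/LowerTypeTests.h"
    #include <cstdint>
    #include <set>
    using namespace llvm::lowertypetests;

    static void packTwoSets() {
      ByteArrayBuilder BAB;
      uint64_t Off1, Off2;
      uint8_t Mask1, Mask2;
      BAB.allocate(std::set<uint64_t>{0, 2}, /*BitSize=*/4, Off1, Mask1);
      BAB.allocate(std::set<uint64_t>{1, 3}, /*BitSize=*/4, Off2, Mask2);
      // A membership test is then: (Bytes[Off + Index] & Mask) != 0.
    }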
This class implements a layout algorithm for globals referenced by bit sets that tries to keep members of small bit sets together.
std::vector< std::vector< uint64_t > > Fragments
The computed layout.
LLVM_ABI void addFragment(const std::set< uint64_t > &F)
Add F to the layout while trying to keep its indices contiguous.
std::vector< uint64_t > FragmentMap
Mapping from object index to fragment index.
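A closing sketch of the layout builder (indices are object numbers; the merge behavior shown is the intent of addFragment):

    // Minimal sketch: keep members of two overlapping bit sets contiguous.
    #include "llvm/Transforms/IPO/LowerTypeTests.h"
    #include <set>
    using namespace llvm::lowertypetests;

    static void layoutFourGlobals() {
      GlobalLayoutBuilder GLB(/*NumObjects=*/4);
      GLB.addFragment({0, 2}); // first bit set covers objects 0 and 2
      GLB.addFragment({2, 3}); // second covers 2 and 3; fragments merge
      // GLB.Fragments now orders {0, 2, 3} so both sets stay contiguous.
    }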