Bug Summary

File: build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/clang/lib/CodeGen/CodeGenModule.cpp
Warning: line 3767, column 27
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name CodeGenModule.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -relaxed-aliasing -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/build-llvm -resource-dir /usr/lib/llvm-15/lib/clang/15.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I tools/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/clang/lib/CodeGen -I /build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/clang/include -I tools/clang/include -I include -I /build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem 
/usr/lib/llvm-15/lib/clang/15.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-03-20-232535-108605-1 -x c++ /build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/clang/lib/CodeGen/CodeGenModule.cpp

/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/clang/lib/CodeGen/CodeGenModule.cpp

1//===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This coordinates the per-module state used while generating code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CodeGenModule.h"
14#include "CGBlocks.h"
15#include "CGCUDARuntime.h"
16#include "CGCXXABI.h"
17#include "CGCall.h"
18#include "CGDebugInfo.h"
19#include "CGObjCRuntime.h"
20#include "CGOpenCLRuntime.h"
21#include "CGOpenMPRuntime.h"
22#include "CGOpenMPRuntimeGPU.h"
23#include "CodeGenFunction.h"
24#include "CodeGenPGO.h"
25#include "ConstantEmitter.h"
26#include "CoverageMappingGen.h"
27#include "TargetInfo.h"
28#include "clang/AST/ASTContext.h"
29#include "clang/AST/CharUnits.h"
30#include "clang/AST/DeclCXX.h"
31#include "clang/AST/DeclObjC.h"
32#include "clang/AST/DeclTemplate.h"
33#include "clang/AST/Mangle.h"
34#include "clang/AST/RecordLayout.h"
35#include "clang/AST/RecursiveASTVisitor.h"
36#include "clang/AST/StmtVisitor.h"
37#include "clang/Basic/Builtins.h"
38#include "clang/Basic/CharInfo.h"
39#include "clang/Basic/CodeGenOptions.h"
40#include "clang/Basic/Diagnostic.h"
41#include "clang/Basic/FileManager.h"
42#include "clang/Basic/Module.h"
43#include "clang/Basic/SourceManager.h"
44#include "clang/Basic/TargetInfo.h"
45#include "clang/Basic/Version.h"
46#include "clang/CodeGen/BackendUtil.h"
47#include "clang/CodeGen/ConstantInitBuilder.h"
48#include "clang/Frontend/FrontendDiagnostic.h"
49#include "llvm/ADT/StringSwitch.h"
50#include "llvm/ADT/Triple.h"
51#include "llvm/Analysis/TargetLibraryInfo.h"
52#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
53#include "llvm/IR/CallingConv.h"
54#include "llvm/IR/DataLayout.h"
55#include "llvm/IR/Intrinsics.h"
56#include "llvm/IR/LLVMContext.h"
57#include "llvm/IR/Module.h"
58#include "llvm/IR/ProfileSummary.h"
59#include "llvm/ProfileData/InstrProfReader.h"
60#include "llvm/Support/CodeGen.h"
61#include "llvm/Support/CommandLine.h"
62#include "llvm/Support/ConvertUTF.h"
63#include "llvm/Support/ErrorHandling.h"
64#include "llvm/Support/MD5.h"
65#include "llvm/Support/TimeProfiler.h"
66#include "llvm/Support/X86TargetParser.h"
67
68using namespace clang;
69using namespace CodeGen;
70
// Hidden, experimental command-line knob: when set, emit only limited
// coverage mapping information instead of the full mapping.
71static llvm::cl::opt<bool> LimitedCoverage(
 72 "limited-coverage-experimental", llvm::cl::ZeroOrMore, llvm::cl::Hidden,
 73 llvm::cl::desc("Emit limited coverage mapping information (experimental)"),
 74 llvm::cl::init(false));
 75
// Name of the section used for annotation metadata globals.
76static const char AnnotationSection[] = "llvm.metadata";
77
78static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
79 switch (CGM.getContext().getCXXABIKind()) {
80 case TargetCXXABI::AppleARM64:
81 case TargetCXXABI::Fuchsia:
82 case TargetCXXABI::GenericAArch64:
83 case TargetCXXABI::GenericARM:
84 case TargetCXXABI::iOS:
85 case TargetCXXABI::WatchOS:
86 case TargetCXXABI::GenericMIPS:
87 case TargetCXXABI::GenericItanium:
88 case TargetCXXABI::WebAssembly:
89 case TargetCXXABI::XL:
90 return CreateItaniumCXXABI(CGM);
91 case TargetCXXABI::Microsoft:
92 return CreateMicrosoftCXXABI(CGM);
93 }
94
95 llvm_unreachable("invalid C++ ABI kind")::llvm::llvm_unreachable_internal("invalid C++ ABI kind", "clang/lib/CodeGen/CodeGenModule.cpp"
, 95)
;
96}
97
98CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
99 const PreprocessorOptions &PPO,
100 const CodeGenOptions &CGO, llvm::Module &M,
101 DiagnosticsEngine &diags,
102 CoverageSourceInfo *CoverageInfo)
103 : Context(C), LangOpts(C.getLangOpts()), HeaderSearchOpts(HSO),
104 PreprocessorOpts(PPO), CodeGenOpts(CGO), TheModule(M), Diags(diags),
105 Target(C.getTargetInfo()), ABI(createCXXABI(*this)),
106 VMContext(M.getContext()), Types(*this), VTables(*this),
107 SanitizerMD(new SanitizerMetadata(*this)) {
108
109 // Initialize the type cache.
110 llvm::LLVMContext &LLVMContext = M.getContext();
111 VoidTy = llvm::Type::getVoidTy(LLVMContext);
112 Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
113 Int16Ty = llvm::Type::getInt16Ty(LLVMContext);
114 Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
115 Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
116 HalfTy = llvm::Type::getHalfTy(LLVMContext);
117 BFloatTy = llvm::Type::getBFloatTy(LLVMContext);
118 FloatTy = llvm::Type::getFloatTy(LLVMContext);
119 DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
120 PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
121 PointerAlignInBytes =
122 C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
123 SizeSizeInBytes =
124 C.toCharUnitsFromBits(C.getTargetInfo().getMaxPointerWidth()).getQuantity();
125 IntAlignInBytes =
126 C.toCharUnitsFromBits(C.getTargetInfo().getIntAlign()).getQuantity();
127 CharTy =
128 llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getCharWidth());
129 IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
130 IntPtrTy = llvm::IntegerType::get(LLVMContext,
131 C.getTargetInfo().getMaxPointerWidth());
132 Int8PtrTy = Int8Ty->getPointerTo(0);
133 Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
134 const llvm::DataLayout &DL = M.getDataLayout();
135 AllocaInt8PtrTy = Int8Ty->getPointerTo(DL.getAllocaAddrSpace());
136 GlobalsInt8PtrTy = Int8Ty->getPointerTo(DL.getDefaultGlobalsAddressSpace());
137 ASTAllocaAddressSpace = getTargetCodeGenInfo().getASTAllocaAddressSpace();
138
139 RuntimeCC = getTargetCodeGenInfo().getABIInfo().getRuntimeCC();
140
141 if (LangOpts.ObjC)
142 createObjCRuntime();
143 if (LangOpts.OpenCL)
144 createOpenCLRuntime();
145 if (LangOpts.OpenMP)
146 createOpenMPRuntime();
147 if (LangOpts.CUDA)
148 createCUDARuntime();
149
150 // Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0.
151 if (LangOpts.Sanitize.has(SanitizerKind::Thread) ||
152 (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0))
153 TBAA.reset(new CodeGenTBAA(Context, TheModule, CodeGenOpts, getLangOpts(),
154 getCXXABI().getMangleContext()));
155
156 // If debug info or coverage generation is enabled, create the CGDebugInfo
157 // object.
158 if (CodeGenOpts.getDebugInfo() != codegenoptions::NoDebugInfo ||
159 CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes)
160 DebugInfo.reset(new CGDebugInfo(*this));
161
162 Block.GlobalUniqueCount = 0;
163
164 if (C.getLangOpts().ObjC)
165 ObjCData.reset(new ObjCEntrypoints());
166
167 if (CodeGenOpts.hasProfileClangUse()) {
168 auto ReaderOrErr = llvm::IndexedInstrProfReader::create(
169 CodeGenOpts.ProfileInstrumentUsePath, CodeGenOpts.ProfileRemappingFile);
170 if (auto E = ReaderOrErr.takeError()) {
171 unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
172 "Could not read profile %0: %1");
173 llvm::handleAllErrors(std::move(E), [&](const llvm::ErrorInfoBase &EI) {
174 getDiags().Report(DiagID) << CodeGenOpts.ProfileInstrumentUsePath
175 << EI.message();
176 });
177 } else
178 PGOReader = std::move(ReaderOrErr.get());
179 }
180
181 // If coverage mapping generation is enabled, create the
182 // CoverageMappingModuleGen object.
183 if (CodeGenOpts.CoverageMapping)
184 CoverageMapping.reset(new CoverageMappingModuleGen(*this, *CoverageInfo));
185
186 // Generate the module name hash here if needed.
187 if (CodeGenOpts.UniqueInternalLinkageNames &&
188 !getModule().getSourceFileName().empty()) {
189 std::string Path = getModule().getSourceFileName();
190 // Check if a path substitution is needed from the MacroPrefixMap.
191 for (const auto &Entry : LangOpts.MacroPrefixMap)
192 if (Path.rfind(Entry.first, 0) != std::string::npos) {
193 Path = Entry.second + Path.substr(Entry.first.size());
194 break;
195 }
196 llvm::MD5 Md5;
197 Md5.update(Path);
198 llvm::MD5::MD5Result R;
199 Md5.final(R);
200 SmallString<32> Str;
201 llvm::MD5::stringifyResult(R, Str);
202 // Convert MD5hash to Decimal. Demangler suffixes can either contain
203 // numbers or characters but not both.
204 llvm::APInt IntHash(128, Str.str(), 16);
205 // Prepend "__uniq" before the hash for tools like profilers to understand
206 // that this symbol is of internal linkage type. The "__uniq" is the
207 // pre-determined prefix that is used to tell tools that this symbol was
208 // created with -funique-internal-linakge-symbols and the tools can strip or
209 // keep the prefix as needed.
210 ModuleNameHash = (Twine(".__uniq.") +
211 Twine(toString(IntHash, /* Radix = */ 10, /* Signed = */false))).str();
212 }
213}
214
// Empty out-of-line destructor; presumably defined here (rather than
// implicitly in the header) so the unique_ptr members' deleters are
// instantiated in this translation unit — TODO confirm against the header.
215CodeGenModule::~CodeGenModule() {}
216
217void CodeGenModule::createObjCRuntime() {
218 // This is just isGNUFamily(), but we want to force implementors of
219 // new ABIs to decide how best to do this.
220 switch (LangOpts.ObjCRuntime.getKind()) {
221 case ObjCRuntime::GNUstep:
222 case ObjCRuntime::GCC:
223 case ObjCRuntime::ObjFW:
224 ObjCRuntime.reset(CreateGNUObjCRuntime(*this));
225 return;
226
227 case ObjCRuntime::FragileMacOSX:
228 case ObjCRuntime::MacOSX:
229 case ObjCRuntime::iOS:
230 case ObjCRuntime::WatchOS:
231 ObjCRuntime.reset(CreateMacObjCRuntime(*this));
232 return;
233 }
234 llvm_unreachable("bad runtime kind")::llvm::llvm_unreachable_internal("bad runtime kind", "clang/lib/CodeGen/CodeGenModule.cpp"
, 234)
;
235}
236
// Create the OpenCL runtime helper; called once from the constructor when
// LangOpts.OpenCL is set.
237void CodeGenModule::createOpenCLRuntime() {
 238 OpenCLRuntime.reset(new CGOpenCLRuntime(*this));
 239}
240
241void CodeGenModule::createOpenMPRuntime() {
242 // Select a specialized code generation class based on the target, if any.
243 // If it does not exist use the default implementation.
244 switch (getTriple().getArch()) {
245 case llvm::Triple::nvptx:
246 case llvm::Triple::nvptx64:
247 case llvm::Triple::amdgcn:
248 assert(getLangOpts().OpenMPIsDevice &&(static_cast <bool> (getLangOpts().OpenMPIsDevice &&
"OpenMP AMDGPU/NVPTX is only prepared to deal with device code."
) ? void (0) : __assert_fail ("getLangOpts().OpenMPIsDevice && \"OpenMP AMDGPU/NVPTX is only prepared to deal with device code.\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 249, __extension__ __PRETTY_FUNCTION__
))
249 "OpenMP AMDGPU/NVPTX is only prepared to deal with device code.")(static_cast <bool> (getLangOpts().OpenMPIsDevice &&
"OpenMP AMDGPU/NVPTX is only prepared to deal with device code."
) ? void (0) : __assert_fail ("getLangOpts().OpenMPIsDevice && \"OpenMP AMDGPU/NVPTX is only prepared to deal with device code.\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 249, __extension__ __PRETTY_FUNCTION__
))
;
250 OpenMPRuntime.reset(new CGOpenMPRuntimeGPU(*this));
251 break;
252 default:
253 if (LangOpts.OpenMPSimd)
254 OpenMPRuntime.reset(new CGOpenMPSIMDRuntime(*this));
255 else
256 OpenMPRuntime.reset(new CGOpenMPRuntime(*this));
257 break;
258 }
259}
260
// Create the CUDA runtime helper; called once from the constructor when
// LangOpts.CUDA is set.
261void CodeGenModule::createCUDARuntime() {
 262 CUDARuntime.reset(CreateNVCUDARuntime(*this));
 263}
264
// Record that the global named Name should later be replaced by C; the swap
// itself happens in applyReplacements(). A second call with the same name
// overwrites the earlier replacement.
265void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) {
 266 Replacements[Name] = C;
 267}
268
269void CodeGenModule::applyReplacements() {
270 for (auto &I : Replacements) {
271 StringRef MangledName = I.first();
272 llvm::Constant *Replacement = I.second;
273 llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
274 if (!Entry)
275 continue;
276 auto *OldF = cast<llvm::Function>(Entry);
277 auto *NewF = dyn_cast<llvm::Function>(Replacement);
278 if (!NewF) {
279 if (auto *Alias = dyn_cast<llvm::GlobalAlias>(Replacement)) {
280 NewF = dyn_cast<llvm::Function>(Alias->getAliasee());
281 } else {
282 auto *CE = cast<llvm::ConstantExpr>(Replacement);
283 assert(CE->getOpcode() == llvm::Instruction::BitCast ||(static_cast <bool> (CE->getOpcode() == llvm::Instruction
::BitCast || CE->getOpcode() == llvm::Instruction::GetElementPtr
) ? void (0) : __assert_fail ("CE->getOpcode() == llvm::Instruction::BitCast || CE->getOpcode() == llvm::Instruction::GetElementPtr"
, "clang/lib/CodeGen/CodeGenModule.cpp", 284, __extension__ __PRETTY_FUNCTION__
))
284 CE->getOpcode() == llvm::Instruction::GetElementPtr)(static_cast <bool> (CE->getOpcode() == llvm::Instruction
::BitCast || CE->getOpcode() == llvm::Instruction::GetElementPtr
) ? void (0) : __assert_fail ("CE->getOpcode() == llvm::Instruction::BitCast || CE->getOpcode() == llvm::Instruction::GetElementPtr"
, "clang/lib/CodeGen/CodeGenModule.cpp", 284, __extension__ __PRETTY_FUNCTION__
))
;
285 NewF = dyn_cast<llvm::Function>(CE->getOperand(0));
286 }
287 }
288
289 // Replace old with new, but keep the old order.
290 OldF->replaceAllUsesWith(Replacement);
291 if (NewF) {
292 NewF->removeFromParent();
293 OldF->getParent()->getFunctionList().insertAfter(OldF->getIterator(),
294 NewF);
295 }
296 OldF->eraseFromParent();
297 }
298}
299
// Queue a (global, replacement-constant) pair; the actual RAUW + erase is
// performed later by applyGlobalValReplacements().
300void CodeGenModule::addGlobalValReplacement(llvm::GlobalValue *GV, llvm::Constant *C) {
 301 GlobalValReplacements.push_back(std::make_pair(GV, C));
 302}
303
304void CodeGenModule::applyGlobalValReplacements() {
305 for (auto &I : GlobalValReplacements) {
306 llvm::GlobalValue *GV = I.first;
307 llvm::Constant *C = I.second;
308
309 GV->replaceAllUsesWith(C);
310 GV->eraseFromParent();
311 }
312}
313
314// This is only used in aliases that we created and we know they have a
315// linear structure.
316static const llvm::GlobalValue *getAliasedGlobal(const llvm::GlobalValue *GV) {
317 const llvm::Constant *C;
318 if (auto *GA = dyn_cast<llvm::GlobalAlias>(GV))
319 C = GA->getAliasee();
320 else if (auto *GI = dyn_cast<llvm::GlobalIFunc>(GV))
321 C = GI->getResolver();
322 else
323 return GV;
324
325 const auto *AliaseeGV = dyn_cast<llvm::GlobalValue>(C->stripPointerCasts());
326 if (!AliaseeGV)
327 return nullptr;
328
329 const llvm::GlobalValue *FinalGV = AliaseeGV->getAliaseeObject();
330 if (FinalGV == GV)
331 return nullptr;
332
333 return FinalGV;
334}
335
336static bool checkAliasedGlobal(DiagnosticsEngine &Diags,
337 SourceLocation Location, bool IsIFunc,
338 const llvm::GlobalValue *Alias,
339 const llvm::GlobalValue *&GV) {
340 GV = getAliasedGlobal(Alias);
341 if (!GV) {
342 Diags.Report(Location, diag::err_cyclic_alias) << IsIFunc;
343 return false;
344 }
345
346 if (GV->isDeclaration()) {
347 Diags.Report(Location, diag::err_alias_to_undefined) << IsIFunc << IsIFunc;
348 return false;
349 }
350
351 if (IsIFunc) {
352 // Check resolver function type.
353 const auto *F = dyn_cast<llvm::Function>(GV);
354 if (!F) {
355 Diags.Report(Location, diag::err_alias_to_undefined)
356 << IsIFunc << IsIFunc;
357 return false;
358 }
359
360 llvm::FunctionType *FTy = F->getFunctionType();
361 if (!FTy->getReturnType()->isPointerTy()) {
362 Diags.Report(Location, diag::err_ifunc_resolver_return);
363 return false;
364 }
365 }
366
367 return true;
368}
369
370void CodeGenModule::checkAliases() {
371 // Check if the constructed aliases are well formed. It is really unfortunate
372 // that we have to do this in CodeGen, but we only construct mangled names
373 // and aliases during codegen.
374 bool Error = false;
375 DiagnosticsEngine &Diags = getDiags();
376 for (const GlobalDecl &GD : Aliases) {
377 const auto *D = cast<ValueDecl>(GD.getDecl());
378 SourceLocation Location;
379 bool IsIFunc = D->hasAttr<IFuncAttr>();
380 if (const Attr *A = D->getDefiningAttr())
381 Location = A->getLocation();
382 else
383 llvm_unreachable("Not an alias or ifunc?")::llvm::llvm_unreachable_internal("Not an alias or ifunc?", "clang/lib/CodeGen/CodeGenModule.cpp"
, 383)
;
384
385 StringRef MangledName = getMangledName(GD);
386 llvm::GlobalValue *Alias = GetGlobalValue(MangledName);
387 const llvm::GlobalValue *GV = nullptr;
388 if (!checkAliasedGlobal(Diags, Location, IsIFunc, Alias, GV)) {
389 Error = true;
390 continue;
391 }
392
393 llvm::Constant *Aliasee =
394 IsIFunc ? cast<llvm::GlobalIFunc>(Alias)->getResolver()
395 : cast<llvm::GlobalAlias>(Alias)->getAliasee();
396
397 llvm::GlobalValue *AliaseeGV;
398 if (auto CE = dyn_cast<llvm::ConstantExpr>(Aliasee))
399 AliaseeGV = cast<llvm::GlobalValue>(CE->getOperand(0));
400 else
401 AliaseeGV = cast<llvm::GlobalValue>(Aliasee);
402
403 if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
404 StringRef AliasSection = SA->getName();
405 if (AliasSection != AliaseeGV->getSection())
406 Diags.Report(SA->getLocation(), diag::warn_alias_with_section)
407 << AliasSection << IsIFunc << IsIFunc;
408 }
409
410 // We have to handle alias to weak aliases in here. LLVM itself disallows
411 // this since the object semantics would not match the IL one. For
412 // compatibility with gcc we implement it by just pointing the alias
413 // to its aliasee's aliasee. We also warn, since the user is probably
414 // expecting the link to be weak.
415 if (auto *GA = dyn_cast<llvm::GlobalAlias>(AliaseeGV)) {
416 if (GA->isInterposable()) {
417 Diags.Report(Location, diag::warn_alias_to_weak_alias)
418 << GV->getName() << GA->getName() << IsIFunc;
419 Aliasee = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
420 GA->getAliasee(), Alias->getType());
421
422 if (IsIFunc)
423 cast<llvm::GlobalIFunc>(Alias)->setResolver(Aliasee);
424 else
425 cast<llvm::GlobalAlias>(Alias)->setAliasee(Aliasee);
426 }
427 }
428 }
429 if (!Error)
430 return;
431
432 for (const GlobalDecl &GD : Aliases) {
433 StringRef MangledName = getMangledName(GD);
434 llvm::GlobalValue *Alias = GetGlobalValue(MangledName);
435 Alias->replaceAllUsesWith(llvm::UndefValue::get(Alias->getType()));
436 Alias->eraseFromParent();
437 }
438}
439
// Drop pending deferred definitions and reset the OpenMP runtime's state
// (only if one was created for this module).
440void CodeGenModule::clear() {
 441 DeferredDeclsToEmit.clear();
 442 if (OpenMPRuntime)
 443 OpenMPRuntime->clear();
 444}
445
446void InstrProfStats::reportDiagnostics(DiagnosticsEngine &Diags,
447 StringRef MainFile) {
448 if (!hasDiagnostics())
449 return;
450 if (VisitedInMainFile > 0 && VisitedInMainFile == MissingInMainFile) {
451 if (MainFile.empty())
452 MainFile = "<stdin>";
453 Diags.Report(diag::warn_profile_data_unprofiled) << MainFile;
454 } else {
455 if (Mismatched > 0)
456 Diags.Report(diag::warn_profile_data_out_of_date) << Visited << Mismatched;
457
458 if (Missing > 0)
459 Diags.Report(diag::warn_profile_data_missing) << Visited << Missing;
460 }
461}
462
463static void setVisibilityFromDLLStorageClass(const clang::LangOptions &LO,
464 llvm::Module &M) {
465 if (!LO.VisibilityFromDLLStorageClass)
466 return;
467
468 llvm::GlobalValue::VisibilityTypes DLLExportVisibility =
469 CodeGenModule::GetLLVMVisibility(LO.getDLLExportVisibility());
470 llvm::GlobalValue::VisibilityTypes NoDLLStorageClassVisibility =
471 CodeGenModule::GetLLVMVisibility(LO.getNoDLLStorageClassVisibility());
472 llvm::GlobalValue::VisibilityTypes ExternDeclDLLImportVisibility =
473 CodeGenModule::GetLLVMVisibility(LO.getExternDeclDLLImportVisibility());
474 llvm::GlobalValue::VisibilityTypes ExternDeclNoDLLStorageClassVisibility =
475 CodeGenModule::GetLLVMVisibility(
476 LO.getExternDeclNoDLLStorageClassVisibility());
477
478 for (llvm::GlobalValue &GV : M.global_values()) {
479 if (GV.hasAppendingLinkage() || GV.hasLocalLinkage())
480 continue;
481
482 // Reset DSO locality before setting the visibility. This removes
483 // any effects that visibility options and annotations may have
484 // had on the DSO locality. Setting the visibility will implicitly set
485 // appropriate globals to DSO Local; however, this will be pessimistic
486 // w.r.t. to the normal compiler IRGen.
487 GV.setDSOLocal(false);
488
489 if (GV.isDeclarationForLinker()) {
490 GV.setVisibility(GV.getDLLStorageClass() ==
491 llvm::GlobalValue::DLLImportStorageClass
492 ? ExternDeclDLLImportVisibility
493 : ExternDeclNoDLLStorageClassVisibility);
494 } else {
495 GV.setVisibility(GV.getDLLStorageClass() ==
496 llvm::GlobalValue::DLLExportStorageClass
497 ? DLLExportVisibility
498 : NoDLLStorageClassVisibility);
499 }
500
501 GV.setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
502 }
503}
504
505void CodeGenModule::Release() {
506 EmitDeferred();
507 EmitVTablesOpportunistically();
508 applyGlobalValReplacements();
509 applyReplacements();
510 checkAliases();
511 emitMultiVersionFunctions();
512 EmitCXXGlobalInitFunc();
513 EmitCXXGlobalCleanUpFunc();
514 registerGlobalDtorsWithAtExit();
515 EmitCXXThreadLocalInitFunc();
516 if (ObjCRuntime)
517 if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
518 AddGlobalCtor(ObjCInitFunction);
519 if (Context.getLangOpts().CUDA && CUDARuntime) {
520 if (llvm::Function *CudaCtorFunction = CUDARuntime->finalizeModule())
521 AddGlobalCtor(CudaCtorFunction);
522 }
523 if (OpenMPRuntime) {
524 if (llvm::Function *OpenMPRequiresDirectiveRegFun =
525 OpenMPRuntime->emitRequiresDirectiveRegFun()) {
526 AddGlobalCtor(OpenMPRequiresDirectiveRegFun, 0);
527 }
528 OpenMPRuntime->createOffloadEntriesAndInfoMetadata();
529 OpenMPRuntime->clear();
530 }
531 if (PGOReader) {
532 getModule().setProfileSummary(
533 PGOReader->getSummary(/* UseCS */ false).getMD(VMContext),
534 llvm::ProfileSummary::PSK_Instr);
535 if (PGOStats.hasDiagnostics())
536 PGOStats.reportDiagnostics(getDiags(), getCodeGenOpts().MainFileName);
537 }
538 EmitCtorList(GlobalCtors, "llvm.global_ctors");
539 EmitCtorList(GlobalDtors, "llvm.global_dtors");
540 EmitGlobalAnnotations();
541 EmitStaticExternCAliases();
542 EmitDeferredUnusedCoverageMappings();
543 CodeGenPGO(*this).setValueProfilingFlag(getModule());
544 if (CoverageMapping)
545 CoverageMapping->emit();
546 if (CodeGenOpts.SanitizeCfiCrossDso) {
547 CodeGenFunction(*this).EmitCfiCheckFail();
548 CodeGenFunction(*this).EmitCfiCheckStub();
549 }
550 emitAtAvailableLinkGuard();
551 if (Context.getTargetInfo().getTriple().isWasm() &&
552 !Context.getTargetInfo().getTriple().isOSEmscripten()) {
553 EmitMainVoidAlias();
554 }
555
556 if (getTriple().isAMDGPU()) {
557 // Emit reference of __amdgpu_device_library_preserve_asan_functions to
558 // preserve ASAN functions in bitcode libraries.
559 if (LangOpts.Sanitize.has(SanitizerKind::Address)) {
560 auto *FT = llvm::FunctionType::get(VoidTy, {});
561 auto *F = llvm::Function::Create(
562 FT, llvm::GlobalValue::ExternalLinkage,
563 "__amdgpu_device_library_preserve_asan_functions", &getModule());
564 auto *Var = new llvm::GlobalVariable(
565 getModule(), FT->getPointerTo(),
566 /*isConstant=*/true, llvm::GlobalValue::WeakAnyLinkage, F,
567 "__amdgpu_device_library_preserve_asan_functions_ptr", nullptr,
568 llvm::GlobalVariable::NotThreadLocal);
569 addCompilerUsedGlobal(Var);
570 }
571 // Emit amdgpu_code_object_version module flag, which is code object version
572 // times 100.
573 // ToDo: Enable module flag for all code object version when ROCm device
574 // library is ready.
575 if (getTarget().getTargetOpts().CodeObjectVersion == TargetOptions::COV_5) {
576 getModule().addModuleFlag(llvm::Module::Error,
577 "amdgpu_code_object_version",
578 getTarget().getTargetOpts().CodeObjectVersion);
579 }
580 }
581
582 emitLLVMUsed();
583 if (SanStats)
584 SanStats->finish();
585
586 if (CodeGenOpts.Autolink &&
587 (Context.getLangOpts().Modules || !LinkerOptionsMetadata.empty())) {
588 EmitModuleLinkOptions();
589 }
590
591 // On ELF we pass the dependent library specifiers directly to the linker
592 // without manipulating them. This is in contrast to other platforms where
593 // they are mapped to a specific linker option by the compiler. This
594 // difference is a result of the greater variety of ELF linkers and the fact
595 // that ELF linkers tend to handle libraries in a more complicated fashion
596 // than on other platforms. This forces us to defer handling the dependent
597 // libs to the linker.
598 //
599 // CUDA/HIP device and host libraries are different. Currently there is no
600 // way to differentiate dependent libraries for host or device. Existing
601 // usage of #pragma comment(lib, *) is intended for host libraries on
602 // Windows. Therefore emit llvm.dependent-libraries only for host.
603 if (!ELFDependentLibraries.empty() && !Context.getLangOpts().CUDAIsDevice) {
604 auto *NMD = getModule().getOrInsertNamedMetadata("llvm.dependent-libraries");
605 for (auto *MD : ELFDependentLibraries)
606 NMD->addOperand(MD);
607 }
608
609 // Record mregparm value now so it is visible through rest of codegen.
610 if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
611 getModule().addModuleFlag(llvm::Module::Error, "NumRegisterParameters",
612 CodeGenOpts.NumRegisterParameters);
613
614 if (CodeGenOpts.DwarfVersion) {
615 getModule().addModuleFlag(llvm::Module::Max, "Dwarf Version",
616 CodeGenOpts.DwarfVersion);
617 }
618
619 if (CodeGenOpts.Dwarf64)
620 getModule().addModuleFlag(llvm::Module::Max, "DWARF64", 1);
621
622 if (Context.getLangOpts().SemanticInterposition)
623 // Require various optimization to respect semantic interposition.
624 getModule().setSemanticInterposition(true);
625
626 if (CodeGenOpts.EmitCodeView) {
627 // Indicate that we want CodeView in the metadata.
628 getModule().addModuleFlag(llvm::Module::Warning, "CodeView", 1);
629 }
630 if (CodeGenOpts.CodeViewGHash) {
631 getModule().addModuleFlag(llvm::Module::Warning, "CodeViewGHash", 1);
632 }
633 if (CodeGenOpts.ControlFlowGuard) {
634 // Function ID tables and checks for Control Flow Guard (cfguard=2).
635 getModule().addModuleFlag(llvm::Module::Warning, "cfguard", 2);
636 } else if (CodeGenOpts.ControlFlowGuardNoChecks) {
637 // Function ID tables for Control Flow Guard (cfguard=1).
638 getModule().addModuleFlag(llvm::Module::Warning, "cfguard", 1);
639 }
640 if (CodeGenOpts.EHContGuard) {
641 // Function ID tables for EH Continuation Guard.
642 getModule().addModuleFlag(llvm::Module::Warning, "ehcontguard", 1);
643 }
644 if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) {
645 // We don't support LTO with 2 with different StrictVTablePointers
646 // FIXME: we could support it by stripping all the information introduced
647 // by StrictVTablePointers.
648
649 getModule().addModuleFlag(llvm::Module::Error, "StrictVTablePointers",1);
650
651 llvm::Metadata *Ops[2] = {
652 llvm::MDString::get(VMContext, "StrictVTablePointers"),
653 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
654 llvm::Type::getInt32Ty(VMContext), 1))};
655
656 getModule().addModuleFlag(llvm::Module::Require,
657 "StrictVTablePointersRequirement",
658 llvm::MDNode::get(VMContext, Ops));
659 }
660 if (getModuleDebugInfo())
661 // We support a single version in the linked module. The LLVM
662 // parser will drop debug info with a different version number
663 // (and warn about it, too).
664 getModule().addModuleFlag(llvm::Module::Warning, "Debug Info Version",
665 llvm::DEBUG_METADATA_VERSION);
666
667 // We need to record the widths of enums and wchar_t, so that we can generate
668 // the correct build attributes in the ARM backend. wchar_size is also used by
669 // TargetLibraryInfo.
670 uint64_t WCharWidth =
671 Context.getTypeSizeInChars(Context.getWideCharType()).getQuantity();
672 getModule().addModuleFlag(llvm::Module::Error, "wchar_size", WCharWidth);
673
674 llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch();
675 if ( Arch == llvm::Triple::arm
676 || Arch == llvm::Triple::armeb
677 || Arch == llvm::Triple::thumb
678 || Arch == llvm::Triple::thumbeb) {
679 // The minimum width of an enum in bytes
680 uint64_t EnumWidth = Context.getLangOpts().ShortEnums ? 1 : 4;
681 getModule().addModuleFlag(llvm::Module::Error, "min_enum_size", EnumWidth);
682 }
683
684 if (Arch == llvm::Triple::riscv32 || Arch == llvm::Triple::riscv64) {
685 StringRef ABIStr = Target.getABI();
686 llvm::LLVMContext &Ctx = TheModule.getContext();
687 getModule().addModuleFlag(llvm::Module::Error, "target-abi",
688 llvm::MDString::get(Ctx, ABIStr));
689 }
690
691 if (CodeGenOpts.SanitizeCfiCrossDso) {
692 // Indicate that we want cross-DSO control flow integrity checks.
693 getModule().addModuleFlag(llvm::Module::Override, "Cross-DSO CFI", 1);
694 }
695
696 if (CodeGenOpts.WholeProgramVTables) {
697 // Indicate whether VFE was enabled for this module, so that the
698 // vcall_visibility metadata added under whole program vtables is handled
699 // appropriately in the optimizer.
700 getModule().addModuleFlag(llvm::Module::Error, "Virtual Function Elim",
701 CodeGenOpts.VirtualFunctionElimination);
702 }
703
704 if (LangOpts.Sanitize.has(SanitizerKind::CFIICall)) {
705 getModule().addModuleFlag(llvm::Module::Override,
706 "CFI Canonical Jump Tables",
707 CodeGenOpts.SanitizeCfiCanonicalJumpTables);
708 }
709
710 if (CodeGenOpts.CFProtectionReturn &&
711 Target.checkCFProtectionReturnSupported(getDiags())) {
712 // Indicate that we want to instrument return control flow protection.
713 getModule().addModuleFlag(llvm::Module::Override, "cf-protection-return",
714 1);
715 }
716
717 if (CodeGenOpts.CFProtectionBranch &&
718 Target.checkCFProtectionBranchSupported(getDiags())) {
719 // Indicate that we want to instrument branch control flow protection.
720 getModule().addModuleFlag(llvm::Module::Override, "cf-protection-branch",
721 1);
722 }
723
724 if (CodeGenOpts.IBTSeal)
725 getModule().addModuleFlag(llvm::Module::Override, "ibt-seal", 1);
726
727 // Add module metadata for return address signing (ignoring
728 // non-leaf/all) and stack tagging. These are actually turned on by function
729 // attributes, but we use module metadata to emit build attributes. This is
730 // needed for LTO, where the function attributes are inside bitcode
731 // serialised into a global variable by the time build attributes are
732 // emitted, so we can't access them.
733 if (Context.getTargetInfo().hasFeature("ptrauth") &&
734 LangOpts.getSignReturnAddressScope() !=
735 LangOptions::SignReturnAddressScopeKind::None)
736 getModule().addModuleFlag(llvm::Module::Override,
737 "sign-return-address-buildattr", 1);
738 if (LangOpts.Sanitize.has(SanitizerKind::MemTag))
739 getModule().addModuleFlag(llvm::Module::Override,
740 "tag-stack-memory-buildattr", 1);
741
742 if (Arch == llvm::Triple::thumb || Arch == llvm::Triple::thumbeb ||
743 Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb ||
744 Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 ||
745 Arch == llvm::Triple::aarch64_be) {
746 getModule().addModuleFlag(llvm::Module::Error, "branch-target-enforcement",
747 LangOpts.BranchTargetEnforcement);
748
749 getModule().addModuleFlag(llvm::Module::Error, "sign-return-address",
750 LangOpts.hasSignReturnAddress());
751
752 getModule().addModuleFlag(llvm::Module::Error, "sign-return-address-all",
753 LangOpts.isSignReturnAddressScopeAll());
754
755 getModule().addModuleFlag(llvm::Module::Error,
756 "sign-return-address-with-bkey",
757 !LangOpts.isSignReturnAddressWithAKey());
758 }
759
760 if (!CodeGenOpts.MemoryProfileOutput.empty()) {
761 llvm::LLVMContext &Ctx = TheModule.getContext();
762 getModule().addModuleFlag(
763 llvm::Module::Error, "MemProfProfileFilename",
764 llvm::MDString::get(Ctx, CodeGenOpts.MemoryProfileOutput));
765 }
766
767 if (LangOpts.CUDAIsDevice && getTriple().isNVPTX()) {
768 // Indicate whether __nvvm_reflect should be configured to flush denormal
769 // floating point values to 0. (This corresponds to its "__CUDA_FTZ"
770 // property.)
771 getModule().addModuleFlag(llvm::Module::Override, "nvvm-reflect-ftz",
772 CodeGenOpts.FP32DenormalMode.Output !=
773 llvm::DenormalMode::IEEE);
774 }
775
776 if (LangOpts.EHAsynch)
777 getModule().addModuleFlag(llvm::Module::Warning, "eh-asynch", 1);
778
779 // Indicate whether this Module was compiled with -fopenmp
780 if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
781 getModule().addModuleFlag(llvm::Module::Max, "openmp", LangOpts.OpenMP);
782 if (getLangOpts().OpenMPIsDevice)
783 getModule().addModuleFlag(llvm::Module::Max, "openmp-device",
784 LangOpts.OpenMP);
785
786 // Emit OpenCL specific module metadata: OpenCL/SPIR version.
787 if (LangOpts.OpenCL) {
788 EmitOpenCLMetadata();
789 // Emit SPIR version.
790 if (getTriple().isSPIR()) {
791 // SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the
792 // opencl.spir.version named metadata.
793 // C++ for OpenCL has a distinct mapping for version compatibility with
794 // OpenCL.
795 auto Version = LangOpts.getOpenCLCompatibleVersion();
796 llvm::Metadata *SPIRVerElts[] = {
797 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
798 Int32Ty, Version / 100)),
799 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
800 Int32Ty, (Version / 100 > 1) ? 0 : 2))};
801 llvm::NamedMDNode *SPIRVerMD =
802 TheModule.getOrInsertNamedMetadata("opencl.spir.version");
803 llvm::LLVMContext &Ctx = TheModule.getContext();
804 SPIRVerMD->addOperand(llvm::MDNode::get(Ctx, SPIRVerElts));
805 }
806 }
807
808 if (uint32_t PLevel = Context.getLangOpts().PICLevel) {
809 assert(PLevel < 3 && "Invalid PIC Level")(static_cast <bool> (PLevel < 3 && "Invalid PIC Level"
) ? void (0) : __assert_fail ("PLevel < 3 && \"Invalid PIC Level\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 809, __extension__ __PRETTY_FUNCTION__
))
;
810 getModule().setPICLevel(static_cast<llvm::PICLevel::Level>(PLevel));
811 if (Context.getLangOpts().PIE)
812 getModule().setPIELevel(static_cast<llvm::PIELevel::Level>(PLevel));
813 }
814
815 if (getCodeGenOpts().CodeModel.size() > 0) {
816 unsigned CM = llvm::StringSwitch<unsigned>(getCodeGenOpts().CodeModel)
817 .Case("tiny", llvm::CodeModel::Tiny)
818 .Case("small", llvm::CodeModel::Small)
819 .Case("kernel", llvm::CodeModel::Kernel)
820 .Case("medium", llvm::CodeModel::Medium)
821 .Case("large", llvm::CodeModel::Large)
822 .Default(~0u);
823 if (CM != ~0u) {
824 llvm::CodeModel::Model codeModel = static_cast<llvm::CodeModel::Model>(CM);
825 getModule().setCodeModel(codeModel);
826 }
827 }
828
829 if (CodeGenOpts.NoPLT)
830 getModule().setRtLibUseGOT();
831 if (CodeGenOpts.UnwindTables)
832 getModule().setUwtable(llvm::UWTableKind(CodeGenOpts.UnwindTables));
833
834 switch (CodeGenOpts.getFramePointer()) {
835 case CodeGenOptions::FramePointerKind::None:
836 // 0 ("none") is the default.
837 break;
838 case CodeGenOptions::FramePointerKind::NonLeaf:
839 getModule().setFramePointer(llvm::FramePointerKind::NonLeaf);
840 break;
841 case CodeGenOptions::FramePointerKind::All:
842 getModule().setFramePointer(llvm::FramePointerKind::All);
843 break;
844 }
845
846 SimplifyPersonality();
847
848 if (getCodeGenOpts().EmitDeclMetadata)
849 EmitDeclMetadata();
850
851 if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes)
852 EmitCoverageFile();
853
854 if (CGDebugInfo *DI = getModuleDebugInfo())
855 DI->finalize();
856
857 if (getCodeGenOpts().EmitVersionIdentMetadata)
858 EmitVersionIdentMetadata();
859
860 if (!getCodeGenOpts().RecordCommandLine.empty())
861 EmitCommandLineMetadata();
862
863 if (!getCodeGenOpts().StackProtectorGuard.empty())
864 getModule().setStackProtectorGuard(getCodeGenOpts().StackProtectorGuard);
865 if (!getCodeGenOpts().StackProtectorGuardReg.empty())
866 getModule().setStackProtectorGuardReg(
867 getCodeGenOpts().StackProtectorGuardReg);
868 if (getCodeGenOpts().StackProtectorGuardOffset != INT_MAX2147483647)
869 getModule().setStackProtectorGuardOffset(
870 getCodeGenOpts().StackProtectorGuardOffset);
871 if (getCodeGenOpts().StackAlignment)
872 getModule().setOverrideStackAlignment(getCodeGenOpts().StackAlignment);
873 if (getCodeGenOpts().SkipRaxSetup)
874 getModule().addModuleFlag(llvm::Module::Override, "SkipRaxSetup", 1);
875
876 getTargetCodeGenInfo().emitTargetMetadata(*this, MangledDeclNames);
877
878 EmitBackendOptionsMetadata(getCodeGenOpts());
879
880 // If there is device offloading code embed it in the host now.
881 EmbedObject(&getModule(), CodeGenOpts, getDiags());
882
883 // Set visibility from DLL storage class
884 // We do this at the end of LLVM IR generation; after any operation
885 // that might affect the DLL storage class or the visibility, and
886 // before anything that might act on these.
887 setVisibilityFromDLLStorageClass(LangOpts, getModule());
888}
889
890void CodeGenModule::EmitOpenCLMetadata() {
891 // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
892 // opencl.ocl.version named metadata node.
893 // C++ for OpenCL has a distinct mapping for versions compatibile with OpenCL.
894 auto Version = LangOpts.getOpenCLCompatibleVersion();
895 llvm::Metadata *OCLVerElts[] = {
896 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
897 Int32Ty, Version / 100)),
898 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
899 Int32Ty, (Version % 100) / 10))};
900 llvm::NamedMDNode *OCLVerMD =
901 TheModule.getOrInsertNamedMetadata("opencl.ocl.version");
902 llvm::LLVMContext &Ctx = TheModule.getContext();
903 OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
904}
905
906void CodeGenModule::EmitBackendOptionsMetadata(
907 const CodeGenOptions CodeGenOpts) {
908 switch (getTriple().getArch()) {
909 default:
910 break;
911 case llvm::Triple::riscv32:
912 case llvm::Triple::riscv64:
913 getModule().addModuleFlag(llvm::Module::Error, "SmallDataLimit",
914 CodeGenOpts.SmallDataLimit);
915 break;
916 }
917}
918
/// Notify the type cache that the definition of \p TD is now complete, so a
/// previously-emitted opaque IR type for it can be filled in.
void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
  // Make sure that this type is translated.
  Types.UpdateCompletedType(TD);
}
923
/// Refresh the cached IR type information for \p RD (delegates to the type
/// converter).
void CodeGenModule::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
  // Make sure that this type is translated.
  Types.RefreshTypeCacheForClass(RD);
}
928
929llvm::MDNode *CodeGenModule::getTBAATypeInfo(QualType QTy) {
930 if (!TBAA)
931 return nullptr;
932 return TBAA->getTypeInfo(QTy);
933}
934
935TBAAAccessInfo CodeGenModule::getTBAAAccessInfo(QualType AccessType) {
936 if (!TBAA)
937 return TBAAAccessInfo();
938 if (getLangOpts().CUDAIsDevice) {
939 // As CUDA builtin surface/texture types are replaced, skip generating TBAA
940 // access info.
941 if (AccessType->isCUDADeviceBuiltinSurfaceType()) {
942 if (getTargetCodeGenInfo().getCUDADeviceBuiltinSurfaceDeviceType() !=
943 nullptr)
944 return TBAAAccessInfo();
945 } else if (AccessType->isCUDADeviceBuiltinTextureType()) {
946 if (getTargetCodeGenInfo().getCUDADeviceBuiltinTextureDeviceType() !=
947 nullptr)
948 return TBAAAccessInfo();
949 }
950 }
951 return TBAA->getAccessInfo(AccessType);
952}
953
954TBAAAccessInfo
955CodeGenModule::getTBAAVTablePtrAccessInfo(llvm::Type *VTablePtrType) {
956 if (!TBAA)
957 return TBAAAccessInfo();
958 return TBAA->getVTablePtrAccessInfo(VTablePtrType);
959}
960
961llvm::MDNode *CodeGenModule::getTBAAStructInfo(QualType QTy) {
962 if (!TBAA)
963 return nullptr;
964 return TBAA->getTBAAStructInfo(QTy);
965}
966
967llvm::MDNode *CodeGenModule::getTBAABaseTypeInfo(QualType QTy) {
968 if (!TBAA)
969 return nullptr;
970 return TBAA->getBaseTypeInfo(QTy);
971}
972
973llvm::MDNode *CodeGenModule::getTBAAAccessTagInfo(TBAAAccessInfo Info) {
974 if (!TBAA)
975 return nullptr;
976 return TBAA->getAccessTagInfo(Info);
977}
978
979TBAAAccessInfo CodeGenModule::mergeTBAAInfoForCast(TBAAAccessInfo SourceInfo,
980 TBAAAccessInfo TargetInfo) {
981 if (!TBAA)
982 return TBAAAccessInfo();
983 return TBAA->mergeTBAAInfoForCast(SourceInfo, TargetInfo);
984}
985
986TBAAAccessInfo
987CodeGenModule::mergeTBAAInfoForConditionalOperator(TBAAAccessInfo InfoA,
988 TBAAAccessInfo InfoB) {
989 if (!TBAA)
990 return TBAAAccessInfo();
991 return TBAA->mergeTBAAInfoForConditionalOperator(InfoA, InfoB);
992}
993
994TBAAAccessInfo
995CodeGenModule::mergeTBAAInfoForMemoryTransfer(TBAAAccessInfo DestInfo,
996 TBAAAccessInfo SrcInfo) {
997 if (!TBAA)
998 return TBAAAccessInfo();
999 return TBAA->mergeTBAAInfoForConditionalOperator(DestInfo, SrcInfo);
1000}
1001
1002void CodeGenModule::DecorateInstructionWithTBAA(llvm::Instruction *Inst,
1003 TBAAAccessInfo TBAAInfo) {
1004 if (llvm::MDNode *Tag = getTBAAAccessTagInfo(TBAAInfo))
1005 Inst->setMetadata(llvm::LLVMContext::MD_tbaa, Tag);
1006}
1007
/// Attach empty !invariant.group metadata to \p I.
/// NOTE(review): \p RD is currently unused — every instruction receives the
/// same (empty) node regardless of the class; confirm whether the parameter
/// is kept for interface stability.
void CodeGenModule::DecorateInstructionWithInvariantGroup(
    llvm::Instruction *I, const CXXRecordDecl *RD) {
  I->setMetadata(llvm::LLVMContext::MD_invariant_group,
                 llvm::MDNode::get(getLLVMContext(), {}));
}
1013
1014void CodeGenModule::Error(SourceLocation loc, StringRef message) {
1015 unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, "%0");
1016 getDiags().Report(Context.getFullLoc(loc), diagID) << message;
1017}
1018
1019/// ErrorUnsupported - Print out an error that codegen doesn't support the
1020/// specified stmt yet.
1021void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type) {
1022 unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
1023 "cannot compile this %0 yet");
1024 std::string Msg = Type;
1025 getDiags().Report(Context.getFullLoc(S->getBeginLoc()), DiagID)
1026 << Msg << S->getSourceRange();
1027}
1028
1029/// ErrorUnsupported - Print out an error that codegen doesn't support the
1030/// specified decl yet.
1031void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type) {
1032 unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
1033 "cannot compile this %0 yet");
1034 std::string Msg = Type;
1035 getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
1036}
1037
/// Return \p size as an IR constant of the target's size_t type.
llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
  return llvm::ConstantInt::get(SizeTy, size.getQuantity());
}
1041
1042void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
1043 const NamedDecl *D) const {
1044 if (GV->hasDLLImportStorageClass())
1045 return;
1046 // Internal definitions always have default visibility.
1047 if (GV->hasLocalLinkage()) {
1048 GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
1049 return;
1050 }
1051 if (!D)
1052 return;
1053 // Set visibility for definitions, and for declarations if requested globally
1054 // or set explicitly.
1055 LinkageInfo LV = D->getLinkageAndVisibility();
1056 if (LV.isVisibilityExplicit() || getLangOpts().SetVisibilityForExternDecls ||
1057 !GV->isDeclarationForLinker())
1058 GV->setVisibility(GetLLVMVisibility(LV.getVisibility()));
1059}
1060
/// Decide whether \p GV may be marked dso_local, i.e. whether codegen may
/// assume the symbol resolves within the current linkage unit. The checks are
/// an ordered decision ladder from strongest to weakest evidence — do not
/// reorder them.
static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
                                 llvm::GlobalValue *GV) {
  // Local linkage can never be preempted.
  if (GV->hasLocalLinkage())
    return true;

  // Non-default visibility (without extern_weak) pins the symbol to this DSO.
  if (!GV->hasDefaultVisibility() && !GV->hasExternalWeakLinkage())
    return true;

  // DLLImport explicitly marks the GV as external.
  if (GV->hasDLLImportStorageClass())
    return false;

  const llvm::Triple &TT = CGM.getTriple();
  if (TT.isWindowsGNUEnvironment()) {
    // In MinGW, variables without DLLImport can still be automatically
    // imported from a DLL by the linker; don't mark variables that
    // potentially could come from another DLL as DSO local.

    // With EmulatedTLS, TLS variables can be autoimported from other DLLs
    // (and this actually happens in the public interface of libstdc++), so
    // such variables can't be marked as DSO local. (Native TLS variables
    // can't be dllimported at all, though.)
    if (GV->isDeclarationForLinker() && isa<llvm::GlobalVariable>(GV) &&
        (!GV->isThreadLocal() || CGM.getCodeGenOpts().EmulatedTLS))
      return false;
  }

  // On COFF, don't mark 'extern_weak' symbols as DSO local. If these symbols
  // remain unresolved in the link, they can be resolved to zero, which is
  // outside the current DSO.
  if (TT.isOSBinFormatCOFF() && GV->hasExternalWeakLinkage())
    return false;

  // Every other GV is local on COFF.
  // Make an exception for windows OS in the triple: Some firmware builds use
  // *-win32-macho triples. This (accidentally?) produced windows relocations
  // without GOT tables in older clang versions; Keep this behaviour.
  // FIXME: even thread local variables?
  if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO()))
    return true;

  // Only handle COFF and ELF for now.
  if (!TT.isOSBinFormatELF())
    return false;

  // If this is not an executable, don't assume anything is local.
  const auto &CGOpts = CGM.getCodeGenOpts();
  llvm::Reloc::Model RM = CGOpts.RelocationModel;
  const auto &LOpts = CGM.getLangOpts();
  if (RM != llvm::Reloc::Static && !LOpts.PIE) {
    // On ELF, if -fno-semantic-interposition is specified and the target
    // supports local aliases, there will be neither CC1
    // -fsemantic-interposition nor -fhalf-no-semantic-interposition. Set
    // dso_local on the function if using a local alias is preferable (can avoid
    // PLT indirection).
    if (!(isa<llvm::Function>(GV) && GV->canBenefitFromLocalAlias()))
      return false;
    return !(CGM.getLangOpts().SemanticInterposition ||
             CGM.getLangOpts().HalfNoSemanticInterposition);
  }

  // A definition cannot be preempted from an executable.
  if (!GV->isDeclarationForLinker())
    return true;

  // Most PIC code sequences that assume that a symbol is local cannot produce a
  // 0 if it turns out the symbol is undefined. While this is ABI and relocation
  // depended, it seems worth it to handle it here.
  if (RM == llvm::Reloc::PIC_ && GV->hasExternalWeakLinkage())
    return false;

  // PowerPC64 prefers TOC indirection to avoid copy relocations.
  if (TT.isPPC64())
    return false;

  if (CGOpts.DirectAccessExternalData) {
    // If -fdirect-access-external-data (default for -fno-pic), set dso_local
    // for non-thread-local variables. If the symbol is not defined in the
    // executable, a copy relocation will be needed at link time. dso_local is
    // excluded for thread-local variables because they generally don't support
    // copy relocations.
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(GV))
      if (!Var->isThreadLocal())
        return true;

    // -fno-pic sets dso_local on a function declaration to allow direct
    // accesses when taking its address (similar to a data symbol). If the
    // function is not defined in the executable, a canonical PLT entry will be
    // needed at link time. -fno-direct-access-external-data can avoid the
    // canonical PLT entry. We don't generalize this condition to -fpie/-fpic as
    // it could just cause trouble without providing perceptible benefits.
    if (isa<llvm::Function>(GV) && !CGOpts.NoPLT && RM == llvm::Reloc::Static)
      return true;
  }

  // If we can use copy relocations we can assume it is local.

  // Otherwise don't assume it is local.
  return false;
}
1161
/// Set the dso_local flag on \p GV according to shouldAssumeDSOLocal.
void CodeGenModule::setDSOLocal(llvm::GlobalValue *GV) const {
  GV->setDSOLocal(shouldAssumeDSOLocal(*this, GV));
}
1165
1166void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
1167 GlobalDecl GD) const {
1168 const auto *D = dyn_cast<NamedDecl>(GD.getDecl());
1169 // C++ destructors have a few C++ ABI specific special cases.
1170 if (const auto *Dtor = dyn_cast_or_null<CXXDestructorDecl>(D)) {
1171 getCXXABI().setCXXDestructorDLLStorage(GV, Dtor, GD.getDtorType());
1172 return;
1173 }
1174 setDLLImportDLLExport(GV, D);
1175}
1176
1177void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
1178 const NamedDecl *D) const {
1179 if (D && D->isExternallyVisible()) {
1180 if (D->hasAttr<DLLImportAttr>())
1181 GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
1182 else if (D->hasAttr<DLLExportAttr>() && !GV->isDeclarationForLinker())
1183 GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
1184 }
1185}
1186
/// Set DLL storage, visibility, dso_local and partition on \p GV for \p GD.
void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
                                    GlobalDecl GD) const {
  setDLLImportDLLExport(GV, GD);
  setGVPropertiesAux(GV, dyn_cast<NamedDecl>(GD.getDecl()));
}
1192
/// Overload taking the declaration directly instead of a GlobalDecl.
void CodeGenModule::setGVProperties(llvm::GlobalValue *GV,
                                    const NamedDecl *D) const {
  setDLLImportDLLExport(GV, D);
  setGVPropertiesAux(GV, D);
}
1198
/// Shared tail of setGVProperties: visibility, dso_local, and the symbol
/// partition from -fsymbol-partition.
void CodeGenModule::setGVPropertiesAux(llvm::GlobalValue *GV,
                                       const NamedDecl *D) const {
  setGlobalVisibility(GV, D);
  setDSOLocal(GV);
  GV->setPartition(CodeGenOpts.SymbolPartition);
}
1205
/// Map a tls_model attribute string to the corresponding LLVM TLS mode.
/// NOTE(review): there is deliberately no .Default() — the attribute spelling
/// is presumably validated by Sema before reaching here, so an unknown string
/// would trip StringSwitch's internal check; confirm if new spellings are
/// ever added.
static llvm::GlobalVariable::ThreadLocalMode GetLLVMTLSModel(StringRef S) {
  return llvm::StringSwitch<llvm::GlobalVariable::ThreadLocalMode>(S)
      .Case("global-dynamic", llvm::GlobalVariable::GeneralDynamicTLSModel)
      .Case("local-dynamic", llvm::GlobalVariable::LocalDynamicTLSModel)
      .Case("initial-exec", llvm::GlobalVariable::InitialExecTLSModel)
      .Case("local-exec", llvm::GlobalVariable::LocalExecTLSModel);
}
1213
/// Translate the command-line default TLS model (-ftls-model) into the LLVM
/// IR thread-local mode.
llvm::GlobalVariable::ThreadLocalMode
CodeGenModule::GetDefaultLLVMTLSModel() const {
  switch (CodeGenOpts.getDefaultTLSModel()) {
  case CodeGenOptions::GeneralDynamicTLSModel:
    return llvm::GlobalVariable::GeneralDynamicTLSModel;
  case CodeGenOptions::LocalDynamicTLSModel:
    return llvm::GlobalVariable::LocalDynamicTLSModel;
  case CodeGenOptions::InitialExecTLSModel:
    return llvm::GlobalVariable::InitialExecTLSModel;
  case CodeGenOptions::LocalExecTLSModel:
    return llvm::GlobalVariable::LocalExecTLSModel;
  }
  llvm_unreachable("Invalid TLS model!");
}
1228
/// Apply the thread-local mode to \p GV: the command-line default, overridden
/// by an explicit tls_model attribute on \p D when present.
void CodeGenModule::setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const {
  assert(D.getTLSKind() && "setting TLS mode on non-TLS var!");

  llvm::GlobalValue::ThreadLocalMode TLM;
  TLM = GetDefaultLLVMTLSModel();

  // Override the TLS model if it is explicitly specified.
  if (const TLSModelAttr *Attr = D.getAttr<TLSModelAttr>()) {
    TLM = GetLLVMTLSModel(Attr->getModel());
  }

  GV->setThreadLocalMode(TLM);
}
1242
1243static std::string getCPUSpecificMangling(const CodeGenModule &CGM,
1244 StringRef Name) {
1245 const TargetInfo &Target = CGM.getTarget();
1246 return (Twine('.') + Twine(Target.CPUSpecificManglingCharacter(Name))).str();
1247}
1248
1249static void AppendCPUSpecificCPUDispatchMangling(const CodeGenModule &CGM,
1250 const CPUSpecificAttr *Attr,
1251 unsigned CPUIndex,
1252 raw_ostream &Out) {
1253 // cpu_specific gets the current name, dispatch gets the resolver if IFunc is
1254 // supported.
1255 if (Attr)
1256 Out << getCPUSpecificMangling(CGM, Attr->getCPUName(CPUIndex)->getName());
1257 else if (CGM.getTarget().supportsIFunc())
1258 Out << ".resolver";
1259}
1260
/// Append the ".arch_<name>" / ".<feat1>_<feat2>..." suffix for a 'target'
/// multi-version variant of a function. The default version gets no suffix.
static void AppendTargetMangling(const CodeGenModule &CGM,
                                 const TargetAttr *Attr, raw_ostream &Out) {
  if (Attr->isDefaultVersion())
    return;

  Out << '.';
  const TargetInfo &Target = CGM.getTarget();
  // Sort the features by multi-version priority so every version of the
  // function emits its feature list in a deterministic order.
  ParsedTargetAttr Info =
      Attr->parse([&Target](StringRef LHS, StringRef RHS) {
        // Multiversioning doesn't allow "no-${feature}", so we can
        // only have "+" prefixes here.
        assert(LHS.startswith("+") && RHS.startswith("+") &&
               "Features should always have a prefix.");
        return Target.multiVersionSortPriority(LHS.substr(1)) >
               Target.multiVersionSortPriority(RHS.substr(1));
      });

  bool IsFirst = true;

  if (!Info.Architecture.empty()) {
    IsFirst = false;
    Out << "arch_" << Info.Architecture;
  }

  for (StringRef Feat : Info.Features) {
    if (!IsFirst)
      Out << '_';
    IsFirst = false;
    // substr(1) drops the '+' prefix asserted above.
    Out << Feat.substr(1);
  }
}
1292
1293// Returns true if GD is a function decl with internal linkage and
1294// needs a unique suffix after the mangled name.
1295static bool isUniqueInternalLinkageDecl(GlobalDecl GD,
1296 CodeGenModule &CGM) {
1297 const Decl *D = GD.getDecl();
1298 return !CGM.getModuleNameHash().empty() && isa<FunctionDecl>(D) &&
1299 (CGM.getFunctionLinkage(GD) == llvm::GlobalValue::InternalLinkage);
1300}
1301
1302static void AppendTargetClonesMangling(const CodeGenModule &CGM,
1303 const TargetClonesAttr *Attr,
1304 unsigned VersionIndex,
1305 raw_ostream &Out) {
1306 Out << '.';
1307 StringRef FeatureStr = Attr->getFeatureStr(VersionIndex);
1308 if (FeatureStr.startswith("arch="))
1309 Out << "arch_" << FeatureStr.substr(sizeof("arch=") - 1);
1310 else
1311 Out << FeatureStr;
1312
1313 Out << '.' << Attr->getMangledIndex(VersionIndex);
1314}
1315
/// Compute the mangled name for \p ND as seen through \p GD, including (in
/// this order) the module-hash suffix for unique internal-linkage names, any
/// multi-version suffix, and the HIP externalized-static-variable postfix.
/// \p OmitMultiVersionMangling suppresses the multi-version suffix so callers
/// can recover the pre-multiversion name.
static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
                                      const NamedDecl *ND,
                                      bool OmitMultiVersionMangling = false) {
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  MangleContext &MC = CGM.getCXXABI().getMangleContext();
  if (!CGM.getModuleNameHash().empty())
    MC.needsUniqueInternalLinkageNames();
  bool ShouldMangle = MC.shouldMangleDeclName(ND);
  if (ShouldMangle)
    MC.mangleName(GD.getWithDecl(ND), Out);
  else {
    // Unmangled names are emitted verbatim, except for regcall functions and
    // CUDA kernel stubs, which get a distinguishing prefix.
    IdentifierInfo *II = ND->getIdentifier();
    assert(II && "Attempt to mangle unnamed decl.");
    const auto *FD = dyn_cast<FunctionDecl>(ND);

    if (FD &&
        FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) {
      Out << "__regcall3__" << II->getName();
    } else if (FD && FD->hasAttr<CUDAGlobalAttr>() &&
               GD.getKernelReferenceKind() == KernelReferenceKind::Stub) {
      Out << "__device_stub__" << II->getName();
    } else {
      Out << II->getName();
    }
  }

  // Check if the module name hash should be appended for internal linkage
  // symbols. This should come before multi-version target suffixes are
  // appended. This is to keep the name and module hash suffix of the
  // internal linkage function together. The unique suffix should only be
  // added when name mangling is done to make sure that the final name can
  // be properly demangled. For example, for C functions without prototypes,
  // name mangling is not done and the unique suffix should not be appeneded
  // then.
  if (ShouldMangle && isUniqueInternalLinkageDecl(GD, CGM)) {
    assert(CGM.getCodeGenOpts().UniqueInternalLinkageNames &&
           "Hash computed when not explicitly requested");
    Out << CGM.getModuleNameHash();
  }

  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
    if (FD->isMultiVersion() && !OmitMultiVersionMangling) {
      switch (FD->getMultiVersionKind()) {
      case MultiVersionKind::CPUDispatch:
      case MultiVersionKind::CPUSpecific:
        AppendCPUSpecificCPUDispatchMangling(CGM,
                                             FD->getAttr<CPUSpecificAttr>(),
                                             GD.getMultiVersionIndex(), Out);
        break;
      case MultiVersionKind::Target:
        AppendTargetMangling(CGM, FD->getAttr<TargetAttr>(), Out);
        break;
      case MultiVersionKind::TargetClones:
        AppendTargetClonesMangling(CGM, FD->getAttr<TargetClonesAttr>(),
                                   GD.getMultiVersionIndex(), Out);
        break;
      case MultiVersionKind::None:
        llvm_unreachable("None multiversion type isn't valid here");
      }
    }

  // Make unique name for device side static file-scope variable for HIP.
  if (CGM.getContext().shouldExternalizeStaticVar(ND) &&
      CGM.getLangOpts().GPURelocatableDeviceCode &&
      CGM.getLangOpts().CUDAIsDevice && !CGM.getLangOpts().CUID.empty())
    CGM.printPostfixForExternalizedStaticVar(Out);
  return std::string(Out.str());
}
1385
/// When \p FD becomes multi-versioned after a version was already emitted
/// under its plain (non-multiversion) mangled name, rename that earlier
/// GlobalValue and fix up the mangling maps to the new suffixed name.
/// \p CurName is updated in place when it referred to the renamed entry.
void CodeGenModule::UpdateMultiVersionNames(GlobalDecl GD,
                                            const FunctionDecl *FD,
                                            StringRef &CurName) {
  if (!FD->isMultiVersion())
    return;

  // Get the name of what this would be without the 'target' attribute. This
  // allows us to lookup the version that was emitted when this wasn't a
  // multiversion function.
  std::string NonTargetName =
      getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
  GlobalDecl OtherGD;
  if (lookupRepresentativeDecl(NonTargetName, OtherGD)) {
    assert(OtherGD.getCanonicalDecl()
               .getDecl()
               ->getAsFunction()
               ->isMultiVersion() &&
           "Other GD should now be a multiversioned function");
    // OtherFD is the version of this function that was mangled BEFORE
    // becoming a MultiVersion function. It potentially needs to be updated.
    const FunctionDecl *OtherFD = OtherGD.getCanonicalDecl()
                                      .getDecl()
                                      ->getAsFunction()
                                      ->getMostRecentDecl();
    std::string OtherName = getMangledNameImpl(*this, OtherGD, OtherFD);
    // This is so that if the initial version was already the 'default'
    // version, we don't try to update it.
    if (OtherName != NonTargetName) {
      // Remove instead of erase, since others may have stored the StringRef
      // to this.
      const auto ExistingRecord = Manglings.find(NonTargetName);
      if (ExistingRecord != std::end(Manglings))
        Manglings.remove(&(*ExistingRecord));
      auto Result = Manglings.insert(std::make_pair(OtherName, OtherGD));
      StringRef OtherNameRef = MangledDeclNames[OtherGD.getCanonicalDecl()] =
          Result.first->first();
      // If this is the current decl is being created, make sure we update the name.
      if (GD.getCanonicalDecl() == OtherGD.getCanonicalDecl())
        CurName = OtherNameRef;
      // Rename the already-emitted IR value to the pre-multiversion name's
      // replacement.
      if (llvm::GlobalValue *Entry = GetGlobalValue(NonTargetName))
        Entry->setName(OtherName);
    }
  }
}
1430
/// Return the mangled name to use for GD in this module, computing and
/// caching it on first use.
///
/// The returned StringRef points into the Manglings map, which owns the
/// string for the lifetime of the CodeGenModule.
StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
  GlobalDecl CanonicalGD = GD.getCanonicalDecl();

  // Some ABIs don't have constructor variants. Make sure that base and
  // complete constructors get mangled the same.
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(CanonicalGD.getDecl())) {
    if (!getTarget().getCXXABI().hasConstructorVariants()) {
      CXXCtorType OrigCtorType = GD.getCtorType();
      assert(OrigCtorType == Ctor_Base || OrigCtorType == Ctor_Complete);
      if (OrigCtorType == Ctor_Base)
        CanonicalGD = GlobalDecl(CD, Ctor_Complete);
    }
  }

  // In CUDA/HIP device compilation with -fgpu-rdc, the mangled name of a
  // static device variable depends on whether the variable is referenced by
  // a host or device host function. Therefore the mangled name cannot be
  // cached.
  if (!LangOpts.CUDAIsDevice ||
      !getContext().mayExternalizeStaticVar(GD.getDecl())) {
    auto FoundName = MangledDeclNames.find(CanonicalGD);
    if (FoundName != MangledDeclNames.end())
      return FoundName->second;
  }

  // Keep the first result in the case of a mangling collision.
  const auto *ND = cast<NamedDecl>(GD.getDecl());
  std::string MangledName = getMangledNameImpl(*this, GD, ND);

  // Ensure either we have different ABIs between host and device compilations,
  // says host compilation following MSVC ABI but device compilation follows
  // Itanium C++ ABI or, if they follow the same ABI, kernel names after
  // mangling should be the same after name stubbing. The later checking is
  // very important as the device kernel name being mangled in host-compilation
  // is used to resolve the device binaries to be executed. Inconsistent naming
  // result in undefined behavior. Even though we cannot check that naming
  // directly between host- and device-compilations, the host- and
  // device-mangling in host compilation could help catching certain ones.
  assert(!isa<FunctionDecl>(ND) || !ND->hasAttr<CUDAGlobalAttr>() ||
         getLangOpts().CUDAIsDevice ||
         (getContext().getAuxTargetInfo() &&
          (getContext().getAuxTargetInfo()->getCXXABI() !=
           getContext().getTargetInfo().getCXXABI())) ||
         getCUDARuntime().getDeviceSideName(ND) ==
             getMangledNameImpl(
                 *this,
                 GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel),
                 ND));

  // Cache the result. On a name collision the first inserted GlobalDecl keeps
  // ownership of the string; we still map this CanonicalGD to that string.
  auto Result = Manglings.insert(std::make_pair(MangledName, GD));
  return MangledDeclNames[CanonicalGD] = Result.first->first();
}
1483
1484StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD,
1485 const BlockDecl *BD) {
1486 MangleContext &MangleCtx = getCXXABI().getMangleContext();
1487 const Decl *D = GD.getDecl();
1488
1489 SmallString<256> Buffer;
1490 llvm::raw_svector_ostream Out(Buffer);
1491 if (!D)
1492 MangleCtx.mangleGlobalBlock(BD,
1493 dyn_cast_or_null<VarDecl>(initializedGlobalDecl.getDecl()), Out);
1494 else if (const auto *CD = dyn_cast<CXXConstructorDecl>(D))
1495 MangleCtx.mangleCtorBlock(CD, GD.getCtorType(), BD, Out);
1496 else if (const auto *DD = dyn_cast<CXXDestructorDecl>(D))
1497 MangleCtx.mangleDtorBlock(DD, GD.getDtorType(), BD, Out);
1498 else
1499 MangleCtx.mangleBlock(cast<DeclContext>(D), BD, Out);
1500
1501 auto Result = Manglings.insert(std::make_pair(Out.str(), BD));
1502 return Result.first->first();
1503}
1504
1505llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
1506 return getModule().getNamedValue(Name);
1507}
1508
1509/// AddGlobalCtor - Add a function to the list that will be called before
1510/// main() runs.
1511void CodeGenModule::AddGlobalCtor(llvm::Function *Ctor, int Priority,
1512 llvm::Constant *AssociatedData) {
1513 // FIXME: Type coercion of void()* types.
1514 GlobalCtors.push_back(Structor(Priority, Ctor, AssociatedData));
1515}
1516
1517/// AddGlobalDtor - Add a function to the list that will be called
1518/// when the module is unloaded.
1519void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority,
1520 bool IsDtorAttrFunc) {
1521 if (CodeGenOpts.RegisterGlobalDtorsWithAtExit &&
1522 (!getContext().getTargetInfo().getTriple().isOSAIX() || IsDtorAttrFunc)) {
1523 DtorsUsingAtExit[Priority].push_back(Dtor);
1524 return;
1525 }
1526
1527 // FIXME: Type coercion of void()* types.
1528 GlobalDtors.push_back(Structor(Priority, Dtor, nullptr));
1529}
1530
1531void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) {
1532 if (Fns.empty()) return;
1533
1534 // Ctor function type is void()*.
1535 llvm::FunctionType* CtorFTy = llvm::FunctionType::get(VoidTy, false);
1536 llvm::Type *CtorPFTy = llvm::PointerType::get(CtorFTy,
1537 TheModule.getDataLayout().getProgramAddressSpace());
1538
1539 // Get the type of a ctor entry, { i32, void ()*, i8* }.
1540 llvm::StructType *CtorStructTy = llvm::StructType::get(
1541 Int32Ty, CtorPFTy, VoidPtrTy);
1542
1543 // Construct the constructor and destructor arrays.
1544 ConstantInitBuilder builder(*this);
1545 auto ctors = builder.beginArray(CtorStructTy);
1546 for (const auto &I : Fns) {
1547 auto ctor = ctors.beginStruct(CtorStructTy);
1548 ctor.addInt(Int32Ty, I.Priority);
1549 ctor.add(llvm::ConstantExpr::getBitCast(I.Initializer, CtorPFTy));
1550 if (I.AssociatedData)
1551 ctor.add(llvm::ConstantExpr::getBitCast(I.AssociatedData, VoidPtrTy));
1552 else
1553 ctor.addNullPointer(VoidPtrTy);
1554 ctor.finishAndAddTo(ctors);
1555 }
1556
1557 auto list =
1558 ctors.finishAndCreateGlobal(GlobalName, getPointerAlign(),
1559 /*constant*/ false,
1560 llvm::GlobalValue::AppendingLinkage);
1561
1562 // The LTO linker doesn't seem to like it when we set an alignment
1563 // on appending variables. Take it off as a workaround.
1564 list->setAlignment(llvm::None);
1565
1566 Fns.clear();
1567}
1568
1569llvm::GlobalValue::LinkageTypes
1570CodeGenModule::getFunctionLinkage(GlobalDecl GD) {
1571 const auto *D = cast<FunctionDecl>(GD.getDecl());
1572
1573 GVALinkage Linkage = getContext().GetGVALinkageForFunction(D);
1574
1575 if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(D))
1576 return getCXXABI().getCXXDestructorLinkage(Linkage, Dtor, GD.getDtorType());
1577
1578 if (isa<CXXConstructorDecl>(D) &&
1579 cast<CXXConstructorDecl>(D)->isInheritingConstructor() &&
1580 Context.getTargetInfo().getCXXABI().isMicrosoft()) {
1581 // Our approach to inheriting constructors is fundamentally different from
1582 // that used by the MS ABI, so keep our inheriting constructor thunks
1583 // internal rather than trying to pick an unambiguous mangling for them.
1584 return llvm::GlobalValue::InternalLinkage;
1585 }
1586
1587 return getLLVMLinkageForDeclarator(D, Linkage, /*IsConstantVariable=*/false);
1588}
1589
1590llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
1591 llvm::MDString *MDS = dyn_cast<llvm::MDString>(MD);
1592 if (!MDS) return nullptr;
1593
1594 return llvm::ConstantInt::get(Int64Ty, llvm::MD5Hash(MDS->getString()));
1595}
1596
1597void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD,
1598 const CGFunctionInfo &Info,
1599 llvm::Function *F, bool IsThunk) {
1600 unsigned CallingConv;
1601 llvm::AttributeList PAL;
1602 ConstructAttributeList(F->getName(), Info, GD, PAL, CallingConv,
1603 /*AttrOnCallSite=*/false, IsThunk);
1604 F->setAttributes(PAL);
1605 F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
1606}
1607
// Strip the first OpenCL image access qualifier (__read_only, __write_only,
// or __read_write) found in TyName, in place. At most one qualifier is
// removed; the "+ 1" also erases the space that follows the qualifier in the
// printed type name. If no qualifier is present, TyName is left unchanged.
static void removeImageAccessQualifier(std::string &TyName) {
  // Checked in the same order as the original nested branches: read_only
  // first, then write_only, then read_write.
  static const char *const Qualifiers[] = {"__read_only", "__write_only",
                                           "__read_write"};
  for (const char *Qual : Qualifiers) {
    std::string::size_type Pos = TyName.find(Qual);
    if (Pos != std::string::npos) {
      // "+ 1" for the space after the access qualifier.
      TyName.erase(Pos, std::strlen(Qual) + 1);
      return;
    }
  }
}
1627
1628// Returns the address space id that should be produced to the
1629// kernel_arg_addr_space metadata. This is always fixed to the ids
1630// as specified in the SPIR 2.0 specification in order to differentiate
1631// for example in clGetKernelArgInfo() implementation between the address
1632// spaces with targets without unique mapping to the OpenCL address spaces
1633// (basically all single AS CPUs).
1634static unsigned ArgInfoAddressSpace(LangAS AS) {
1635 switch (AS) {
1636 case LangAS::opencl_global:
1637 return 1;
1638 case LangAS::opencl_constant:
1639 return 2;
1640 case LangAS::opencl_local:
1641 return 3;
1642 case LangAS::opencl_generic:
1643 return 4; // Not in SPIR 2.0 specs.
1644 case LangAS::opencl_global_device:
1645 return 5;
1646 case LangAS::opencl_global_host:
1647 return 6;
1648 default:
1649 return 0; // Assume private.
1650 }
1651}
1652
/// Emit the kernel_arg_* metadata nodes for an OpenCL kernel Fn.
///
/// FD/CGF may both be null (declaration-only case), in which case empty
/// metadata lists are attached; otherwise one entry is produced per kernel
/// parameter, in parameter order.
void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
                                         const FunctionDecl *FD,
                                         CodeGenFunction *CGF) {
  assert(((FD && CGF) || (!FD && !CGF)) &&
         "Incorrect use - FD and CGF should either be both null or not!");
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list in the form of "key", N number of values which is
  // the same number of values as their are kernel arguments.

  const PrintingPolicy &Policy = Context.getPrintingPolicy();

  // MDNode for the kernel argument address space qualifiers.
  SmallVector<llvm::Metadata *, 8> addressQuals;

  // MDNode for the kernel argument access qualifiers (images only).
  SmallVector<llvm::Metadata *, 8> accessQuals;

  // MDNode for the kernel argument type names.
  SmallVector<llvm::Metadata *, 8> argTypeNames;

  // MDNode for the kernel argument base type names.
  SmallVector<llvm::Metadata *, 8> argBaseTypeNames;

  // MDNode for the kernel argument type qualifiers.
  SmallVector<llvm::Metadata *, 8> argTypeQuals;

  // MDNode for the kernel argument names.
  SmallVector<llvm::Metadata *, 8> argNames;

  if (FD && CGF)
    for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
      const ParmVarDecl *parm = FD->getParamDecl(i);
      QualType ty = parm->getType();
      std::string typeQuals;

      // Get image and pipe access qualifier:
      if (ty->isImageType() || ty->isPipeType()) {
        const Decl *PDecl = parm;
        // If the parameter type is a typedef, the access qualifier may live
        // on the typedef declaration rather than the parameter itself.
        if (auto *TD = dyn_cast<TypedefType>(ty))
          PDecl = TD->getDecl();
        const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
        if (A && A->isWriteOnly())
          accessQuals.push_back(llvm::MDString::get(VMContext, "write_only"));
        else if (A && A->isReadWrite())
          accessQuals.push_back(llvm::MDString::get(VMContext, "read_write"));
        else
          // read_only is the OpenCL default when no qualifier is written.
          accessQuals.push_back(llvm::MDString::get(VMContext, "read_only"));
      } else
        accessQuals.push_back(llvm::MDString::get(VMContext, "none"));

      // Get argument name.
      argNames.push_back(llvm::MDString::get(VMContext, parm->getName()));

      // Render a type as it should appear in the metadata string.
      auto getTypeSpelling = [&](QualType Ty) {
        auto typeName = Ty.getUnqualifiedType().getAsString(Policy);

        if (Ty.isCanonical()) {
          StringRef typeNameRef = typeName;
          // Turn "unsigned type" to "utype"
          if (typeNameRef.consume_front("unsigned "))
            return std::string("u") + typeNameRef.str();
          if (typeNameRef.consume_front("signed "))
            return typeNameRef.str();
        }

        return typeName;
      };

      if (ty->isPointerType()) {
        QualType pointeeTy = ty->getPointeeType();

        // Get address qualifier.
        addressQuals.push_back(
            llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(
                ArgInfoAddressSpace(pointeeTy.getAddressSpace()))));

        // Get argument type name.
        std::string typeName = getTypeSpelling(pointeeTy) + "*";
        std::string baseTypeName =
            getTypeSpelling(pointeeTy.getCanonicalType()) + "*";
        argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
        argBaseTypeNames.push_back(
            llvm::MDString::get(VMContext, baseTypeName));

        // Get argument type qualifiers:
        if (ty.isRestrictQualified())
          typeQuals = "restrict";
        if (pointeeTy.isConstQualified() ||
            (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
          typeQuals += typeQuals.empty() ? "const" : " const";
        if (pointeeTy.isVolatileQualified())
          typeQuals += typeQuals.empty() ? "volatile" : " volatile";
      } else {
        uint32_t AddrSpc = 0;
        bool isPipe = ty->isPipeType();
        // Images and pipes are reported as global address space.
        if (ty->isImageType() || isPipe)
          AddrSpc = ArgInfoAddressSpace(LangAS::opencl_global);

        addressQuals.push_back(
            llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(AddrSpc)));

        // Get argument type name.
        ty = isPipe ? ty->castAs<PipeType>()->getElementType() : ty;
        std::string typeName = getTypeSpelling(ty);
        std::string baseTypeName = getTypeSpelling(ty.getCanonicalType());

        // Remove access qualifiers on images
        // (as they are inseparable from type in clang implementation,
        // but OpenCL spec provides a special query to get access qualifier
        // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
        if (ty->isImageType()) {
          removeImageAccessQualifier(typeName);
          removeImageAccessQualifier(baseTypeName);
        }

        argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
        argBaseTypeNames.push_back(
            llvm::MDString::get(VMContext, baseTypeName));

        if (isPipe)
          typeQuals = "pipe";
      }
      argTypeQuals.push_back(llvm::MDString::get(VMContext, typeQuals));
    }

  // Attach one metadata node per category; all lists are parallel.
  Fn->setMetadata("kernel_arg_addr_space",
                  llvm::MDNode::get(VMContext, addressQuals));
  Fn->setMetadata("kernel_arg_access_qual",
                  llvm::MDNode::get(VMContext, accessQuals));
  Fn->setMetadata("kernel_arg_type",
                  llvm::MDNode::get(VMContext, argTypeNames));
  Fn->setMetadata("kernel_arg_base_type",
                  llvm::MDNode::get(VMContext, argBaseTypeNames));
  Fn->setMetadata("kernel_arg_type_qual",
                  llvm::MDNode::get(VMContext, argTypeQuals));
  // Argument names are only emitted under -cl-kernel-arg-info.
  if (getCodeGenOpts().EmitOpenCLArgMetadata)
    Fn->setMetadata("kernel_arg_name",
                    llvm::MDNode::get(VMContext, argNames));
}
1792
1793/// Determines whether the language options require us to model
1794/// unwind exceptions. We treat -fexceptions as mandating this
1795/// except under the fragile ObjC ABI with only ObjC exceptions
1796/// enabled. This means, for example, that C with -fexceptions
1797/// enables this.
1798static bool hasUnwindExceptions(const LangOptions &LangOpts) {
1799 // If exceptions are completely disabled, obviously this is false.
1800 if (!LangOpts.Exceptions) return false;
1801
1802 // If C++ exceptions are enabled, this is true.
1803 if (LangOpts.CXXExceptions) return true;
1804
1805 // If ObjC exceptions are enabled, this depends on the ABI.
1806 if (LangOpts.ObjCExceptions) {
1807 return LangOpts.ObjCRuntime.hasUnwindExceptions();
1808 }
1809
1810 return true;
1811}
1812
1813static bool requiresMemberFunctionPointerTypeMetadata(CodeGenModule &CGM,
1814 const CXXMethodDecl *MD) {
1815 // Check that the type metadata can ever actually be used by a call.
1816 if (!CGM.getCodeGenOpts().LTOUnit ||
1817 !CGM.HasHiddenLTOVisibility(MD->getParent()))
1818 return false;
1819
1820 // Only functions whose address can be taken with a member function pointer
1821 // need this sort of type metadata.
1822 return !MD->isStatic() && !MD->isVirtual() && !isa<CXXConstructorDecl>(MD) &&
1823 !isa<CXXDestructorDecl>(MD);
1824}
1825
1826std::vector<const CXXRecordDecl *>
1827CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) {
1828 llvm::SetVector<const CXXRecordDecl *> MostBases;
1829
1830 std::function<void (const CXXRecordDecl *)> CollectMostBases;
1831 CollectMostBases = [&](const CXXRecordDecl *RD) {
1832 if (RD->getNumBases() == 0)
1833 MostBases.insert(RD);
1834 for (const CXXBaseSpecifier &B : RD->bases())
1835 CollectMostBases(B.getType()->getAsCXXRecordDecl());
1836 };
1837 CollectMostBases(RD);
1838 return MostBases.takeVector();
1839}
1840
/// Apply the LLVM attributes that are only relevant when a function is being
/// *defined* in this module: unwind tables, stack protection, the
/// inline/noinline/optnone precedence chain, alignment, and CFI !type
/// metadata. D may be null (e.g. compiler-synthesized helpers); F is the
/// function being defined. NOTE: the else-if chain below encodes attribute
/// precedence (optnone > naked > noduplicate > noinline > always_inline) —
/// the ordering is load-bearing.
void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
                                                           llvm::Function *F) {
  llvm::AttrBuilder B(F->getContext());

  // -funwind-tables / -fasynchronous-unwind-tables.
  if (CodeGenOpts.UnwindTables)
    B.addUWTableAttr(llvm::UWTableKind(CodeGenOpts.UnwindTables));

  // -fstack-clash-protection lowers through the "probe-stack" attribute.
  if (CodeGenOpts.StackClashProtector)
    B.addAttribute("probe-stack", "inline-asm");

  if (!hasUnwindExceptions(LangOpts))
    B.addAttribute(llvm::Attribute::NoUnwind);

  // Stack-protector level, unless the decl opted out via no_stack_protector.
  if (!D || !D->hasAttr<NoStackProtectorAttr>()) {
    if (LangOpts.getStackProtector() == LangOptions::SSPOn)
      B.addAttribute(llvm::Attribute::StackProtect);
    else if (LangOpts.getStackProtector() == LangOptions::SSPStrong)
      B.addAttribute(llvm::Attribute::StackProtectStrong);
    else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
      B.addAttribute(llvm::Attribute::StackProtectReq);
  }

  if (!D) {
    // If we don't have a declaration to control inlining, the function isn't
    // explicitly marked as alwaysinline for semantic reasons, and inlining is
    // disabled, mark the function as noinline.
    if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline) &&
        CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining)
      B.addAttribute(llvm::Attribute::NoInline);

    F->addFnAttrs(B);
    return;
  }

  // Track whether we need to add the optnone LLVM attribute,
  // starting with the default for this optimization level.
  bool ShouldAddOptNone =
      !CodeGenOpts.DisableO0ImplyOptNone && CodeGenOpts.OptimizationLevel == 0;
  // We can't add optnone in the following cases, it won't pass the verifier.
  ShouldAddOptNone &= !D->hasAttr<MinSizeAttr>();
  ShouldAddOptNone &= !D->hasAttr<AlwaysInlineAttr>();

  // Add optnone, but do so only if the function isn't always_inline.
  if ((ShouldAddOptNone || D->hasAttr<OptimizeNoneAttr>()) &&
      !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
    B.addAttribute(llvm::Attribute::OptimizeNone);

    // OptimizeNone implies noinline; we should not be inlining such functions.
    B.addAttribute(llvm::Attribute::NoInline);

    // We still need to handle naked functions even though optnone subsumes
    // much of their semantics.
    if (D->hasAttr<NakedAttr>())
      B.addAttribute(llvm::Attribute::Naked);

    // OptimizeNone wins over OptimizeForSize and MinSize.
    F->removeFnAttr(llvm::Attribute::OptimizeForSize);
    F->removeFnAttr(llvm::Attribute::MinSize);
  } else if (D->hasAttr<NakedAttr>()) {
    // Naked implies noinline: we should not be inlining such functions.
    B.addAttribute(llvm::Attribute::Naked);
    B.addAttribute(llvm::Attribute::NoInline);
  } else if (D->hasAttr<NoDuplicateAttr>()) {
    B.addAttribute(llvm::Attribute::NoDuplicate);
  } else if (D->hasAttr<NoInlineAttr>() &&
             !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
    // Add noinline if the function isn't always_inline.
    B.addAttribute(llvm::Attribute::NoInline);
  } else if (D->hasAttr<AlwaysInlineAttr>() &&
             !F->hasFnAttribute(llvm::Attribute::NoInline)) {
    // (noinline wins over always_inline, and we can't specify both in IR)
    B.addAttribute(llvm::Attribute::AlwaysInline);
  } else if (CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
    // If we're not inlining, then force everything that isn't always_inline to
    // carry an explicit noinline attribute.
    if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline))
      B.addAttribute(llvm::Attribute::NoInline);
  } else {
    // Otherwise, propagate the inline hint attribute and potentially use its
    // absence to mark things as noinline.
    if (auto *FD = dyn_cast<FunctionDecl>(D)) {
      // Search function and template pattern redeclarations for inline.
      auto CheckForInline = [](const FunctionDecl *FD) {
        auto CheckRedeclForInline = [](const FunctionDecl *Redecl) {
          return Redecl->isInlineSpecified();
        };
        if (any_of(FD->redecls(), CheckRedeclForInline))
          return true;
        const FunctionDecl *Pattern = FD->getTemplateInstantiationPattern();
        if (!Pattern)
          return false;
        return any_of(Pattern->redecls(), CheckRedeclForInline);
      };
      if (CheckForInline(FD)) {
        B.addAttribute(llvm::Attribute::InlineHint);
      } else if (CodeGenOpts.getInlining() ==
                     CodeGenOptions::OnlyHintInlining &&
                 !FD->isInlined() &&
                 !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
        B.addAttribute(llvm::Attribute::NoInline);
      }
    }
  }

  // Add other optimization related attributes if we are optimizing this
  // function.
  if (!D->hasAttr<OptimizeNoneAttr>()) {
    if (D->hasAttr<ColdAttr>()) {
      if (!ShouldAddOptNone)
        B.addAttribute(llvm::Attribute::OptimizeForSize);
      B.addAttribute(llvm::Attribute::Cold);
    }
    if (D->hasAttr<HotAttr>())
      B.addAttribute(llvm::Attribute::Hot);
    if (D->hasAttr<MinSizeAttr>())
      B.addAttribute(llvm::Attribute::MinSize);
  }

  F->addFnAttrs(B);

  // An explicit alignment on the declaration (aligned attribute etc.) wins.
  unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
  if (alignment)
    F->setAlignment(llvm::Align(alignment));

  // -falign-functions=N applies unless the decl carries its own alignment.
  if (!D->hasAttr<AlignedAttr>())
    if (LangOpts.FunctionAlignment)
      F->setAlignment(llvm::Align(1ull << LangOpts.FunctionAlignment));

  // Some C++ ABIs require 2-byte alignment for member functions, in order to
  // reserve a bit for differentiating between virtual and non-virtual member
  // functions. If the current target's C++ ABI requires this and this is a
  // member function, set its alignment accordingly.
  if (getTarget().getCXXABI().areMemberFunctionsAligned()) {
    if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
      F->setAlignment(llvm::Align(2));
  }

  // In the cross-dso CFI mode with canonical jump tables, we want !type
  // attributes on definitions only.
  if (CodeGenOpts.SanitizeCfiCrossDso &&
      CodeGenOpts.SanitizeCfiCanonicalJumpTables) {
    if (auto *FD = dyn_cast<FunctionDecl>(D)) {
      // Skip available_externally functions. They won't be codegen'ed in the
      // current module anyway.
      if (getContext().GetGVALinkageForFunction(FD) != GVA_AvailableExternally)
        CreateFunctionTypeMetadataForIcall(FD, F);
    }
  }

  // Emit type metadata on member functions for member function pointer checks.
  // These are only ever necessary on definitions; we're guaranteed that the
  // definition will be present in the LTO unit as a result of LTO visibility.
  auto *MD = dyn_cast<CXXMethodDecl>(D);
  if (MD && requiresMemberFunctionPointerTypeMetadata(*this, MD)) {
    for (const CXXRecordDecl *Base : getMostBaseClasses(MD->getParent())) {
      llvm::Metadata *Id =
          CreateMetadataIdentifierForType(Context.getMemberPointerType(
              MD->getType(), Context.getRecordType(Base).getTypePtr()));
      F->addTypeMetadata(0, Id);
    }
  }
}
2002
2003void CodeGenModule::setLLVMFunctionFEnvAttributes(const FunctionDecl *D,
2004 llvm::Function *F) {
2005 if (D->hasAttr<StrictFPAttr>()) {
2006 llvm::AttrBuilder FuncAttrs(F->getContext());
2007 FuncAttrs.addAttribute("strictfp");
2008 F->addFnAttrs(FuncAttrs);
2009 }
2010}
2011
2012void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
2013 const Decl *D = GD.getDecl();
2014 if (isa_and_nonnull<NamedDecl>(D))
2015 setGVProperties(GV, GD);
2016 else
2017 GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
2018
2019 if (D && D->hasAttr<UsedAttr>())
2020 addUsedOrCompilerUsedGlobal(GV);
2021
2022 if (CodeGenOpts.KeepStaticConsts && D && isa<VarDecl>(D)) {
2023 const auto *VD = cast<VarDecl>(D);
2024 if (VD->getType().isConstQualified() &&
2025 VD->getStorageDuration() == SD_Static)
2026 addUsedOrCompilerUsedGlobal(GV);
2027 }
2028}
2029
2030bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
2031 llvm::AttrBuilder &Attrs) {
2032 // Add target-cpu and target-features attributes to functions. If
2033 // we have a decl for the function and it has a target attribute then
2034 // parse that and add it to the feature set.
2035 StringRef TargetCPU = getTarget().getTargetOpts().CPU;
2036 StringRef TuneCPU = getTarget().getTargetOpts().TuneCPU;
2037 std::vector<std::string> Features;
2038 const auto *FD = dyn_cast_or_null<FunctionDecl>(GD.getDecl());
2039 FD = FD ? FD->getMostRecentDecl() : FD;
2040 const auto *TD = FD ? FD->getAttr<TargetAttr>() : nullptr;
2041 const auto *SD = FD ? FD->getAttr<CPUSpecificAttr>() : nullptr;
2042 const auto *TC = FD ? FD->getAttr<TargetClonesAttr>() : nullptr;
2043 bool AddedAttr = false;
2044 if (TD || SD || TC) {
2045 llvm::StringMap<bool> FeatureMap;
2046 getContext().getFunctionFeatureMap(FeatureMap, GD);
2047
2048 // Produce the canonical string for this set of features.
2049 for (const llvm::StringMap<bool>::value_type &Entry : FeatureMap)
2050 Features.push_back((Entry.getValue() ? "+" : "-") + Entry.getKey().str());
2051
2052 // Now add the target-cpu and target-features to the function.
2053 // While we populated the feature map above, we still need to
2054 // get and parse the target attribute so we can get the cpu for
2055 // the function.
2056 if (TD) {
2057 ParsedTargetAttr ParsedAttr = TD->parse();
2058 if (!ParsedAttr.Architecture.empty() &&
2059 getTarget().isValidCPUName(ParsedAttr.Architecture)) {
2060 TargetCPU = ParsedAttr.Architecture;
2061 TuneCPU = ""; // Clear the tune CPU.
2062 }
2063 if (!ParsedAttr.Tune.empty() &&
2064 getTarget().isValidCPUName(ParsedAttr.Tune))
2065 TuneCPU = ParsedAttr.Tune;
2066 }
2067
2068 if (SD) {
2069 // Apply the given CPU name as the 'tune-cpu' so that the optimizer can
2070 // favor this processor.
2071 TuneCPU = getTarget().getCPUSpecificTuneName(
2072 SD->getCPUName(GD.getMultiVersionIndex())->getName());
2073 }
2074 } else {
2075 // Otherwise just add the existing target cpu and target features to the
2076 // function.
2077 Features = getTarget().getTargetOpts().Features;
2078 }
2079
2080 if (!TargetCPU.empty()) {
2081 Attrs.addAttribute("target-cpu", TargetCPU);
2082 AddedAttr = true;
2083 }
2084 if (!TuneCPU.empty()) {
2085 Attrs.addAttribute("tune-cpu", TuneCPU);
2086 AddedAttr = true;
2087 }
2088 if (!Features.empty()) {
2089 llvm::sort(Features);
2090 Attrs.addAttribute("target-features", llvm::join(Features, ","));
2091 AddedAttr = true;
2092 }
2093
2094 return AddedAttr;
2095}
2096
2097void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
2098 llvm::GlobalObject *GO) {
2099 const Decl *D = GD.getDecl();
2100 SetCommonAttributes(GD, GO);
2101
2102 if (D) {
2103 if (auto *GV = dyn_cast<llvm::GlobalVariable>(GO)) {
2104 if (D->hasAttr<RetainAttr>())
2105 addUsedGlobal(GV);
2106 if (auto *SA = D->getAttr<PragmaClangBSSSectionAttr>())
2107 GV->addAttribute("bss-section", SA->getName());
2108 if (auto *SA = D->getAttr<PragmaClangDataSectionAttr>())
2109 GV->addAttribute("data-section", SA->getName());
2110 if (auto *SA = D->getAttr<PragmaClangRodataSectionAttr>())
2111 GV->addAttribute("rodata-section", SA->getName());
2112 if (auto *SA = D->getAttr<PragmaClangRelroSectionAttr>())
2113 GV->addAttribute("relro-section", SA->getName());
2114 }
2115
2116 if (auto *F = dyn_cast<llvm::Function>(GO)) {
2117 if (D->hasAttr<RetainAttr>())
2118 addUsedGlobal(F);
2119 if (auto *SA = D->getAttr<PragmaClangTextSectionAttr>())
2120 if (!D->getAttr<SectionAttr>())
2121 F->addFnAttr("implicit-section-name", SA->getName());
2122
2123 llvm::AttrBuilder Attrs(F->getContext());
2124 if (GetCPUAndFeaturesAttributes(GD, Attrs)) {
2125 // We know that GetCPUAndFeaturesAttributes will always have the
2126 // newest set, since it has the newest possible FunctionDecl, so the
2127 // new ones should replace the old.
2128 llvm::AttributeMask RemoveAttrs;
2129 RemoveAttrs.addAttribute("target-cpu");
2130 RemoveAttrs.addAttribute("target-features");
2131 RemoveAttrs.addAttribute("tune-cpu");
2132 F->removeFnAttrs(RemoveAttrs);
2133 F->addFnAttrs(Attrs);
2134 }
2135 }
2136
2137 if (const auto *CSA = D->getAttr<CodeSegAttr>())
2138 GO->setSection(CSA->getName());
2139 else if (const auto *SA = D->getAttr<SectionAttr>())
2140 GO->setSection(SA->getName());
2141 }
2142
2143 getTargetCodeGenInfo().setTargetAttributes(D, GO, *this);
2144}
2145
2146void CodeGenModule::SetInternalFunctionAttributes(GlobalDecl GD,
2147 llvm::Function *F,
2148 const CGFunctionInfo &FI) {
2149 const Decl *D = GD.getDecl();
2150 SetLLVMFunctionAttributes(GD, FI, F, /*IsThunk=*/false);
2151 SetLLVMFunctionAttributesForDefinition(D, F);
2152
2153 F->setLinkage(llvm::Function::InternalLinkage);
2154
2155 setNonAliasAttributes(GD, F);
2156}
2157
2158static void setLinkageForGV(llvm::GlobalValue *GV, const NamedDecl *ND) {
2159 // Set linkage and visibility in case we never see a definition.
2160 LinkageInfo LV = ND->getLinkageAndVisibility();
2161 // Don't set internal linkage on declarations.
2162 // "extern_weak" is overloaded in LLVM; we probably should have
2163 // separate linkage types for this.
2164 if (isExternallyVisible(LV.getLinkage()) &&
2165 (ND->hasAttr<WeakAttr>() || ND->isWeakImported()))
2166 GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
2167}
2168
2169void CodeGenModule::CreateFunctionTypeMetadataForIcall(const FunctionDecl *FD,
2170 llvm::Function *F) {
2171 // Only if we are checking indirect calls.
2172 if (!LangOpts.Sanitize.has(SanitizerKind::CFIICall))
2173 return;
2174
2175 // Non-static class methods are handled via vtable or member function pointer
2176 // checks elsewhere.
2177 if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic())
2178 return;
2179
2180 llvm::Metadata *MD = CreateMetadataIdentifierForType(FD->getType());
2181 F->addTypeMetadata(0, MD);
2182 F->addTypeMetadata(0, CreateMetadataIdentifierGeneralized(FD->getType()));
2183
2184 // Emit a hash-based bit set entry for cross-DSO calls.
2185 if (CodeGenOpts.SanitizeCfiCrossDso)
2186 if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD))
2187 F->addTypeMetadata(0, llvm::ConstantAsMetadata::get(CrossDsoTypeId));
2188}
2189
/// Set function-level attributes and properties on F that are appropriate
/// for its declaration (intrinsic handling, this-return, linkage/visibility,
/// sections, error/callback attributes, CFI type metadata, ...). These are
/// declaration-level settings; a later definition may override some of them.
void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
                                          bool IsIncompleteFunction,
                                          bool IsThunk) {

  if (llvm::Intrinsic::ID IID = F->getIntrinsicID()) {
    // If this is an intrinsic function, set the function's attributes
    // to the intrinsic's attributes.
    F->setAttributes(llvm::Intrinsic::getAttributes(getLLVMContext(), IID));
    return;
  }

  const auto *FD = cast<FunctionDecl>(GD.getDecl());

  if (!IsIncompleteFunction)
    SetLLVMFunctionAttributes(GD, getTypes().arrangeGlobalDeclaration(GD), F,
                              IsThunk);

  // Add the Returned attribute for "this", except for iOS 5 and earlier
  // where substantial code, including the libstdc++ dylib, was compiled with
  // GCC and does not actually return "this".
  if (!IsThunk && getCXXABI().HasThisReturn(GD) &&
      !(getTriple().isiOS() && getTriple().isOSVersionLT(6))) {
    assert(!F->arg_empty() &&
           F->arg_begin()->getType()
               ->canLosslesslyBitCastTo(F->getReturnType()) &&
           "unexpected this return");
    F->addParamAttr(0, llvm::Attribute::Returned);
  }

  // Only a few attributes are set on declarations; these may later be
  // overridden by a definition.

  setLinkageForGV(F, FD);
  setGVProperties(F, FD);

  // Setup target-specific attributes.
  if (!IsIncompleteFunction && F->isDeclaration())
    getTargetCodeGenInfo().setTargetAttributes(FD, F, *this);

  // __declspec(code_seg(...)) takes precedence over a plain section attribute.
  if (const auto *CSA = FD->getAttr<CodeSegAttr>())
    F->setSection(CSA->getName());
  else if (const auto *SA = FD->getAttr<SectionAttr>())
    F->setSection(SA->getName());

  // __attribute__((error/warning("..."))) diagnostics are carried to the
  // backend as "dontcall-*" function attributes.
  if (const auto *EA = FD->getAttr<ErrorAttr>()) {
    if (EA->isError())
      F->addFnAttr("dontcall-error", EA->getUserDiagnostic());
    else if (EA->isWarning())
      F->addFnAttr("dontcall-warn", EA->getUserDiagnostic());
  }

  // If we plan on emitting this inline builtin, we can't treat it as a builtin.
  if (FD->isInlineBuiltinDeclaration()) {
    const FunctionDecl *FDBody;
    bool HasBody = FD->hasBody(FDBody);
    (void)HasBody;
    assert(HasBody && "Inline builtin declarations should always have an "
                      "available body!");
    if (shouldEmitFunction(FDBody))
      F->addFnAttr(llvm::Attribute::NoBuiltin);
  }

  if (FD->isReplaceableGlobalAllocationFunction()) {
    // A replaceable global allocation function does not act like a builtin by
    // default, only if it is invoked by a new-expression or delete-expression.
    F->addFnAttr(llvm::Attribute::NoBuiltin);
  }

  // Constructors/destructors and virtual methods may have their address
  // folded with other functions (their address is not significant).
  if (isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD))
    F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  else if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isVirtual())
      F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);

  // Don't emit entries for function declarations in the cross-DSO mode. This
  // is handled with better precision by the receiving DSO. But if jump tables
  // are non-canonical then we need type metadata in order to produce the local
  // jump table.
  if (!CodeGenOpts.SanitizeCfiCrossDso ||
      !CodeGenOpts.SanitizeCfiCanonicalJumpTables)
    CreateFunctionTypeMetadataForIcall(FD, F);

  if (getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>())
    getOpenMPRuntime().emitDeclareSimdFunction(FD, F);

  if (const auto *CB = FD->getAttr<CallbackAttr>()) {
    // Annotate the callback behavior as metadata:
    //  - The callback callee (as argument number).
    //  - The callback payloads (as argument numbers).
    llvm::LLVMContext &Ctx = F->getContext();
    llvm::MDBuilder MDB(Ctx);

    // The payload indices are all but the first one in the encoding. The first
    // identifies the callback callee.
    int CalleeIdx = *CB->encoding_begin();
    ArrayRef<int> PayloadIndices(CB->encoding_begin() + 1, CB->encoding_end());
    F->addMetadata(llvm::LLVMContext::MD_callback,
                   *llvm::MDNode::get(Ctx, {MDB.createCallbackEncoding(
                                               CalleeIdx, PayloadIndices,
                                               /* VarArgsArePassed */ false)}));
  }
}
2292
/// Add a global to llvm.used, pinning it in the output even if otherwise
/// unreferenced. Functions may still be declarations here (their bodies are
/// emitted later); all other globals must already be defined.
void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) {
  assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&
         "Only globals with definition can force usage.");
  LLVMUsed.emplace_back(GV);
}
2298
/// Add a global to llvm.compiler.used. The global must be defined; note the
/// stricter precondition than addUsedGlobal, which exempts functions.
void CodeGenModule::addCompilerUsedGlobal(llvm::GlobalValue *GV) {
  assert(!GV->isDeclaration() &&
         "Only globals with definition can force usage.");
  LLVMCompilerUsed.emplace_back(GV);
}
2304
2305void CodeGenModule::addUsedOrCompilerUsedGlobal(llvm::GlobalValue *GV) {
2306 assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&(static_cast <bool> ((isa<llvm::Function>(GV) || !
GV->isDeclaration()) && "Only globals with definition can force usage."
) ? void (0) : __assert_fail ("(isa<llvm::Function>(GV) || !GV->isDeclaration()) && \"Only globals with definition can force usage.\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 2307, __extension__ __PRETTY_FUNCTION__
))
2307 "Only globals with definition can force usage.")(static_cast <bool> ((isa<llvm::Function>(GV) || !
GV->isDeclaration()) && "Only globals with definition can force usage."
) ? void (0) : __assert_fail ("(isa<llvm::Function>(GV) || !GV->isDeclaration()) && \"Only globals with definition can force usage.\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 2307, __extension__ __PRETTY_FUNCTION__
))
;
2308 if (getTriple().isOSBinFormatELF())
2309 LLVMCompilerUsed.emplace_back(GV);
2310 else
2311 LLVMUsed.emplace_back(GV);
2312}
2313
2314static void emitUsed(CodeGenModule &CGM, StringRef Name,
2315 std::vector<llvm::WeakTrackingVH> &List) {
2316 // Don't create llvm.used if there is no need.
2317 if (List.empty())
2318 return;
2319
2320 // Convert List to what ConstantArray needs.
2321 SmallVector<llvm::Constant*, 8> UsedArray;
2322 UsedArray.resize(List.size());
2323 for (unsigned i = 0, e = List.size(); i != e; ++i) {
2324 UsedArray[i] =
2325 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
2326 cast<llvm::Constant>(&*List[i]), CGM.Int8PtrTy);
2327 }
2328
2329 if (UsedArray.empty())
2330 return;
2331 llvm::ArrayType *ATy = llvm::ArrayType::get(CGM.Int8PtrTy, UsedArray.size());
2332
2333 auto *GV = new llvm::GlobalVariable(
2334 CGM.getModule(), ATy, false, llvm::GlobalValue::AppendingLinkage,
2335 llvm::ConstantArray::get(ATy, UsedArray), Name);
2336
2337 GV->setSection("llvm.metadata");
2338}
2339
/// Emit the llvm.used and llvm.compiler.used arrays accumulated during
/// codegen (no-ops when the corresponding list is empty).
void CodeGenModule::emitLLVMUsed() {
  emitUsed(*this, "llvm.used", LLVMUsed);
  emitUsed(*this, "llvm.compiler.used", LLVMCompilerUsed);
}
2344
2345void CodeGenModule::AppendLinkerOptions(StringRef Opts) {
2346 auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opts);
2347 LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
2348}
2349
2350void CodeGenModule::AddDetectMismatch(StringRef Name, StringRef Value) {
2351 llvm::SmallString<32> Opt;
2352 getTargetCodeGenInfo().getDetectMismatchOption(Name, Value, Opt);
2353 if (Opt.empty())
2354 return;
2355 auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
2356 LinkerOptionsMetadata.push_back(llvm::MDNode::get(getLLVMContext(), MDOpts));
2357}
2358
2359void CodeGenModule::AddDependentLib(StringRef Lib) {
2360 auto &C = getLLVMContext();
2361 if (getTarget().getTriple().isOSBinFormatELF()) {
2362 ELFDependentLibraries.push_back(
2363 llvm::MDNode::get(C, llvm::MDString::get(C, Lib)));
2364 return;
2365 }
2366
2367 llvm::SmallString<24> Opt;
2368 getTargetCodeGenInfo().getDependentLibraryOption(Lib, Opt);
2369 auto *MDOpts = llvm::MDString::get(getLLVMContext(), Opt);
2370 LinkerOptionsMetadata.push_back(llvm::MDNode::get(C, MDOpts));
2371}
2372
2373/// Add link options implied by the given module, including modules
2374/// it depends on, using a postorder walk.
2375static void addLinkOptionsPostorder(CodeGenModule &CGM, Module *Mod,
2376 SmallVectorImpl<llvm::MDNode *> &Metadata,
2377 llvm::SmallPtrSet<Module *, 16> &Visited) {
2378 // Import this module's parent.
2379 if (Mod->Parent && Visited.insert(Mod->Parent).second) {
2380 addLinkOptionsPostorder(CGM, Mod->Parent, Metadata, Visited);
2381 }
2382
2383 // Import this module's dependencies.
2384 for (Module *Import : llvm::reverse(Mod->Imports)) {
2385 if (Visited.insert(Import).second)
2386 addLinkOptionsPostorder(CGM, Import, Metadata, Visited);
2387 }
2388
2389 // Add linker options to link against the libraries/frameworks
2390 // described by this module.
2391 llvm::LLVMContext &Context = CGM.getLLVMContext();
2392 bool IsELF = CGM.getTarget().getTriple().isOSBinFormatELF();
2393
2394 // For modules that use export_as for linking, use that module
2395 // name instead.
2396 if (Mod->UseExportAsModuleLinkName)
2397 return;
2398
2399 for (const Module::LinkLibrary &LL : llvm::reverse(Mod->LinkLibraries)) {
2400 // Link against a framework. Frameworks are currently Darwin only, so we
2401 // don't to ask TargetCodeGenInfo for the spelling of the linker option.
2402 if (LL.IsFramework) {
2403 llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"),
2404 llvm::MDString::get(Context, LL.Library)};
2405
2406 Metadata.push_back(llvm::MDNode::get(Context, Args));
2407 continue;
2408 }
2409
2410 // Link against a library.
2411 if (IsELF) {
2412 llvm::Metadata *Args[2] = {
2413 llvm::MDString::get(Context, "lib"),
2414 llvm::MDString::get(Context, LL.Library),
2415 };
2416 Metadata.push_back(llvm::MDNode::get(Context, Args));
2417 } else {
2418 llvm::SmallString<24> Opt;
2419 CGM.getTargetCodeGenInfo().getDependentLibraryOption(LL.Library, Opt);
2420 auto *OptString = llvm::MDString::get(Context, Opt);
2421 Metadata.push_back(llvm::MDNode::get(Context, OptString));
2422 }
2423 }
2424}
2425
2426void CodeGenModule::EmitModuleLinkOptions() {
2427 // Collect the set of all of the modules we want to visit to emit link
2428 // options, which is essentially the imported modules and all of their
2429 // non-explicit child modules.
2430 llvm::SetVector<clang::Module *> LinkModules;
2431 llvm::SmallPtrSet<clang::Module *, 16> Visited;
2432 SmallVector<clang::Module *, 16> Stack;
2433
2434 // Seed the stack with imported modules.
2435 for (Module *M : ImportedModules) {
2436 // Do not add any link flags when an implementation TU of a module imports
2437 // a header of that same module.
2438 if (M->getTopLevelModuleName() == getLangOpts().CurrentModule &&
2439 !getLangOpts().isCompilingModule())
2440 continue;
2441 if (Visited.insert(M).second)
2442 Stack.push_back(M);
2443 }
2444
2445 // Find all of the modules to import, making a little effort to prune
2446 // non-leaf modules.
2447 while (!Stack.empty()) {
2448 clang::Module *Mod = Stack.pop_back_val();
2449
2450 bool AnyChildren = false;
2451
2452 // Visit the submodules of this module.
2453 for (const auto &SM : Mod->submodules()) {
2454 // Skip explicit children; they need to be explicitly imported to be
2455 // linked against.
2456 if (SM->IsExplicit)
2457 continue;
2458
2459 if (Visited.insert(SM).second) {
2460 Stack.push_back(SM);
2461 AnyChildren = true;
2462 }
2463 }
2464
2465 // We didn't find any children, so add this module to the list of
2466 // modules to link against.
2467 if (!AnyChildren) {
2468 LinkModules.insert(Mod);
2469 }
2470 }
2471
2472 // Add link options for all of the imported modules in reverse topological
2473 // order. We don't do anything to try to order import link flags with respect
2474 // to linker options inserted by things like #pragma comment().
2475 SmallVector<llvm::MDNode *, 16> MetadataArgs;
2476 Visited.clear();
2477 for (Module *M : LinkModules)
2478 if (Visited.insert(M).second)
2479 addLinkOptionsPostorder(*this, M, MetadataArgs, Visited);
2480 std::reverse(MetadataArgs.begin(), MetadataArgs.end());
2481 LinkerOptionsMetadata.append(MetadataArgs.begin(), MetadataArgs.end());
2482
2483 // Add the linker options metadata flag.
2484 auto *NMD = getModule().getOrInsertNamedMetadata("llvm.linker.options");
2485 for (auto *MD : LinkerOptionsMetadata)
2486 NMD->addOperand(MD);
2487}
2488
/// Emit all deferred declarations (and vtables) that have become referenced.
/// Recurses until no further work is scheduled.
void CodeGenModule::EmitDeferred() {
  // Emit deferred declare target declarations.
  if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
    getOpenMPRuntime().emitDeferredTargetDecls();

  // Emit code for any potentially referenced deferred decls. Since a
  // previously unused static decl may become used during the generation of code
  // for a static function, iterate until no changes are made.

  if (!DeferredVTables.empty()) {
    EmitDeferredVTables();

    // Emitting a vtable doesn't directly cause more vtables to
    // become deferred, although it can cause functions to be
    // emitted that then need those vtables.
    assert(DeferredVTables.empty());
  }

  // Emit CUDA/HIP static device variables referenced by host code only.
  // Note we should not clear CUDADeviceVarODRUsedByHost since it is still
  // needed for further handling.
  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
    llvm::append_range(DeferredDeclsToEmit,
                       getContext().CUDADeviceVarODRUsedByHost);

  // Stop if we're out of both deferred vtables and deferred declarations.
  if (DeferredDeclsToEmit.empty())
    return;

  // Grab the list of decls to emit. If EmitGlobalDefinition schedules more
  // work, it will not interfere with this.
  std::vector<GlobalDecl> CurDeclsToEmit;
  CurDeclsToEmit.swap(DeferredDeclsToEmit);

  for (GlobalDecl &D : CurDeclsToEmit) {
    // We should call GetAddrOfGlobal with IsForDefinition set to true in order
    // to get GlobalValue with exactly the type we need, not something that
    // might had been created for another decl with the same mangled name but
    // different type.
    llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>(
        GetAddrOfGlobal(D, ForDefinition));

    // In case of different address spaces, we may still get a cast, even with
    // IsForDefinition equal to true. Query mangled names table to get
    // GlobalValue.
    if (!GV)
      GV = GetGlobalValue(getMangledName(D));

    // Make sure GetGlobalValue returned non-null.
    assert(GV);

    // Check to see if we've already emitted this. This is necessary
    // for a couple of reasons: first, decls can end up in the
    // deferred-decls queue multiple times, and second, decls can end
    // up with definitions in unusual ways (e.g. by an extern inline
    // function acquiring a strong function redefinition). Just
    // ignore these cases.
    if (!GV->isDeclaration())
      continue;

    // If this is OpenMP, check if it is legal to emit this global normally.
    if (LangOpts.OpenMP && OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(D))
      continue;

    // Otherwise, emit the definition and move on to the next one.
    EmitGlobalDefinition(D, GV);

    // If we found out that we need to emit more decls, do that recursively.
    // This has the advantage that the decls are emitted in a DFS and related
    // ones are close together, which is convenient for testing.
    if (!DeferredVTables.empty() || !DeferredDeclsToEmit.empty()) {
      EmitDeferred();
      assert(DeferredVTables.empty() && DeferredDeclsToEmit.empty());
    }
  }
}
2565
void CodeGenModule::EmitVTablesOpportunistically() {
  // Try to emit external vtables as available_externally if they have emitted
  // all inlined virtual functions. It runs after EmitDeferred() and therefore
  // is not allowed to create new references to things that need to be emitted
  // lazily. Note that it also uses fact that we eagerly emitting RTTI.

  assert((OpportunisticVTables.empty() || shouldOpportunisticallyEmitVTables())
         && "Only emit opportunistic vtables with optimizations");

  for (const CXXRecordDecl *RD : OpportunisticVTables) {
    assert(getVTables().isVTableExternal(RD) &&
           "This queue should only contain external vtables");
    // Only emit when the ABI says a speculative emission is safe.
    if (getCXXABI().canSpeculativelyEmitVTable(RD))
      VTables.GenerateClassData(RD);
  }
  // The queue is processed exactly once.
  OpportunisticVTables.clear();
}
2583
2584void CodeGenModule::EmitGlobalAnnotations() {
2585 if (Annotations.empty())
2586 return;
2587
2588 // Create a new global variable for the ConstantStruct in the Module.
2589 llvm::Constant *Array = llvm::ConstantArray::get(llvm::ArrayType::get(
2590 Annotations[0]->getType(), Annotations.size()), Annotations);
2591 auto *gv = new llvm::GlobalVariable(getModule(), Array->getType(), false,
2592 llvm::GlobalValue::AppendingLinkage,
2593 Array, "llvm.global.annotations");
2594 gv->setSection(AnnotationSection);
2595}
2596
2597llvm::Constant *CodeGenModule::EmitAnnotationString(StringRef Str) {
2598 llvm::Constant *&AStr = AnnotationStrings[Str];
2599 if (AStr)
2600 return AStr;
2601
2602 // Not found yet, create a new global.
2603 llvm::Constant *s = llvm::ConstantDataArray::getString(getLLVMContext(), Str);
2604 auto *gv =
2605 new llvm::GlobalVariable(getModule(), s->getType(), true,
2606 llvm::GlobalValue::PrivateLinkage, s, ".str");
2607 gv->setSection(AnnotationSection);
2608 gv->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2609 AStr = gv;
2610 return gv;
2611}
2612
2613llvm::Constant *CodeGenModule::EmitAnnotationUnit(SourceLocation Loc) {
2614 SourceManager &SM = getContext().getSourceManager();
2615 PresumedLoc PLoc = SM.getPresumedLoc(Loc);
2616 if (PLoc.isValid())
2617 return EmitAnnotationString(PLoc.getFilename());
2618 return EmitAnnotationString(SM.getBufferName(Loc));
2619}
2620
2621llvm::Constant *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) {
2622 SourceManager &SM = getContext().getSourceManager();
2623 PresumedLoc PLoc = SM.getPresumedLoc(L);
2624 unsigned LineNo = PLoc.isValid() ? PLoc.getLine() :
2625 SM.getExpansionLineNumber(L);
2626 return llvm::ConstantInt::get(Int32Ty, LineNo);
2627}
2628
2629llvm::Constant *CodeGenModule::EmitAnnotationArgs(const AnnotateAttr *Attr) {
2630 ArrayRef<Expr *> Exprs = {Attr->args_begin(), Attr->args_size()};
2631 if (Exprs.empty())
2632 return llvm::ConstantPointerNull::get(GlobalsInt8PtrTy);
2633
2634 llvm::FoldingSetNodeID ID;
2635 for (Expr *E : Exprs) {
2636 ID.Add(cast<clang::ConstantExpr>(E)->getAPValueResult());
2637 }
2638 llvm::Constant *&Lookup = AnnotationArgs[ID.ComputeHash()];
2639 if (Lookup)
2640 return Lookup;
2641
2642 llvm::SmallVector<llvm::Constant *, 4> LLVMArgs;
2643 LLVMArgs.reserve(Exprs.size());
2644 ConstantEmitter ConstEmiter(*this);
2645 llvm::transform(Exprs, std::back_inserter(LLVMArgs), [&](const Expr *E) {
2646 const auto *CE = cast<clang::ConstantExpr>(E);
2647 return ConstEmiter.emitAbstract(CE->getBeginLoc(), CE->getAPValueResult(),
2648 CE->getType());
2649 });
2650 auto *Struct = llvm::ConstantStruct::getAnon(LLVMArgs);
2651 auto *GV = new llvm::GlobalVariable(getModule(), Struct->getType(), true,
2652 llvm::GlobalValue::PrivateLinkage, Struct,
2653 ".args");
2654 GV->setSection(AnnotationSection);
2655 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2656 auto *Bitcasted = llvm::ConstantExpr::getBitCast(GV, GlobalsInt8PtrTy);
2657
2658 Lookup = Bitcasted;
2659 return Bitcasted;
2660}
2661
2662llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
2663 const AnnotateAttr *AA,
2664 SourceLocation L) {
2665 // Get the globals for file name, annotation, and the line number.
2666 llvm::Constant *AnnoGV = EmitAnnotationString(AA->getAnnotation()),
2667 *UnitGV = EmitAnnotationUnit(L),
2668 *LineNoCst = EmitAnnotationLineNo(L),
2669 *Args = EmitAnnotationArgs(AA);
2670
2671 llvm::Constant *GVInGlobalsAS = GV;
2672 if (GV->getAddressSpace() !=
2673 getDataLayout().getDefaultGlobalsAddressSpace()) {
2674 GVInGlobalsAS = llvm::ConstantExpr::getAddrSpaceCast(
2675 GV, GV->getValueType()->getPointerTo(
2676 getDataLayout().getDefaultGlobalsAddressSpace()));
2677 }
2678
2679 // Create the ConstantStruct for the global annotation.
2680 llvm::Constant *Fields[] = {
2681 llvm::ConstantExpr::getBitCast(GVInGlobalsAS, GlobalsInt8PtrTy),
2682 llvm::ConstantExpr::getBitCast(AnnoGV, GlobalsInt8PtrTy),
2683 llvm::ConstantExpr::getBitCast(UnitGV, GlobalsInt8PtrTy),
2684 LineNoCst,
2685 Args,
2686 };
2687 return llvm::ConstantStruct::getAnon(Fields);
2688}
2689
2690void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D,
2691 llvm::GlobalValue *GV) {
2692 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute")(static_cast <bool> (D->hasAttr<AnnotateAttr>(
) && "no annotate attribute") ? void (0) : __assert_fail
("D->hasAttr<AnnotateAttr>() && \"no annotate attribute\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 2692, __extension__ __PRETTY_FUNCTION__
))
;
2693 // Get the struct elements for these annotations.
2694 for (const auto *I : D->specific_attrs<AnnotateAttr>())
2695 Annotations.push_back(EmitAnnotateAttr(GV, I, D->getLocation()));
2696}
2697
2698bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind, llvm::Function *Fn,
2699 SourceLocation Loc) const {
2700 const auto &NoSanitizeL = getContext().getNoSanitizeList();
2701 // NoSanitize by function name.
2702 if (NoSanitizeL.containsFunction(Kind, Fn->getName()))
2703 return true;
2704 // NoSanitize by location.
2705 if (Loc.isValid())
2706 return NoSanitizeL.containsLocation(Kind, Loc);
2707 // If location is unknown, this may be a compiler-generated function. Assume
2708 // it's located in the main file.
2709 auto &SM = Context.getSourceManager();
2710 if (const auto *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
2711 return NoSanitizeL.containsFile(Kind, MainFile->getName());
2712 }
2713 return false;
2714}
2715
2716bool CodeGenModule::isInNoSanitizeList(llvm::GlobalVariable *GV,
2717 SourceLocation Loc, QualType Ty,
2718 StringRef Category) const {
2719 // For now globals can be ignored only in ASan and KASan.
2720 const SanitizerMask EnabledAsanMask =
2721 LangOpts.Sanitize.Mask &
2722 (SanitizerKind::Address | SanitizerKind::KernelAddress |
2723 SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress |
2724 SanitizerKind::MemTag);
2725 if (!EnabledAsanMask)
2726 return false;
2727 const auto &NoSanitizeL = getContext().getNoSanitizeList();
2728 if (NoSanitizeL.containsGlobal(EnabledAsanMask, GV->getName(), Category))
2729 return true;
2730 if (NoSanitizeL.containsLocation(EnabledAsanMask, Loc, Category))
2731 return true;
2732 // Check global type.
2733 if (!Ty.isNull()) {
2734 // Drill down the array types: if global variable of a fixed type is
2735 // not sanitized, we also don't instrument arrays of them.
2736 while (auto AT = dyn_cast<ArrayType>(Ty.getTypePtr()))
2737 Ty = AT->getElementType();
2738 Ty = Ty.getCanonicalType().getUnqualifiedType();
2739 // Only record types (classes, structs etc.) are ignored.
2740 if (Ty->isRecordType()) {
2741 std::string TypeStr = Ty.getAsString(getContext().getPrintingPolicy());
2742 if (NoSanitizeL.containsType(EnabledAsanMask, TypeStr, Category))
2743 return true;
2744 }
2745 }
2746 return false;
2747}
2748
2749bool CodeGenModule::imbueXRayAttrs(llvm::Function *Fn, SourceLocation Loc,
2750 StringRef Category) const {
2751 const auto &XRayFilter = getContext().getXRayFilter();
2752 using ImbueAttr = XRayFunctionFilter::ImbueAttribute;
2753 auto Attr = ImbueAttr::NONE;
2754 if (Loc.isValid())
2755 Attr = XRayFilter.shouldImbueLocation(Loc, Category);
2756 if (Attr == ImbueAttr::NONE)
2757 Attr = XRayFilter.shouldImbueFunction(Fn->getName());
2758 switch (Attr) {
2759 case ImbueAttr::NONE:
2760 return false;
2761 case ImbueAttr::ALWAYS:
2762 Fn->addFnAttr("function-instrument", "xray-always");
2763 break;
2764 case ImbueAttr::ALWAYS_ARG1:
2765 Fn->addFnAttr("function-instrument", "xray-always");
2766 Fn->addFnAttr("xray-log-args", "1");
2767 break;
2768 case ImbueAttr::NEVER:
2769 Fn->addFnAttr("function-instrument", "xray-never");
2770 break;
2771 }
2772 return true;
2773}
2774
2775bool CodeGenModule::isProfileInstrExcluded(llvm::Function *Fn,
2776 SourceLocation Loc) const {
2777 const auto &ProfileList = getContext().getProfileList();
2778 // If the profile list is empty, then instrument everything.
2779 if (ProfileList.isEmpty())
2780 return false;
2781 CodeGenOptions::ProfileInstrKind Kind = getCodeGenOpts().getProfileInstr();
2782 // First, check the function name.
2783 Optional<bool> V = ProfileList.isFunctionExcluded(Fn->getName(), Kind);
2784 if (V.hasValue())
2785 return *V;
2786 // Next, check the source location.
2787 if (Loc.isValid()) {
2788 Optional<bool> V = ProfileList.isLocationExcluded(Loc, Kind);
2789 if (V.hasValue())
2790 return *V;
2791 }
2792 // If location is unknown, this may be a compiler-generated function. Assume
2793 // it's located in the main file.
2794 auto &SM = Context.getSourceManager();
2795 if (const auto *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
2796 Optional<bool> V = ProfileList.isFileExcluded(MainFile->getName(), Kind);
2797 if (V.hasValue())
2798 return *V;
2799 }
2800 return ProfileList.getDefault();
2801}
2802
2803bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
2804 // Never defer when EmitAllDecls is specified.
2805 if (LangOpts.EmitAllDecls)
2806 return true;
2807
2808 if (CodeGenOpts.KeepStaticConsts) {
2809 const auto *VD = dyn_cast<VarDecl>(Global);
2810 if (VD && VD->getType().isConstQualified() &&
2811 VD->getStorageDuration() == SD_Static)
2812 return true;
2813 }
2814
2815 return getContext().DeclMustBeEmitted(Global);
2816}
2817
2818bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
2819 // In OpenMP 5.0 variables and function may be marked as
2820 // device_type(host/nohost) and we should not emit them eagerly unless we sure
2821 // that they must be emitted on the host/device. To be sure we need to have
2822 // seen a declare target with an explicit mentioning of the function, we know
2823 // we have if the level of the declare target attribute is -1. Note that we
2824 // check somewhere else if we should emit this at all.
2825 if (LangOpts.OpenMP >= 50 && !LangOpts.OpenMPSimd) {
2826 llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
2827 OMPDeclareTargetDeclAttr::getActiveAttr(Global);
2828 if (!ActiveAttr || (*ActiveAttr)->getLevel() != (unsigned)-1)
2829 return false;
2830 }
2831
2832 if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
2833 if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
2834 // Implicit template instantiations may change linkage if they are later
2835 // explicitly instantiated, so they should not be emitted eagerly.
2836 return false;
2837 }
2838 if (const auto *VD = dyn_cast<VarDecl>(Global))
2839 if (Context.getInlineVariableDefinitionKind(VD) ==
2840 ASTContext::InlineVariableDefinitionKind::WeakUnknown)
2841 // A definition of an inline constexpr static data member may change
2842 // linkage later if it's redeclared outside the class.
2843 return false;
2844 // If OpenMP is enabled and threadprivates must be generated like TLS, delay
2845 // codegen for global variables, because they may be marked as threadprivate.
2846 if (LangOpts.OpenMP && LangOpts.OpenMPUseTLS &&
2847 getContext().getTargetInfo().isTLSSupported() && isa<VarDecl>(Global) &&
2848 !isTypeConstant(Global->getType(), false) &&
2849 !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(Global))
2850 return false;
2851
2852 return true;
2853}
2854
2855ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) {
2856 StringRef Name = getMangledName(GD);
2857
2858 // The UUID descriptor should be pointer aligned.
2859 CharUnits Alignment = CharUnits::fromQuantity(PointerAlignInBytes);
2860
2861 // Look for an existing global.
2862 if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
2863 return ConstantAddress(GV, GV->getValueType(), Alignment);
2864
2865 ConstantEmitter Emitter(*this);
2866 llvm::Constant *Init;
2867
2868 APValue &V = GD->getAsAPValue();
2869 if (!V.isAbsent()) {
2870 // If possible, emit the APValue version of the initializer. In particular,
2871 // this gets the type of the constant right.
2872 Init = Emitter.emitForInitializer(
2873 GD->getAsAPValue(), GD->getType().getAddressSpace(), GD->getType());
2874 } else {
2875 // As a fallback, directly construct the constant.
2876 // FIXME: This may get padding wrong under esoteric struct layout rules.
2877 // MSVC appears to create a complete type 'struct __s_GUID' that it
2878 // presumably uses to represent these constants.
2879 MSGuidDecl::Parts Parts = GD->getParts();
2880 llvm::Constant *Fields[4] = {
2881 llvm::ConstantInt::get(Int32Ty, Parts.Part1),
2882 llvm::ConstantInt::get(Int16Ty, Parts.Part2),
2883 llvm::ConstantInt::get(Int16Ty, Parts.Part3),
2884 llvm::ConstantDataArray::getRaw(
2885 StringRef(reinterpret_cast<char *>(Parts.Part4And5), 8), 8,
2886 Int8Ty)};
2887 Init = llvm::ConstantStruct::getAnon(Fields);
2888 }
2889
2890 auto *GV = new llvm::GlobalVariable(
2891 getModule(), Init->getType(),
2892 /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
2893 if (supportsCOMDAT())
2894 GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
2895 setDSOLocal(GV);
2896
2897 if (!V.isAbsent()) {
2898 Emitter.finalize(GV);
2899 return ConstantAddress(GV, GV->getValueType(), Alignment);
2900 }
2901
2902 llvm::Type *Ty = getTypes().ConvertTypeForMem(GD->getType());
2903 llvm::Constant *Addr = llvm::ConstantExpr::getBitCast(
2904 GV, Ty->getPointerTo(GV->getAddressSpace()));
2905 return ConstantAddress(Addr, Ty, Alignment);
2906}
2907
2908ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
2909 const TemplateParamObjectDecl *TPO) {
2910 StringRef Name = getMangledName(TPO);
2911 CharUnits Alignment = getNaturalTypeAlignment(TPO->getType());
2912
2913 if (llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name))
2914 return ConstantAddress(GV, GV->getValueType(), Alignment);
2915
2916 ConstantEmitter Emitter(*this);
2917 llvm::Constant *Init = Emitter.emitForInitializer(
2918 TPO->getValue(), TPO->getType().getAddressSpace(), TPO->getType());
2919
2920 if (!Init) {
2921 ErrorUnsupported(TPO, "template parameter object");
2922 return ConstantAddress::invalid();
2923 }
2924
2925 auto *GV = new llvm::GlobalVariable(
2926 getModule(), Init->getType(),
2927 /*isConstant=*/true, llvm::GlobalValue::LinkOnceODRLinkage, Init, Name);
2928 if (supportsCOMDAT())
2929 GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
2930 Emitter.finalize(GV);
2931
2932 return ConstantAddress(GV, GV->getValueType(), Alignment);
2933}
2934
2935ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
2936 const AliasAttr *AA = VD->getAttr<AliasAttr>();
2937 assert(AA && "No alias?")(static_cast <bool> (AA && "No alias?") ? void (
0) : __assert_fail ("AA && \"No alias?\"", "clang/lib/CodeGen/CodeGenModule.cpp"
, 2937, __extension__ __PRETTY_FUNCTION__))
;
2938
2939 CharUnits Alignment = getContext().getDeclAlign(VD);
2940 llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());
2941
2942 // See if there is already something with the target's name in the module.
2943 llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
2944 if (Entry) {
2945 unsigned AS = getContext().getTargetAddressSpace(VD->getType());
2946 auto Ptr = llvm::ConstantExpr::getBitCast(Entry, DeclTy->getPointerTo(AS));
2947 return ConstantAddress(Ptr, DeclTy, Alignment);
2948 }
2949
2950 llvm::Constant *Aliasee;
2951 if (isa<llvm::FunctionType>(DeclTy))
2952 Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy,
2953 GlobalDecl(cast<FunctionDecl>(VD)),
2954 /*ForVTable=*/false);
2955 else
2956 Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(), DeclTy, LangAS::Default,
2957 nullptr);
2958
2959 auto *F = cast<llvm::GlobalValue>(Aliasee);
2960 F->setLinkage(llvm::Function::ExternalWeakLinkage);
2961 WeakRefReferences.insert(F);
2962
2963 return ConstantAddress(Aliasee, DeclTy, Alignment);
2964}
2965
2966void CodeGenModule::EmitGlobal(GlobalDecl GD) {
2967 const auto *Global = cast<ValueDecl>(GD.getDecl());
2968
2969 // Weak references don't produce any output by themselves.
2970 if (Global->hasAttr<WeakRefAttr>())
2971 return;
2972
2973 // If this is an alias definition (which otherwise looks like a declaration)
2974 // emit it now.
2975 if (Global->hasAttr<AliasAttr>())
2976 return EmitAliasDefinition(GD);
2977
2978 // IFunc like an alias whose value is resolved at runtime by calling resolver.
2979 if (Global->hasAttr<IFuncAttr>())
2980 return emitIFuncDefinition(GD);
2981
2982 // If this is a cpu_dispatch multiversion function, emit the resolver.
2983 if (Global->hasAttr<CPUDispatchAttr>())
2984 return emitCPUDispatchDefinition(GD);
2985
2986 // If this is CUDA, be selective about which declarations we emit.
2987 if (LangOpts.CUDA) {
2988 if (LangOpts.CUDAIsDevice) {
2989 if (!Global->hasAttr<CUDADeviceAttr>() &&
2990 !Global->hasAttr<CUDAGlobalAttr>() &&
2991 !Global->hasAttr<CUDAConstantAttr>() &&
2992 !Global->hasAttr<CUDASharedAttr>() &&
2993 !Global->getType()->isCUDADeviceBuiltinSurfaceType() &&
2994 !Global->getType()->isCUDADeviceBuiltinTextureType())
2995 return;
2996 } else {
2997 // We need to emit host-side 'shadows' for all global
2998 // device-side variables because the CUDA runtime needs their
2999 // size and host-side address in order to provide access to
3000 // their device-side incarnations.
3001
3002 // So device-only functions are the only things we skip.
3003 if (isa<FunctionDecl>(Global) && !Global->hasAttr<CUDAHostAttr>() &&
3004 Global->hasAttr<CUDADeviceAttr>())
3005 return;
3006
3007 assert((isa<FunctionDecl>(Global) || isa<VarDecl>(Global)) &&(static_cast <bool> ((isa<FunctionDecl>(Global) ||
isa<VarDecl>(Global)) && "Expected Variable or Function"
) ? void (0) : __assert_fail ("(isa<FunctionDecl>(Global) || isa<VarDecl>(Global)) && \"Expected Variable or Function\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3008, __extension__ __PRETTY_FUNCTION__
))
3008 "Expected Variable or Function")(static_cast <bool> ((isa<FunctionDecl>(Global) ||
isa<VarDecl>(Global)) && "Expected Variable or Function"
) ? void (0) : __assert_fail ("(isa<FunctionDecl>(Global) || isa<VarDecl>(Global)) && \"Expected Variable or Function\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3008, __extension__ __PRETTY_FUNCTION__
))
;
3009 }
3010 }
3011
3012 if (LangOpts.OpenMP) {
3013 // If this is OpenMP, check if it is legal to emit this global normally.
3014 if (OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(GD))
3015 return;
3016 if (auto *DRD = dyn_cast<OMPDeclareReductionDecl>(Global)) {
3017 if (MustBeEmitted(Global))
3018 EmitOMPDeclareReduction(DRD);
3019 return;
3020 } else if (auto *DMD = dyn_cast<OMPDeclareMapperDecl>(Global)) {
3021 if (MustBeEmitted(Global))
3022 EmitOMPDeclareMapper(DMD);
3023 return;
3024 }
3025 }
3026
3027 // Ignore declarations, they will be emitted on their first use.
3028 if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
3029 // Forward declarations are emitted lazily on first use.
3030 if (!FD->doesThisDeclarationHaveABody()) {
3031 if (!FD->doesDeclarationForceExternallyVisibleDefinition())
3032 return;
3033
3034 StringRef MangledName = getMangledName(GD);
3035
3036 // Compute the function info and LLVM type.
3037 const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
3038 llvm::Type *Ty = getTypes().GetFunctionType(FI);
3039
3040 GetOrCreateLLVMFunction(MangledName, Ty, GD, /*ForVTable=*/false,
3041 /*DontDefer=*/false);
3042 return;
3043 }
3044 } else {
3045 const auto *VD = cast<VarDecl>(Global);
3046 assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.")(static_cast <bool> (VD->isFileVarDecl() && "Cannot emit local var decl as global."
) ? void (0) : __assert_fail ("VD->isFileVarDecl() && \"Cannot emit local var decl as global.\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3046, __extension__ __PRETTY_FUNCTION__
))
;
3047 if (VD->isThisDeclarationADefinition() != VarDecl::Definition &&
3048 !Context.isMSStaticDataMemberInlineDefinition(VD)) {
3049 if (LangOpts.OpenMP) {
3050 // Emit declaration of the must-be-emitted declare target variable.
3051 if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
3052 OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
3053 bool UnifiedMemoryEnabled =
3054 getOpenMPRuntime().hasRequiresUnifiedSharedMemory();
3055 if (*Res == OMPDeclareTargetDeclAttr::MT_To &&
3056 !UnifiedMemoryEnabled) {
3057 (void)GetAddrOfGlobalVar(VD);
3058 } else {
3059 assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||(static_cast <bool> (((*Res == OMPDeclareTargetDeclAttr
::MT_Link) || (*Res == OMPDeclareTargetDeclAttr::MT_To &&
UnifiedMemoryEnabled)) && "Link clause or to clause with unified memory expected."
) ? void (0) : __assert_fail ("((*Res == OMPDeclareTargetDeclAttr::MT_Link) || (*Res == OMPDeclareTargetDeclAttr::MT_To && UnifiedMemoryEnabled)) && \"Link clause or to clause with unified memory expected.\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3062, __extension__ __PRETTY_FUNCTION__
))
3060 (*Res == OMPDeclareTargetDeclAttr::MT_To &&(static_cast <bool> (((*Res == OMPDeclareTargetDeclAttr
::MT_Link) || (*Res == OMPDeclareTargetDeclAttr::MT_To &&
UnifiedMemoryEnabled)) && "Link clause or to clause with unified memory expected."
) ? void (0) : __assert_fail ("((*Res == OMPDeclareTargetDeclAttr::MT_Link) || (*Res == OMPDeclareTargetDeclAttr::MT_To && UnifiedMemoryEnabled)) && \"Link clause or to clause with unified memory expected.\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3062, __extension__ __PRETTY_FUNCTION__
))
3061 UnifiedMemoryEnabled)) &&(static_cast <bool> (((*Res == OMPDeclareTargetDeclAttr
::MT_Link) || (*Res == OMPDeclareTargetDeclAttr::MT_To &&
UnifiedMemoryEnabled)) && "Link clause or to clause with unified memory expected."
) ? void (0) : __assert_fail ("((*Res == OMPDeclareTargetDeclAttr::MT_Link) || (*Res == OMPDeclareTargetDeclAttr::MT_To && UnifiedMemoryEnabled)) && \"Link clause or to clause with unified memory expected.\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3062, __extension__ __PRETTY_FUNCTION__
))
3062 "Link clause or to clause with unified memory expected.")(static_cast <bool> (((*Res == OMPDeclareTargetDeclAttr
::MT_Link) || (*Res == OMPDeclareTargetDeclAttr::MT_To &&
UnifiedMemoryEnabled)) && "Link clause or to clause with unified memory expected."
) ? void (0) : __assert_fail ("((*Res == OMPDeclareTargetDeclAttr::MT_Link) || (*Res == OMPDeclareTargetDeclAttr::MT_To && UnifiedMemoryEnabled)) && \"Link clause or to clause with unified memory expected.\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3062, __extension__ __PRETTY_FUNCTION__
))
;
3063 (void)getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
3064 }
3065
3066 return;
3067 }
3068 }
3069 // If this declaration may have caused an inline variable definition to
3070 // change linkage, make sure that it's emitted.
3071 if (Context.getInlineVariableDefinitionKind(VD) ==
3072 ASTContext::InlineVariableDefinitionKind::Strong)
3073 GetAddrOfGlobalVar(VD);
3074 return;
3075 }
3076 }
3077
3078 // Defer code generation to first use when possible, e.g. if this is an inline
3079 // function. If the global must always be emitted, do it eagerly if possible
3080 // to benefit from cache locality.
3081 if (MustBeEmitted(Global) && MayBeEmittedEagerly(Global)) {
3082 // Emit the definition if it can't be deferred.
3083 EmitGlobalDefinition(GD);
3084 return;
3085 }
3086
3087 // If we're deferring emission of a C++ variable with an
3088 // initializer, remember the order in which it appeared in the file.
3089 if (getLangOpts().CPlusPlus && isa<VarDecl>(Global) &&
3090 cast<VarDecl>(Global)->hasInit()) {
3091 DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
3092 CXXGlobalInits.push_back(nullptr);
3093 }
3094
3095 StringRef MangledName = getMangledName(GD);
3096 if (GetGlobalValue(MangledName) != nullptr) {
3097 // The value has already been used and should therefore be emitted.
3098 addDeferredDeclToEmit(GD);
3099 } else if (MustBeEmitted(Global)) {
3100 // The value must be emitted, but cannot be emitted eagerly.
3101 assert(!MayBeEmittedEagerly(Global))(static_cast <bool> (!MayBeEmittedEagerly(Global)) ? void
(0) : __assert_fail ("!MayBeEmittedEagerly(Global)", "clang/lib/CodeGen/CodeGenModule.cpp"
, 3101, __extension__ __PRETTY_FUNCTION__))
;
3102 addDeferredDeclToEmit(GD);
3103 } else {
3104 // Otherwise, remember that we saw a deferred decl with this name. The
3105 // first use of the mangled name will cause it to move into
3106 // DeferredDeclsToEmit.
3107 DeferredDecls[MangledName] = GD;
3108 }
3109}
3110
3111// Check if T is a class type with a destructor that's not dllimport.
3112static bool HasNonDllImportDtor(QualType T) {
3113 if (const auto *RT = T->getBaseElementTypeUnsafe()->getAs<RecordType>())
3114 if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
3115 if (RD->getDestructor() && !RD->getDestructor()->hasAttr<DLLImportAttr>())
3116 return true;
3117
3118 return false;
3119}
3120
3121namespace {
3122 struct FunctionIsDirectlyRecursive
3123 : public ConstStmtVisitor<FunctionIsDirectlyRecursive, bool> {
3124 const StringRef Name;
3125 const Builtin::Context &BI;
3126 FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C)
3127 : Name(N), BI(C) {}
3128
3129 bool VisitCallExpr(const CallExpr *E) {
3130 const FunctionDecl *FD = E->getDirectCallee();
3131 if (!FD)
3132 return false;
3133 AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
3134 if (Attr && Name == Attr->getLabel())
3135 return true;
3136 unsigned BuiltinID = FD->getBuiltinID();
3137 if (!BuiltinID || !BI.isLibFunction(BuiltinID))
3138 return false;
3139 StringRef BuiltinName = BI.getName(BuiltinID);
3140 if (BuiltinName.startswith("__builtin_") &&
3141 Name == BuiltinName.slice(strlen("__builtin_"), StringRef::npos)) {
3142 return true;
3143 }
3144 return false;
3145 }
3146
3147 bool VisitStmt(const Stmt *S) {
3148 for (const Stmt *Child : S->children())
3149 if (Child && this->Visit(Child))
3150 return true;
3151 return false;
3152 }
3153 };
3154
3155 // Make sure we're not referencing non-imported vars or functions.
3156 struct DLLImportFunctionVisitor
3157 : public RecursiveASTVisitor<DLLImportFunctionVisitor> {
3158 bool SafeToInline = true;
3159
3160 bool shouldVisitImplicitCode() const { return true; }
3161
3162 bool VisitVarDecl(VarDecl *VD) {
3163 if (VD->getTLSKind()) {
3164 // A thread-local variable cannot be imported.
3165 SafeToInline = false;
3166 return SafeToInline;
3167 }
3168
3169 // A variable definition might imply a destructor call.
3170 if (VD->isThisDeclarationADefinition())
3171 SafeToInline = !HasNonDllImportDtor(VD->getType());
3172
3173 return SafeToInline;
3174 }
3175
3176 bool VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
3177 if (const auto *D = E->getTemporary()->getDestructor())
3178 SafeToInline = D->hasAttr<DLLImportAttr>();
3179 return SafeToInline;
3180 }
3181
3182 bool VisitDeclRefExpr(DeclRefExpr *E) {
3183 ValueDecl *VD = E->getDecl();
3184 if (isa<FunctionDecl>(VD))
3185 SafeToInline = VD->hasAttr<DLLImportAttr>();
3186 else if (VarDecl *V = dyn_cast<VarDecl>(VD))
3187 SafeToInline = !V->hasGlobalStorage() || V->hasAttr<DLLImportAttr>();
3188 return SafeToInline;
3189 }
3190
3191 bool VisitCXXConstructExpr(CXXConstructExpr *E) {
3192 SafeToInline = E->getConstructor()->hasAttr<DLLImportAttr>();
3193 return SafeToInline;
3194 }
3195
3196 bool VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
3197 CXXMethodDecl *M = E->getMethodDecl();
3198 if (!M) {
3199 // Call through a pointer to member function. This is safe to inline.
3200 SafeToInline = true;
3201 } else {
3202 SafeToInline = M->hasAttr<DLLImportAttr>();
3203 }
3204 return SafeToInline;
3205 }
3206
3207 bool VisitCXXDeleteExpr(CXXDeleteExpr *E) {
3208 SafeToInline = E->getOperatorDelete()->hasAttr<DLLImportAttr>();
3209 return SafeToInline;
3210 }
3211
3212 bool VisitCXXNewExpr(CXXNewExpr *E) {
3213 SafeToInline = E->getOperatorNew()->hasAttr<DLLImportAttr>();
3214 return SafeToInline;
3215 }
3216 };
3217}
3218
3219// isTriviallyRecursive - Check if this function calls another
3220// decl that, because of the asm attribute or the other decl being a builtin,
3221// ends up pointing to itself.
3222bool
3223CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) {
3224 StringRef Name;
3225 if (getCXXABI().getMangleContext().shouldMangleDeclName(FD)) {
3226 // asm labels are a special kind of mangling we have to support.
3227 AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
3228 if (!Attr)
3229 return false;
3230 Name = Attr->getLabel();
3231 } else {
3232 Name = FD->getName();
3233 }
3234
3235 FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo);
3236 const Stmt *Body = FD->getBody();
3237 return Body ? Walker.Visit(Body) : false;
3238}
3239
3240bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
3241 if (getFunctionLinkage(GD) != llvm::Function::AvailableExternallyLinkage)
3242 return true;
3243 const auto *F = cast<FunctionDecl>(GD.getDecl());
3244 if (CodeGenOpts.OptimizationLevel == 0 && !F->hasAttr<AlwaysInlineAttr>())
3245 return false;
3246
3247 if (F->hasAttr<DLLImportAttr>() && !F->hasAttr<AlwaysInlineAttr>()) {
3248 // Check whether it would be safe to inline this dllimport function.
3249 DLLImportFunctionVisitor Visitor;
3250 Visitor.TraverseFunctionDecl(const_cast<FunctionDecl*>(F));
3251 if (!Visitor.SafeToInline)
3252 return false;
3253
3254 if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(F)) {
3255 // Implicit destructor invocations aren't captured in the AST, so the
3256 // check above can't see them. Check for them manually here.
3257 for (const Decl *Member : Dtor->getParent()->decls())
3258 if (isa<FieldDecl>(Member))
3259 if (HasNonDllImportDtor(cast<FieldDecl>(Member)->getType()))
3260 return false;
3261 for (const CXXBaseSpecifier &B : Dtor->getParent()->bases())
3262 if (HasNonDllImportDtor(B.getType()))
3263 return false;
3264 }
3265 }
3266
3267 // Inline builtins declaration must be emitted. They often are fortified
3268 // functions.
3269 if (F->isInlineBuiltinDeclaration())
3270 return true;
3271
3272 // PR9614. Avoid cases where the source code is lying to us. An available
3273 // externally function should have an equivalent function somewhere else,
3274 // but a function that calls itself through asm label/`__builtin_` trickery is
3275 // clearly not equivalent to the real implementation.
3276 // This happens in glibc's btowc and in some configure checks.
3277 return !isTriviallyRecursive(F);
3278}
3279
3280bool CodeGenModule::shouldOpportunisticallyEmitVTables() {
3281 return CodeGenOpts.OptimizationLevel > 0;
3282}
3283
3284void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD,
3285 llvm::GlobalValue *GV) {
3286 const auto *FD = cast<FunctionDecl>(GD.getDecl());
3287
3288 if (FD->isCPUSpecificMultiVersion()) {
3289 auto *Spec = FD->getAttr<CPUSpecificAttr>();
3290 for (unsigned I = 0; I < Spec->cpus_size(); ++I)
3291 EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
3292 // Requires multiple emits.
3293 } else if (FD->isTargetClonesMultiVersion()) {
3294 auto *Clone = FD->getAttr<TargetClonesAttr>();
3295 for (unsigned I = 0; I < Clone->featuresStrs_size(); ++I)
3296 if (Clone->isFirstOfVersion(I))
3297 EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
3298 EmitTargetClonesResolver(GD);
3299 } else
3300 EmitGlobalFunctionDefinition(GD, GV);
3301}
3302
3303void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD, llvm::GlobalValue *GV) {
3304 const auto *D = cast<ValueDecl>(GD.getDecl());
3305
3306 PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(),
3307 Context.getSourceManager(),
3308 "Generating code for declaration");
3309
3310 if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
3311 // At -O0, don't generate IR for functions with available_externally
3312 // linkage.
3313 if (!shouldEmitFunction(GD))
3314 return;
3315
3316 llvm::TimeTraceScope TimeScope("CodeGen Function", [&]() {
3317 std::string Name;
3318 llvm::raw_string_ostream OS(Name);
3319 FD->getNameForDiagnostic(OS, getContext().getPrintingPolicy(),
3320 /*Qualified=*/true);
3321 return Name;
3322 });
3323
3324 if (const auto *Method = dyn_cast<CXXMethodDecl>(D)) {
3325 // Make sure to emit the definition(s) before we emit the thunks.
3326 // This is necessary for the generation of certain thunks.
3327 if (isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method))
3328 ABI->emitCXXStructor(GD);
3329 else if (FD->isMultiVersion())
3330 EmitMultiVersionFunctionDefinition(GD, GV);
3331 else
3332 EmitGlobalFunctionDefinition(GD, GV);
3333
3334 if (Method->isVirtual())
3335 getVTables().EmitThunks(GD);
3336
3337 return;
3338 }
3339
3340 if (FD->isMultiVersion())
3341 return EmitMultiVersionFunctionDefinition(GD, GV);
3342 return EmitGlobalFunctionDefinition(GD, GV);
3343 }
3344
3345 if (const auto *VD = dyn_cast<VarDecl>(D))
3346 return EmitGlobalVarDefinition(VD, !VD->hasDefinition());
3347
3348 llvm_unreachable("Invalid argument to EmitGlobalDefinition()")::llvm::llvm_unreachable_internal("Invalid argument to EmitGlobalDefinition()"
, "clang/lib/CodeGen/CodeGenModule.cpp", 3348)
;
3349}
3350
3351static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
3352 llvm::Function *NewFn);
3353
3354static unsigned
3355TargetMVPriority(const TargetInfo &TI,
3356 const CodeGenFunction::MultiVersionResolverOption &RO) {
3357 unsigned Priority = 0;
3358 for (StringRef Feat : RO.Conditions.Features)
3359 Priority = std::max(Priority, TI.multiVersionSortPriority(Feat));
3360
3361 if (!RO.Conditions.Architecture.empty())
3362 Priority = std::max(
3363 Priority, TI.multiVersionSortPriority(RO.Conditions.Architecture));
3364 return Priority;
3365}
3366
3367// Multiversion functions should be at most 'WeakODRLinkage' so that a different
3368// TU can forward declare the function without causing problems. Particularly
3369// in the cases of CPUDispatch, this causes issues. This also makes sure we
3370// work with internal linkage functions, so that the same function name can be
3371// used with internal linkage in multiple TUs.
3372llvm::GlobalValue::LinkageTypes getMultiversionLinkage(CodeGenModule &CGM,
3373 GlobalDecl GD) {
3374 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
3375 if (FD->getFormalLinkage() == InternalLinkage)
3376 return llvm::GlobalValue::InternalLinkage;
3377 return llvm::GlobalValue::WeakODRLinkage;
3378}
3379
3380void CodeGenModule::EmitTargetClonesResolver(GlobalDecl GD) {
3381 const auto *FD = cast<FunctionDecl>(GD.getDecl());
3382 assert(FD && "Not a FunctionDecl?")(static_cast <bool> (FD && "Not a FunctionDecl?"
) ? void (0) : __assert_fail ("FD && \"Not a FunctionDecl?\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3382, __extension__ __PRETTY_FUNCTION__
))
;
3383 const auto *TC = FD->getAttr<TargetClonesAttr>();
3384 assert(TC && "Not a target_clones Function?")(static_cast <bool> (TC && "Not a target_clones Function?"
) ? void (0) : __assert_fail ("TC && \"Not a target_clones Function?\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3384, __extension__ __PRETTY_FUNCTION__
))
;
3385
3386 QualType CanonTy = Context.getCanonicalType(FD->getType());
3387 llvm::Type *DeclTy = getTypes().ConvertType(CanonTy);
3388
3389 if (const auto *CXXFD = dyn_cast<CXXMethodDecl>(FD)) {
3390 const CGFunctionInfo &FInfo = getTypes().arrangeCXXMethodDeclaration(CXXFD);
3391 DeclTy = getTypes().GetFunctionType(FInfo);
3392 }
3393
3394 llvm::Function *ResolverFunc;
3395 if (getTarget().supportsIFunc()) {
3396 auto *IFunc = cast<llvm::GlobalIFunc>(
3397 GetOrCreateMultiVersionResolver(GD, DeclTy, FD));
3398 ResolverFunc = cast<llvm::Function>(IFunc->getResolver());
3399 } else
3400 ResolverFunc =
3401 cast<llvm::Function>(GetOrCreateMultiVersionResolver(GD, DeclTy, FD));
3402
3403 SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
3404 for (unsigned VersionIndex = 0; VersionIndex < TC->featuresStrs_size();
3405 ++VersionIndex) {
3406 if (!TC->isFirstOfVersion(VersionIndex))
3407 continue;
3408 StringRef Version = TC->getFeatureStr(VersionIndex);
3409 StringRef MangledName =
3410 getMangledName(GD.getWithMultiVersionIndex(VersionIndex));
3411 llvm::Constant *Func = GetGlobalValue(MangledName);
3412 assert(Func &&(static_cast <bool> (Func && "Should have already been created before calling resolver emit"
) ? void (0) : __assert_fail ("Func && \"Should have already been created before calling resolver emit\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3413, __extension__ __PRETTY_FUNCTION__
))
3413 "Should have already been created before calling resolver emit")(static_cast <bool> (Func && "Should have already been created before calling resolver emit"
) ? void (0) : __assert_fail ("Func && \"Should have already been created before calling resolver emit\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3413, __extension__ __PRETTY_FUNCTION__
))
;
3414
3415 StringRef Architecture;
3416 llvm::SmallVector<StringRef, 1> Feature;
3417
3418 if (Version.startswith("arch="))
3419 Architecture = Version.drop_front(sizeof("arch=") - 1);
3420 else if (Version != "default")
3421 Feature.push_back(Version);
3422
3423 Options.emplace_back(cast<llvm::Function>(Func), Architecture, Feature);
3424 }
3425
3426 const TargetInfo &TI = getTarget();
3427 std::stable_sort(
3428 Options.begin(), Options.end(),
3429 [&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
3430 const CodeGenFunction::MultiVersionResolverOption &RHS) {
3431 return TargetMVPriority(TI, LHS) > TargetMVPriority(TI, RHS);
3432 });
3433 CodeGenFunction CGF(*this);
3434 CGF.EmitMultiVersionResolver(ResolverFunc, Options);
3435}
3436
3437void CodeGenModule::emitMultiVersionFunctions() {
3438 std::vector<GlobalDecl> MVFuncsToEmit;
3439 MultiVersionFuncs.swap(MVFuncsToEmit);
3440 for (GlobalDecl GD : MVFuncsToEmit) {
3441 SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
3442 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
3443 getContext().forEachMultiversionedFunctionVersion(
3444 FD, [this, &GD, &Options](const FunctionDecl *CurFD) {
3445 GlobalDecl CurGD{
3446 (CurFD->isDefined() ? CurFD->getDefinition() : CurFD)};
3447 StringRef MangledName = getMangledName(CurGD);
3448 llvm::Constant *Func = GetGlobalValue(MangledName);
3449 if (!Func) {
3450 if (CurFD->isDefined()) {
3451 EmitGlobalFunctionDefinition(CurGD, nullptr);
3452 Func = GetGlobalValue(MangledName);
3453 } else {
3454 const CGFunctionInfo &FI =
3455 getTypes().arrangeGlobalDeclaration(GD);
3456 llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
3457 Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
3458 /*DontDefer=*/false, ForDefinition);
3459 }
3460 assert(Func && "This should have just been created")(static_cast <bool> (Func && "This should have just been created"
) ? void (0) : __assert_fail ("Func && \"This should have just been created\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3460, __extension__ __PRETTY_FUNCTION__
))
;
3461 }
3462
3463 const auto *TA = CurFD->getAttr<TargetAttr>();
3464 llvm::SmallVector<StringRef, 8> Feats;
3465 TA->getAddedFeatures(Feats);
3466
3467 Options.emplace_back(cast<llvm::Function>(Func),
3468 TA->getArchitecture(), Feats);
3469 });
3470
3471 llvm::Function *ResolverFunc;
3472 const TargetInfo &TI = getTarget();
3473
3474 if (TI.supportsIFunc() || FD->isTargetMultiVersion()) {
3475 ResolverFunc = cast<llvm::Function>(
3476 GetGlobalValue((getMangledName(GD) + ".resolver").str()));
3477 ResolverFunc->setLinkage(getMultiversionLinkage(*this, GD));
3478 } else {
3479 ResolverFunc = cast<llvm::Function>(GetGlobalValue(getMangledName(GD)));
3480 }
3481
3482 if (supportsCOMDAT())
3483 ResolverFunc->setComdat(
3484 getModule().getOrInsertComdat(ResolverFunc->getName()));
3485
3486 llvm::stable_sort(
3487 Options, [&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
3488 const CodeGenFunction::MultiVersionResolverOption &RHS) {
3489 return TargetMVPriority(TI, LHS) > TargetMVPriority(TI, RHS);
3490 });
3491 CodeGenFunction CGF(*this);
3492 CGF.EmitMultiVersionResolver(ResolverFunc, Options);
3493 }
3494
3495 // Ensure that any additions to the deferred decls list caused by emitting a
3496 // variant are emitted. This can happen when the variant itself is inline and
3497 // calls a function without linkage.
3498 if (!MVFuncsToEmit.empty())
3499 EmitDeferred();
3500
3501 // Ensure that any additions to the multiversion funcs list from either the
3502 // deferred decls or the multiversion functions themselves are emitted.
3503 if (!MultiVersionFuncs.empty())
3504 emitMultiVersionFunctions();
3505}
3506
3507void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
3508 const auto *FD = cast<FunctionDecl>(GD.getDecl());
3509 assert(FD && "Not a FunctionDecl?")(static_cast <bool> (FD && "Not a FunctionDecl?"
) ? void (0) : __assert_fail ("FD && \"Not a FunctionDecl?\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3509, __extension__ __PRETTY_FUNCTION__
))
;
3510 assert(FD->isCPUDispatchMultiVersion() && "Not a multiversion function?")(static_cast <bool> (FD->isCPUDispatchMultiVersion()
&& "Not a multiversion function?") ? void (0) : __assert_fail
("FD->isCPUDispatchMultiVersion() && \"Not a multiversion function?\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3510, __extension__ __PRETTY_FUNCTION__
))
;
3511 const auto *DD = FD->getAttr<CPUDispatchAttr>();
3512 assert(DD && "Not a cpu_dispatch Function?")(static_cast <bool> (DD && "Not a cpu_dispatch Function?"
) ? void (0) : __assert_fail ("DD && \"Not a cpu_dispatch Function?\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3512, __extension__ __PRETTY_FUNCTION__
))
;
3513 llvm::Type *DeclTy = getTypes().ConvertType(FD->getType());
3514
3515 if (const auto *CXXFD = dyn_cast<CXXMethodDecl>(FD)) {
3516 const CGFunctionInfo &FInfo = getTypes().arrangeCXXMethodDeclaration(CXXFD);
3517 DeclTy = getTypes().GetFunctionType(FInfo);
3518 }
3519
3520 StringRef ResolverName = getMangledName(GD);
3521 UpdateMultiVersionNames(GD, FD, ResolverName);
3522
3523 llvm::Type *ResolverType;
3524 GlobalDecl ResolverGD;
3525 if (getTarget().supportsIFunc()) {
3526 ResolverType = llvm::FunctionType::get(
3527 llvm::PointerType::get(DeclTy,
3528 Context.getTargetAddressSpace(FD->getType())),
3529 false);
3530 }
3531 else {
3532 ResolverType = DeclTy;
3533 ResolverGD = GD;
3534 }
3535
3536 auto *ResolverFunc = cast<llvm::Function>(GetOrCreateLLVMFunction(
3537 ResolverName, ResolverType, ResolverGD, /*ForVTable=*/false));
3538 ResolverFunc->setLinkage(getMultiversionLinkage(*this, GD));
3539 if (supportsCOMDAT())
3540 ResolverFunc->setComdat(
3541 getModule().getOrInsertComdat(ResolverFunc->getName()));
3542
3543 SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
3544 const TargetInfo &Target = getTarget();
3545 unsigned Index = 0;
3546 for (const IdentifierInfo *II : DD->cpus()) {
3547 // Get the name of the target function so we can look it up/create it.
3548 std::string MangledName = getMangledNameImpl(*this, GD, FD, true) +
3549 getCPUSpecificMangling(*this, II->getName());
3550
3551 llvm::Constant *Func = GetGlobalValue(MangledName);
3552
3553 if (!Func) {
3554 GlobalDecl ExistingDecl = Manglings.lookup(MangledName);
3555 if (ExistingDecl.getDecl() &&
3556 ExistingDecl.getDecl()->getAsFunction()->isDefined()) {
3557 EmitGlobalFunctionDefinition(ExistingDecl, nullptr);
3558 Func = GetGlobalValue(MangledName);
3559 } else {
3560 if (!ExistingDecl.getDecl())
3561 ExistingDecl = GD.getWithMultiVersionIndex(Index);
3562
3563 Func = GetOrCreateLLVMFunction(
3564 MangledName, DeclTy, ExistingDecl,
3565 /*ForVTable=*/false, /*DontDefer=*/true,
3566 /*IsThunk=*/false, llvm::AttributeList(), ForDefinition);
3567 }
3568 }
3569
3570 llvm::SmallVector<StringRef, 32> Features;
3571 Target.getCPUSpecificCPUDispatchFeatures(II->getName(), Features);
3572 llvm::transform(Features, Features.begin(),
3573 [](StringRef Str) { return Str.substr(1); });
3574 llvm::erase_if(Features, [&Target](StringRef Feat) {
3575 return !Target.validateCpuSupports(Feat);
3576 });
3577 Options.emplace_back(cast<llvm::Function>(Func), StringRef{}, Features);
3578 ++Index;
3579 }
3580
3581 llvm::stable_sort(
3582 Options, [](const CodeGenFunction::MultiVersionResolverOption &LHS,
3583 const CodeGenFunction::MultiVersionResolverOption &RHS) {
3584 return llvm::X86::getCpuSupportsMask(LHS.Conditions.Features) >
3585 llvm::X86::getCpuSupportsMask(RHS.Conditions.Features);
3586 });
3587
3588 // If the list contains multiple 'default' versions, such as when it contains
3589 // 'pentium' and 'generic', don't emit the call to the generic one (since we
3590 // always run on at least a 'pentium'). We do this by deleting the 'least
3591 // advanced' (read, lowest mangling letter).
3592 while (Options.size() > 1 &&
3593 llvm::X86::getCpuSupportsMask(
3594 (Options.end() - 2)->Conditions.Features) == 0) {
3595 StringRef LHSName = (Options.end() - 2)->Function->getName();
3596 StringRef RHSName = (Options.end() - 1)->Function->getName();
3597 if (LHSName.compare(RHSName) < 0)
3598 Options.erase(Options.end() - 2);
3599 else
3600 Options.erase(Options.end() - 1);
3601 }
3602
3603 CodeGenFunction CGF(*this);
3604 CGF.EmitMultiVersionResolver(ResolverFunc, Options);
3605
3606 if (getTarget().supportsIFunc()) {
3607 llvm::GlobalValue::LinkageTypes Linkage = getMultiversionLinkage(*this, GD);
3608 auto *IFunc = cast<llvm::GlobalValue>(
3609 GetOrCreateMultiVersionResolver(GD, DeclTy, FD));
3610
3611 // Fix up function declarations that were created for cpu_specific before
3612 // cpu_dispatch was known
3613 if (!dyn_cast<llvm::GlobalIFunc>(IFunc)) {
3614 assert(cast<llvm::Function>(IFunc)->isDeclaration())(static_cast <bool> (cast<llvm::Function>(IFunc)->
isDeclaration()) ? void (0) : __assert_fail ("cast<llvm::Function>(IFunc)->isDeclaration()"
, "clang/lib/CodeGen/CodeGenModule.cpp", 3614, __extension__ __PRETTY_FUNCTION__
))
;
3615 auto *GI = llvm::GlobalIFunc::create(DeclTy, 0, Linkage, "", ResolverFunc,
3616 &getModule());
3617 GI->takeName(IFunc);
3618 IFunc->replaceAllUsesWith(GI);
3619 IFunc->eraseFromParent();
3620 IFunc = GI;
3621 }
3622
3623 std::string AliasName = getMangledNameImpl(
3624 *this, GD, FD, /*OmitMultiVersionMangling=*/true);
3625 llvm::Constant *AliasFunc = GetGlobalValue(AliasName);
3626 if (!AliasFunc) {
3627 auto *GA = llvm::GlobalAlias::create(DeclTy, 0, Linkage, AliasName, IFunc,
3628 &getModule());
3629 SetCommonAttributes(GD, GA);
3630 }
3631 }
3632}
3633
3634/// If a dispatcher for the specified mangled name is not in the module, create
3635/// and return an llvm Function with the specified type.
3636llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(
3637 GlobalDecl GD, llvm::Type *DeclTy, const FunctionDecl *FD) {
3638 std::string MangledName =
3639 getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
3640
3641 // Holds the name of the resolver, in ifunc mode this is the ifunc (which has
3642 // a separate resolver).
3643 std::string ResolverName = MangledName;
3644 if (getTarget().supportsIFunc())
15
Taking false branch
3645 ResolverName += ".ifunc";
3646 else if (FD->isTargetMultiVersion())
16
Assuming the condition is false
17
Taking false branch
3647 ResolverName += ".resolver";
3648
3649 // If this already exists, just return that one.
3650 if (llvm::GlobalValue *ResolverGV = GetGlobalValue(ResolverName))
18
Assuming 'ResolverGV' is null
19
Taking false branch
3651 return ResolverGV;
3652
3653 // Since this is the first time we've created this IFunc, make sure
3654 // that we put this multiversioned function into the list to be
3655 // replaced later if necessary (target multiversioning only).
3656 if (FD->isTargetMultiVersion())
20
Assuming the condition is false
21
Taking false branch
3657 MultiVersionFuncs.push_back(GD);
3658 else if (FD->isTargetClonesMultiVersion()) {
22
Assuming the condition is true
23
Taking true branch
3659 // In target_clones multiversioning, make sure we emit this if used.
3660 auto DDI =
3661 DeferredDecls.find(getMangledName(GD.getWithMultiVersionIndex(0)));
3662 if (DDI != DeferredDecls.end()) {
24
Calling 'operator!='
35
Returning from 'operator!='
36
Taking false branch
3663 addDeferredDeclToEmit(GD);
3664 DeferredDecls.erase(DDI);
3665 } else {
3666 // Emit the symbol of the 1st variant, so that the deferred decls know we
3667 // need it, otherwise the only global value will be the resolver/ifunc,
3668 // which end up getting broken if we search for them with GetGlobalValue'.
3669 GetOrCreateLLVMFunction(
37
Calling 'CodeGenModule::GetOrCreateLLVMFunction'
3670 getMangledName(GD.getWithMultiVersionIndex(0)), DeclTy, FD,
3671 /*ForVTable=*/false, /*DontDefer=*/true,
3672 /*IsThunk=*/false, llvm::AttributeList(), ForDefinition);
3673 }
3674 }
3675
3676 // For cpu_specific, don't create an ifunc yet because we don't know if the
3677 // cpu_dispatch will be emitted in this translation unit.
3678 if (getTarget().supportsIFunc() && !FD->isCPUSpecificMultiVersion()) {
3679 llvm::Type *ResolverType = llvm::FunctionType::get(
3680 llvm::PointerType::get(
3681 DeclTy, getContext().getTargetAddressSpace(FD->getType())),
3682 false);
3683 llvm::Constant *Resolver = GetOrCreateLLVMFunction(
3684 MangledName + ".resolver", ResolverType, GlobalDecl{},
3685 /*ForVTable=*/false);
3686 llvm::GlobalIFunc *GIF =
3687 llvm::GlobalIFunc::create(DeclTy, 0, getMultiversionLinkage(*this, GD),
3688 "", Resolver, &getModule());
3689 GIF->setName(ResolverName);
3690 SetCommonAttributes(FD, GIF);
3691
3692 return GIF;
3693 }
3694
3695 llvm::Constant *Resolver = GetOrCreateLLVMFunction(
3696 ResolverName, DeclTy, GlobalDecl{}, /*ForVTable=*/false);
3697 assert(isa<llvm::GlobalValue>(Resolver) &&(static_cast <bool> (isa<llvm::GlobalValue>(Resolver
) && "Resolver should be created for the first time")
? void (0) : __assert_fail ("isa<llvm::GlobalValue>(Resolver) && \"Resolver should be created for the first time\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3698, __extension__ __PRETTY_FUNCTION__
))
3698 "Resolver should be created for the first time")(static_cast <bool> (isa<llvm::GlobalValue>(Resolver
) && "Resolver should be created for the first time")
? void (0) : __assert_fail ("isa<llvm::GlobalValue>(Resolver) && \"Resolver should be created for the first time\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3698, __extension__ __PRETTY_FUNCTION__
))
;
3699 SetCommonAttributes(FD, cast<llvm::GlobalValue>(Resolver));
3700 return Resolver;
3701}
3702
3703/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
3704/// module, create and return an llvm Function with the specified type. If there
3705/// is something in the module with the specified name, return it potentially
3706/// bitcasted to the right type.
3707///
3708/// If D is non-null, it specifies a decl that correspond to this. This is used
3709/// to set the attributes on the function when it is first created.
3710llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
3711 StringRef MangledName, llvm::Type *Ty, GlobalDecl GD, bool ForVTable,
3712 bool DontDefer, bool IsThunk, llvm::AttributeList ExtraAttrs,
3713 ForDefinition_t IsForDefinition) {
3714 const Decl *D = GD.getDecl();
38
Calling 'GlobalDecl::getDecl'
50
Returning from 'GlobalDecl::getDecl'
51
'D' initialized here
3715
3716 // Any attempts to use a MultiVersion function should result in retrieving
3717 // the iFunc instead. Name Mangling will handle the rest of the changes.
3718 if (const FunctionDecl *FD
9.1
'FD' is non-null
53.1
'FD' is null
9.1
'FD' is non-null
53.1
'FD' is null
9.1
'FD' is non-null
53.1
'FD' is null
9.1
'FD' is non-null
53.1
'FD' is null
9.1
'FD' is non-null
53.1
'FD' is null
= cast_or_null<FunctionDecl>(D)) {
9
Assuming 'D' is a 'FunctionDecl'
52
Assuming null pointer is passed into cast
53
Assuming pointer value is null
54
Taking false branch
3719 // For the device mark the function as one that should be emitted.
3720 if (getLangOpts().OpenMPIsDevice && OpenMPRuntime &&
10
Assuming field 'OpenMPIsDevice' is 0
3721 !OpenMPRuntime->markAsGlobalTarget(GD) && FD->isDefined() &&
3722 !DontDefer && !IsForDefinition) {
3723 if (const FunctionDecl *FDDef = FD->getDefinition()) {
3724 GlobalDecl GDDef;
3725 if (const auto *CD = dyn_cast<CXXConstructorDecl>(FDDef))
3726 GDDef = GlobalDecl(CD, GD.getCtorType());
3727 else if (const auto *DD = dyn_cast<CXXDestructorDecl>(FDDef))
3728 GDDef = GlobalDecl(DD, GD.getDtorType());
3729 else
3730 GDDef = GlobalDecl(FDDef);
3731 EmitGlobal(GDDef);
3732 }
3733 }
3734
3735 if (FD->isMultiVersion()) {
11
Assuming the condition is true
12
Taking true branch
3736 UpdateMultiVersionNames(GD, FD, MangledName);
3737 if (!IsForDefinition
12.1
'IsForDefinition' is 0
12.1
'IsForDefinition' is 0
12.1
'IsForDefinition' is 0
12.1
'IsForDefinition' is 0
12.1
'IsForDefinition' is 0
)
13
Taking true branch
3738 return GetOrCreateMultiVersionResolver(GD, Ty, FD);
14
Calling 'CodeGenModule::GetOrCreateMultiVersionResolver'
3739 }
3740 }
3741
3742 // Lookup the entry, lazily creating it if necessary.
3743 llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
3744 if (Entry) {
55
Assuming 'Entry' is non-null
56
Taking true branch
3745 if (WeakRefReferences.erase(Entry)) {
57
Assuming the condition is false
3746 const FunctionDecl *FD = cast_or_null<FunctionDecl>(D);
3747 if (FD && !FD->hasAttr<WeakAttr>())
3748 Entry->setLinkage(llvm::Function::ExternalLinkage);
3749 }
3750
3751 // Handle dropped DLL attributes.
3752 if (D
57.1
'D' is null
57.1
'D' is null
57.1
'D' is null
57.1
'D' is null
57.1
'D' is null
&& !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>()) {
3753 Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
3754 setDSOLocal(Entry);
3755 }
3756
3757 // If there are two attempts to define the same mangled name, issue an
3758 // error.
3759 if (IsForDefinition
57.2
'IsForDefinition' is 1
57.2
'IsForDefinition' is 1
57.2
'IsForDefinition' is 1
57.2
'IsForDefinition' is 1
57.2
'IsForDefinition' is 1
&& !Entry->isDeclaration()) {
58
Assuming the condition is true
59
Taking true branch
3760 GlobalDecl OtherGD;
3761 // Check that GD is not yet in DiagnosedConflictingDefinitions is required
3762 // to make sure that we issue an error only once.
3763 if (lookupRepresentativeDecl(MangledName, OtherGD) &&
61
Assuming the condition is true
62
Taking true branch
3764 (GD.getCanonicalDecl().getDecl() !=
60
Assuming the condition is true
3765 OtherGD.getCanonicalDecl().getDecl()) &&
3766 DiagnosedConflictingDefinitions.insert(GD).second) {
3767 getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name)
63
Called C++ object pointer is null
3768 << MangledName;
3769 getDiags().Report(OtherGD.getDecl()->getLocation(),
3770 diag::note_previous_definition);
3771 }
3772 }
3773
3774 if ((isa<llvm::Function>(Entry) || isa<llvm::GlobalAlias>(Entry)) &&
3775 (Entry->getValueType() == Ty)) {
3776 return Entry;
3777 }
3778
3779 // Make sure the result is of the correct type.
3780 // (If function is requested for a definition, we always need to create a new
3781 // function, not just return a bitcast.)
3782 if (!IsForDefinition)
3783 return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo());
3784 }
3785
3786 // This function doesn't have a complete type (for example, the return
3787 // type is an incomplete struct). Use a fake type instead, and make
3788 // sure not to try to set attributes.
3789 bool IsIncompleteFunction = false;
3790
3791 llvm::FunctionType *FTy;
3792 if (isa<llvm::FunctionType>(Ty)) {
3793 FTy = cast<llvm::FunctionType>(Ty);
3794 } else {
3795 FTy = llvm::FunctionType::get(VoidTy, false);
3796 IsIncompleteFunction = true;
3797 }
3798
3799 llvm::Function *F =
3800 llvm::Function::Create(FTy, llvm::Function::ExternalLinkage,
3801 Entry ? StringRef() : MangledName, &getModule());
3802
3803 // If we already created a function with the same mangled name (but different
3804 // type) before, take its name and add it to the list of functions to be
3805 // replaced with F at the end of CodeGen.
3806 //
3807 // This happens if there is a prototype for a function (e.g. "int f()") and
3808 // then a definition of a different type (e.g. "int f(int x)").
3809 if (Entry) {
3810 F->takeName(Entry);
3811
3812 // This might be an implementation of a function without a prototype, in
3813 // which case, try to do special replacement of calls which match the new
3814 // prototype. The really key thing here is that we also potentially drop
3815 // arguments from the call site so as to make a direct call, which makes the
3816 // inliner happier and suppresses a number of optimizer warnings (!) about
3817 // dropping arguments.
3818 if (!Entry->use_empty()) {
3819 ReplaceUsesOfNonProtoTypeWithRealFunction(Entry, F);
3820 Entry->removeDeadConstantUsers();
3821 }
3822
3823 llvm::Constant *BC = llvm::ConstantExpr::getBitCast(
3824 F, Entry->getValueType()->getPointerTo());
3825 addGlobalValReplacement(Entry, BC);
3826 }
3827
3828 assert(F->getName() == MangledName && "name was uniqued!")(static_cast <bool> (F->getName() == MangledName &&
"name was uniqued!") ? void (0) : __assert_fail ("F->getName() == MangledName && \"name was uniqued!\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3828, __extension__ __PRETTY_FUNCTION__
))
;
3829 if (D)
3830 SetFunctionAttributes(GD, F, IsIncompleteFunction, IsThunk);
3831 if (ExtraAttrs.hasFnAttrs()) {
3832 llvm::AttrBuilder B(F->getContext(), ExtraAttrs.getFnAttrs());
3833 F->addFnAttrs(B);
3834 }
3835
3836 if (!DontDefer) {
3837 // All MSVC dtors other than the base dtor are linkonce_odr and delegate to
3838 // each other bottoming out with the base dtor. Therefore we emit non-base
3839 // dtors on usage, even if there is no dtor definition in the TU.
3840 if (D && isa<CXXDestructorDecl>(D) &&
3841 getCXXABI().useThunkForDtorVariant(cast<CXXDestructorDecl>(D),
3842 GD.getDtorType()))
3843 addDeferredDeclToEmit(GD);
3844
3845 // This is the first use or definition of a mangled name. If there is a
3846 // deferred decl with this name, remember that we need to emit it at the end
3847 // of the file.
3848 auto DDI = DeferredDecls.find(MangledName);
3849 if (DDI != DeferredDecls.end()) {
3850 // Move the potentially referenced deferred decl to the
3851 // DeferredDeclsToEmit list, and remove it from DeferredDecls (since we
3852 // don't need it anymore).
3853 addDeferredDeclToEmit(DDI->second);
3854 DeferredDecls.erase(DDI);
3855
3856 // Otherwise, there are cases we have to worry about where we're
3857 // using a declaration for which we must emit a definition but where
3858 // we might not find a top-level definition:
3859 // - member functions defined inline in their classes
3860 // - friend functions defined inline in some class
3861 // - special member functions with implicit definitions
3862 // If we ever change our AST traversal to walk into class methods,
3863 // this will be unnecessary.
3864 //
3865 // We also don't emit a definition for a function if it's going to be an
3866 // entry in a vtable, unless it's already marked as used.
3867 } else if (getLangOpts().CPlusPlus && D) {
3868 // Look for a declaration that's lexically in a record.
3869 for (const auto *FD = cast<FunctionDecl>(D)->getMostRecentDecl(); FD;
3870 FD = FD->getPreviousDecl()) {
3871 if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
3872 if (FD->doesThisDeclarationHaveABody()) {
3873 addDeferredDeclToEmit(GD.getWithDecl(FD));
3874 break;
3875 }
3876 }
3877 }
3878 }
3879 }
3880
3881 // Make sure the result is of the requested type.
3882 if (!IsIncompleteFunction) {
3883 assert(F->getFunctionType() == Ty)(static_cast <bool> (F->getFunctionType() == Ty) ? void
(0) : __assert_fail ("F->getFunctionType() == Ty", "clang/lib/CodeGen/CodeGenModule.cpp"
, 3883, __extension__ __PRETTY_FUNCTION__))
;
3884 return F;
3885 }
3886
3887 llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
3888 return llvm::ConstantExpr::getBitCast(F, PTy);
3889}
3890
3891/// GetAddrOfFunction - Return the address of the given function. If Ty is
3892/// non-null, then this function will use the specified type if it has to
3893/// create it (this occurs when we see a definition of the function).
3894llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
3895 llvm::Type *Ty,
3896 bool ForVTable,
3897 bool DontDefer,
3898 ForDefinition_t IsForDefinition) {
3899 assert(!cast<FunctionDecl>(GD.getDecl())->isConsteval() &&(static_cast <bool> (!cast<FunctionDecl>(GD.getDecl
())->isConsteval() && "consteval function should never be emitted"
) ? void (0) : __assert_fail ("!cast<FunctionDecl>(GD.getDecl())->isConsteval() && \"consteval function should never be emitted\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3900, __extension__ __PRETTY_FUNCTION__
))
2
The object is a 'FunctionDecl'
3
'?' condition is true
3900 "consteval function should never be emitted")(static_cast <bool> (!cast<FunctionDecl>(GD.getDecl
())->isConsteval() && "consteval function should never be emitted"
) ? void (0) : __assert_fail ("!cast<FunctionDecl>(GD.getDecl())->isConsteval() && \"consteval function should never be emitted\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 3900, __extension__ __PRETTY_FUNCTION__
))
;
3901 // If there was no specific requested type, just convert it now.
3902 if (!Ty
3.1
'Ty' is null
3.1
'Ty' is null
3.1
'Ty' is null
3.1
'Ty' is null
3.1
'Ty' is null
) {
4
Taking true branch
3903 const auto *FD = cast<FunctionDecl>(GD.getDecl());
5
The object is a 'FunctionDecl'
3904 Ty = getTypes().ConvertType(FD->getType());
3905 }
3906
3907 // Devirtualized destructor calls may come through here instead of via
3908 // getAddrOfCXXStructor. Make sure we use the MS ABI base destructor instead
3909 // of the complete destructor when necessary.
3910 if (const auto *DD
6.1
'DD' is null
6.1
'DD' is null
6.1
'DD' is null
6.1
'DD' is null
6.1
'DD' is null
= dyn_cast<CXXDestructorDecl>(GD.getDecl())) {
6
Assuming the object is not a 'CXXDestructorDecl'
7
Taking false branch
3911 if (getTarget().getCXXABI().isMicrosoft() &&
3912 GD.getDtorType() == Dtor_Complete &&
3913 DD->getParent()->getNumVBases() == 0)
3914 GD = GlobalDecl(DD, Dtor_Base);
3915 }
3916
3917 StringRef MangledName = getMangledName(GD);
3918 auto *F = GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer,
8
Calling 'CodeGenModule::GetOrCreateLLVMFunction'
3919 /*IsThunk=*/false, llvm::AttributeList(),
3920 IsForDefinition);
3921 // Returns kernel handle for HIP kernel stub function.
3922 if (LangOpts.CUDA && !LangOpts.CUDAIsDevice &&
3923 cast<FunctionDecl>(GD.getDecl())->hasAttr<CUDAGlobalAttr>()) {
3924 auto *Handle = getCUDARuntime().getKernelHandle(
3925 cast<llvm::Function>(F->stripPointerCasts()), GD);
3926 if (IsForDefinition)
3927 return F;
3928 return llvm::ConstantExpr::getBitCast(Handle, Ty->getPointerTo());
3929 }
3930 return F;
3931}
3932
3933llvm::Constant *CodeGenModule::GetFunctionStart(const ValueDecl *Decl) {
3934 llvm::GlobalValue *F =
3935 cast<llvm::GlobalValue>(GetAddrOfFunction(Decl)->stripPointerCasts());
1
Calling 'CodeGenModule::GetAddrOfFunction'
3936
3937 return llvm::ConstantExpr::getBitCast(llvm::NoCFIValue::get(F),
3938 llvm::Type::getInt8PtrTy(VMContext));
3939}
3940
3941static const FunctionDecl *
3942GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) {
3943 TranslationUnitDecl *TUDecl = C.getTranslationUnitDecl();
3944 DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
3945
3946 IdentifierInfo &CII = C.Idents.get(Name);
3947 for (const auto *Result : DC->lookup(&CII))
3948 if (const auto *FD = dyn_cast<FunctionDecl>(Result))
3949 return FD;
3950
3951 if (!C.getLangOpts().CPlusPlus)
3952 return nullptr;
3953
3954 // Demangle the premangled name from getTerminateFn()
3955 IdentifierInfo &CXXII =
3956 (Name == "_ZSt9terminatev" || Name == "?terminate@@YAXXZ")
3957 ? C.Idents.get("terminate")
3958 : C.Idents.get(Name);
3959
3960 for (const auto &N : {"__cxxabiv1", "std"}) {
3961 IdentifierInfo &NS = C.Idents.get(N);
3962 for (const auto *Result : DC->lookup(&NS)) {
3963 const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(Result);
3964 if (auto *LSD = dyn_cast<LinkageSpecDecl>(Result))
3965 for (const auto *Result : LSD->lookup(&NS))
3966 if ((ND = dyn_cast<NamespaceDecl>(Result)))
3967 break;
3968
3969 if (ND)
3970 for (const auto *Result : ND->lookup(&CXXII))
3971 if (const auto *FD = dyn_cast<FunctionDecl>(Result))
3972 return FD;
3973 }
3974 }
3975
3976 return nullptr;
3977}
3978
/// CreateRuntimeFunction - Create a new runtime function with the specified
/// type and name.
///
/// \param FTy the LLVM function type of the runtime function.
/// \param Name the (unmangled) symbol name to look up or create.
/// \param ExtraAttrs extra function attributes to attach on creation.
/// \param Local when true, never mark the function dllimport.
/// \param AssumeConvergent when true, add the 'convergent' attribute.
llvm::FunctionCallee
CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
                                     llvm::AttributeList ExtraAttrs, bool Local,
                                     bool AssumeConvergent) {
  if (AssumeConvergent) {
    ExtraAttrs =
        ExtraAttrs.addFnAttribute(VMContext, llvm::Attribute::Convergent);
  }

  llvm::Constant *C =
      GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
                              /*DontDefer=*/false, /*IsThunk=*/false,
                              ExtraAttrs);

  if (auto *F = dyn_cast<llvm::Function>(C)) {
    // Only adjust a function that has no body yet; one that is already
    // defined has its calling convention and storage class fixed.
    if (F->empty()) {
      F->setCallingConv(getRuntimeCC());

      // In Windows Itanium environments, try to mark runtime functions
      // dllimport. For Mingw and MSVC, don't. We don't really know if the user
      // will link their standard library statically or dynamically. Marking
      // functions imported when they are not imported can cause linker errors
      // and warnings.
      if (!Local && getTriple().isWindowsItaniumEnvironment() &&
          !getCodeGenOpts().LTOVisibilityPublicStd) {
        const FunctionDecl *FD = GetRuntimeFunctionDecl(Context, Name);
        // No decl found, or one explicitly marked dllimport: import it.
        if (!FD || FD->hasAttr<DLLImportAttr>()) {
          F->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
          F->setLinkage(llvm::GlobalValue::ExternalLinkage);
        }
      }
      setDSOLocal(F);
    }
  }

  return {FTy, C};
}
4018
4019/// isTypeConstant - Determine whether an object of this type can be emitted
4020/// as a constant.
4021///
4022/// If ExcludeCtor is true, the duration when the object's constructor runs
4023/// will not be considered. The caller will need to verify that the object is
4024/// not written to during its construction.
4025bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) {
4026 if (!Ty.isConstant(Context) && !Ty->isReferenceType())
4027 return false;
4028
4029 if (Context.getLangOpts().CPlusPlus) {
4030 if (const CXXRecordDecl *Record
4031 = Context.getBaseElementType(Ty)->getAsCXXRecordDecl())
4032 return ExcludeCtor && !Record->hasMutableFields() &&
4033 Record->hasTrivialDestructor();
4034 }
4035
4036 return true;
4037}
4038
/// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
/// create and return an llvm GlobalVariable with the specified type and address
/// space. If there is something in the module with the specified name, return
/// it potentially bitcasted to the right type.
///
/// If D is non-null, it specifies a decl that correspond to this. This is used
/// to set the attributes on the global when it is first created.
///
/// If IsForDefinition is true, it is guaranteed that an actual global with
/// type Ty will be returned, not conversion of a variable with the same
/// mangled name but some other type.
llvm::Constant *
CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
                                     LangAS AddrSpace, const VarDecl *D,
                                     ForDefinition_t IsForDefinition) {
  // Lookup the entry, lazily creating it if necessary.
  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
  unsigned TargetAS = getContext().getTargetAddressSpace(AddrSpace);
  // A global with this mangled name already exists: reuse it if the type and
  // address space match, otherwise reconcile below.
  if (Entry) {
    if (WeakRefReferences.erase(Entry)) {
      // A previous weakref reference exists; a real non-weak decl upgrades
      // the linkage back to external.
      if (D && !D->hasAttr<WeakAttr>())
        Entry->setLinkage(llvm::Function::ExternalLinkage);
    }

    // Handle dropped DLL attributes.
    if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>())
      Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);

    if (LangOpts.OpenMP && !LangOpts.OpenMPSimd && D)
      getOpenMPRuntime().registerTargetGlobalVariable(D, Entry);

    // Exact match: hand back the existing global unchanged.
    if (Entry->getValueType() == Ty && Entry->getAddressSpace() == TargetAS)
      return Entry;

    // If there are two attempts to define the same mangled name, issue an
    // error.
    if (IsForDefinition && !Entry->isDeclaration()) {
      GlobalDecl OtherGD;
      const VarDecl *OtherD;

      // Check that D is not yet in DiagnosedConflictingDefinitions is required
      // to make sure that we issue an error only once.
      if (D && lookupRepresentativeDecl(MangledName, OtherGD) &&
          (D->getCanonicalDecl() != OtherGD.getCanonicalDecl().getDecl()) &&
          (OtherD = dyn_cast<VarDecl>(OtherGD.getDecl())) &&
          OtherD->hasInit() &&
          DiagnosedConflictingDefinitions.insert(D).second) {
        getDiags().Report(D->getLocation(), diag::err_duplicate_mangled_name)
            << MangledName;
        getDiags().Report(OtherGD.getDecl()->getLocation(),
                          diag::note_previous_definition);
      }
    }

    // Make sure the result is of the correct type.
    if (Entry->getType()->getAddressSpace() != TargetAS) {
      return llvm::ConstantExpr::getAddrSpaceCast(Entry,
                                                  Ty->getPointerTo(TargetAS));
    }

    // (If global is requested for a definition, we always need to create a new
    // global, not just return a bitcast.)
    if (!IsForDefinition)
      return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo(TargetAS));
  }

  auto DAddrSpace = GetGlobalVarAddressSpace(D);

  auto *GV = new llvm::GlobalVariable(
      getModule(), Ty, false, llvm::GlobalValue::ExternalLinkage, nullptr,
      MangledName, nullptr, llvm::GlobalVariable::NotThreadLocal,
      getContext().getTargetAddressSpace(DAddrSpace));

  // If we already created a global with the same mangled name (but different
  // type) before, take its name and remove it from its parent.
  if (Entry) {
    GV->takeName(Entry);

    if (!Entry->use_empty()) {
      // Redirect old uses through a bitcast to the new global.
      llvm::Constant *NewPtrForOldDecl =
          llvm::ConstantExpr::getBitCast(GV, Entry->getType());
      Entry->replaceAllUsesWith(NewPtrForOldDecl);
    }

    Entry->eraseFromParent();
  }

  // This is the first use or definition of a mangled name. If there is a
  // deferred decl with this name, remember that we need to emit it at the end
  // of the file.
  auto DDI = DeferredDecls.find(MangledName);
  if (DDI != DeferredDecls.end()) {
    // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
    // list, and remove it from DeferredDecls (since we don't need it anymore).
    addDeferredDeclToEmit(DDI->second);
    DeferredDecls.erase(DDI);
  }

  // Handle things which are present even on external declarations.
  if (D) {
    if (LangOpts.OpenMP && !LangOpts.OpenMPSimd)
      getOpenMPRuntime().registerTargetGlobalVariable(D, GV);

    // FIXME: This code is overly simple and should be merged with other global
    // handling.
    GV->setConstant(isTypeConstant(D->getType(), false));

    GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());

    setLinkageForGV(GV, D);

    if (D->getTLSKind()) {
      // Dynamic TLS variables need wrapper functions emitted later.
      if (D->getTLSKind() == VarDecl::TLS_Dynamic)
        CXXThreadLocals.push_back(D);
      setTLSMode(GV, *D);
    }

    setGVProperties(GV, D);

    // If required by the ABI, treat declarations of static data members with
    // inline initializers as definitions.
    if (getContext().isMSStaticDataMemberInlineDefinition(D)) {
      EmitGlobalVarDefinition(D);
    }

    // Emit section information for extern variables.
    if (D->hasExternalStorage()) {
      if (const SectionAttr *SA = D->getAttr<SectionAttr>())
        GV->setSection(SA->getName());
    }

    // Handle XCore specific ABI requirements.
    if (getTriple().getArch() == llvm::Triple::xcore &&
        D->getLanguageLinkage() == CLanguageLinkage &&
        D->getType().isConstant(Context) &&
        isExternallyVisible(D->getLinkageAndVisibility().getLinkage()))
      GV->setSection(".cp.rodata");

    // Check if we a have a const declaration with an initializer, we may be
    // able to emit it as available_externally to expose it's value to the
    // optimizer.
    if (Context.getLangOpts().CPlusPlus && GV->hasExternalLinkage() &&
        D->getType().isConstQualified() && !GV->hasInitializer() &&
        !D->hasDefinition() && D->hasInit() && !D->hasAttr<DLLImportAttr>()) {
      const auto *Record =
          Context.getBaseElementType(D->getType())->getAsCXXRecordDecl();
      // Mutable fields make the value observable-to-change; skip those.
      bool HasMutableFields = Record && Record->hasMutableFields();
      if (!HasMutableFields) {
        const VarDecl *InitDecl;
        const Expr *InitExpr = D->getAnyInitializer(InitDecl);
        if (InitExpr) {
          ConstantEmitter emitter(*this);
          llvm::Constant *Init = emitter.tryEmitForInitializer(*InitDecl);
          if (Init) {
            auto *InitType = Init->getType();
            if (GV->getValueType() != InitType) {
              // The type of the initializer does not match the definition.
              // This happens when an initializer has a different type from
              // the type of the global (because of padding at the end of a
              // structure for instance).
              GV->setName(StringRef());
              // Make a new global with the correct type, this is now guaranteed
              // to work.
              auto *NewGV = cast<llvm::GlobalVariable>(
                  GetAddrOfGlobalVar(D, InitType, IsForDefinition)
                      ->stripPointerCasts());

              // Erase the old global, since it is no longer used.
              GV->eraseFromParent();
              GV = NewGV;
            } else {
              GV->setInitializer(Init);
              GV->setConstant(true);
              GV->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
            }
            emitter.finalize(GV);
          }
        }
      }
    }
  }

  if (GV->isDeclaration()) {
    // NOTE: D may be null here; setTargetAttributes is presumably expected
    // to tolerate that — confirm against target implementations.
    getTargetCodeGenInfo().setTargetAttributes(D, GV, *this);
    // External HIP managed variables needed to be recorded for transformation
    // in both device and host compilations.
    if (getLangOpts().CUDA && D && D->hasAttr<HIPManagedAttr>() &&
        D->hasExternalStorage())
      getCUDARuntime().handleVarRegistration(D, *GV);
  }

  LangAS ExpectedAS =
      D ? D->getType().getAddressSpace()
        : (LangOpts.OpenCL ? LangAS::opencl_global : LangAS::Default);
  assert(getContext().getTargetAddressSpace(ExpectedAS) == TargetAS);
  if (DAddrSpace != ExpectedAS) {
    // The decl wants a different (language) address space than where the
    // global was materialized; insert the target-specific cast.
    return getTargetCodeGenInfo().performAddrSpaceCast(
        *this, GV, DAddrSpace, ExpectedAS, Ty->getPointerTo(TargetAS));
  }

  return GV;
}
4241
4242llvm::Constant *
4243CodeGenModule::GetAddrOfGlobal(GlobalDecl GD, ForDefinition_t IsForDefinition) {
4244 const Decl *D = GD.getDecl();
4245
4246 if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
4247 return getAddrOfCXXStructor(GD, /*FnInfo=*/nullptr, /*FnType=*/nullptr,
4248 /*DontDefer=*/false, IsForDefinition);
4249
4250 if (isa<CXXMethodDecl>(D)) {
4251 auto FInfo =
4252 &getTypes().arrangeCXXMethodDeclaration(cast<CXXMethodDecl>(D));
4253 auto Ty = getTypes().GetFunctionType(*FInfo);
4254 return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
4255 IsForDefinition);
4256 }
4257
4258 if (isa<FunctionDecl>(D)) {
4259 const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
4260 llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
4261 return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
4262 IsForDefinition);
4263 }
4264
4265 return GetAddrOfGlobalVar(cast<VarDecl>(D), /*Ty=*/nullptr, IsForDefinition);
4266}
4267
/// Create (or find, and possibly replace) a constant global used by the C++
/// runtime, e.g. type-info or VTT objects, with the given name, type, and
/// linkage. If an existing global of the same name has a different type it is
/// replaced and all of its uses are redirected to the new global.
llvm::GlobalVariable *CodeGenModule::CreateOrReplaceCXXRuntimeVariable(
    StringRef Name, llvm::Type *Ty, llvm::GlobalValue::LinkageTypes Linkage,
    unsigned Alignment) {
  llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name);
  llvm::GlobalVariable *OldGV = nullptr;

  if (GV) {
    // Check if the variable has the right type.
    if (GV->getValueType() == Ty)
      return GV;

    // Because C++ name mangling, the only way we can end up with an already
    // existing global with the same name is if it has been declared extern "C".
    assert(GV->isDeclaration() && "Declaration has wrong type!");
    OldGV = GV;
  }

  // Create a new variable.
  GV = new llvm::GlobalVariable(getModule(), Ty, /*isConstant=*/true,
                                Linkage, nullptr, Name);

  if (OldGV) {
    // Replace occurrences of the old variable if needed.
    GV->takeName(OldGV);

    if (!OldGV->use_empty()) {
      // Uses of the old global see the new one through a bitcast.
      llvm::Constant *NewPtrForOldDecl =
          llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
      OldGV->replaceAllUsesWith(NewPtrForOldDecl);
    }

    OldGV->eraseFromParent();
  }

  // Weak-for-linker globals get a COMDAT so the linker keeps only one copy.
  if (supportsCOMDAT() && GV->isWeakForLinker() &&
      !GV->hasAvailableExternallyLinkage())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));

  GV->setAlignment(llvm::MaybeAlign(Alignment));

  return GV;
}
4310
/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
/// given global variable. If Ty is non-null and if the global doesn't exist,
/// then it will be created with the specified type instead of whatever the
/// normal requested type would be. If IsForDefinition is true, it is guaranteed
/// that an actual global with type Ty will be returned, not conversion of a
/// variable with the same mangled name but some other type.
llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
                                                  llvm::Type *Ty,
                                                  ForDefinition_t IsForDefinition) {
  assert(D->hasGlobalStorage() && "Not a global variable");
  QualType ASTTy = D->getType();
  // Default the LLVM type to the memory representation of the AST type.
  if (!Ty)
    Ty = getTypes().ConvertTypeForMem(ASTTy);

  // Defer to GetOrCreateLLVMGlobal keyed by the decl's mangled name, passing
  // along the AST-level address space of the variable's type.
  StringRef MangledName = getMangledName(D);
  return GetOrCreateLLVMGlobal(MangledName, Ty, ASTTy.getAddressSpace(), D,
                               IsForDefinition);
}
4329
4330/// CreateRuntimeVariable - Create a new runtime global variable with the
4331/// specified type and name.
4332llvm::Constant *
4333CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
4334 StringRef Name) {
4335 LangAS AddrSpace = getContext().getLangOpts().OpenCL ? LangAS::opencl_global
4336 : LangAS::Default;
4337 auto *Ret = GetOrCreateLLVMGlobal(Name, Ty, AddrSpace, nullptr);
4338 setDSOLocal(cast<llvm::GlobalValue>(Ret->stripPointerCasts()));
4339 return Ret;
4340}
4341
/// Emit a C tentative definition (a file-scope variable without an
/// initializer) for D, unless a real definition already exists or emission
/// can be deferred.
void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
  assert(!D->getInit() && "Cannot emit definite definitions here!");

  StringRef MangledName = getMangledName(D);
  llvm::GlobalValue *GV = GetGlobalValue(MangledName);

  // We already have a definition, not declaration, with the same mangled name.
  // Emitting of declaration is not required (and actually overwrites emitted
  // definition).
  if (GV && !GV->isDeclaration())
    return;

  // If we have not seen a reference to this variable yet, place it into the
  // deferred declarations table to be emitted if needed later.
  if (!MustBeEmitted(D) && !GV) {
    DeferredDecls[MangledName] = D;
    return;
  }

  // The tentative definition is the only definition.
  EmitGlobalVarDefinition(D);
}
4364
/// Emit code for an external variable declaration; simply forwards to
/// EmitExternalVarDeclaration.
void CodeGenModule::EmitExternalDeclaration(const VarDecl *D) {
  EmitExternalVarDeclaration(D);
}
4368
4369CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
4370 return Context.toCharUnitsFromBits(
4371 getDataLayout().getTypeStoreSizeInBits(Ty));
4372}
4373
4374LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) {
4375 if (LangOpts.OpenCL) {
4376 LangAS AS = D ? D->getType().getAddressSpace() : LangAS::opencl_global;
4377 assert(AS == LangAS::opencl_global ||(static_cast <bool> (AS == LangAS::opencl_global || AS ==
LangAS::opencl_global_device || AS == LangAS::opencl_global_host
|| AS == LangAS::opencl_constant || AS == LangAS::opencl_local
|| AS >= LangAS::FirstTargetAddressSpace) ? void (0) : __assert_fail
("AS == LangAS::opencl_global || AS == LangAS::opencl_global_device || AS == LangAS::opencl_global_host || AS == LangAS::opencl_constant || AS == LangAS::opencl_local || AS >= LangAS::FirstTargetAddressSpace"
, "clang/lib/CodeGen/CodeGenModule.cpp", 4382, __extension__ __PRETTY_FUNCTION__
))
4378 AS == LangAS::opencl_global_device ||(static_cast <bool> (AS == LangAS::opencl_global || AS ==
LangAS::opencl_global_device || AS == LangAS::opencl_global_host
|| AS == LangAS::opencl_constant || AS == LangAS::opencl_local
|| AS >= LangAS::FirstTargetAddressSpace) ? void (0) : __assert_fail
("AS == LangAS::opencl_global || AS == LangAS::opencl_global_device || AS == LangAS::opencl_global_host || AS == LangAS::opencl_constant || AS == LangAS::opencl_local || AS >= LangAS::FirstTargetAddressSpace"
, "clang/lib/CodeGen/CodeGenModule.cpp", 4382, __extension__ __PRETTY_FUNCTION__
))
4379 AS == LangAS::opencl_global_host ||(static_cast <bool> (AS == LangAS::opencl_global || AS ==
LangAS::opencl_global_device || AS == LangAS::opencl_global_host
|| AS == LangAS::opencl_constant || AS == LangAS::opencl_local
|| AS >= LangAS::FirstTargetAddressSpace) ? void (0) : __assert_fail
("AS == LangAS::opencl_global || AS == LangAS::opencl_global_device || AS == LangAS::opencl_global_host || AS == LangAS::opencl_constant || AS == LangAS::opencl_local || AS >= LangAS::FirstTargetAddressSpace"
, "clang/lib/CodeGen/CodeGenModule.cpp", 4382, __extension__ __PRETTY_FUNCTION__
))
4380 AS == LangAS::opencl_constant ||(static_cast <bool> (AS == LangAS::opencl_global || AS ==
LangAS::opencl_global_device || AS == LangAS::opencl_global_host
|| AS == LangAS::opencl_constant || AS == LangAS::opencl_local
|| AS >= LangAS::FirstTargetAddressSpace) ? void (0) : __assert_fail
("AS == LangAS::opencl_global || AS == LangAS::opencl_global_device || AS == LangAS::opencl_global_host || AS == LangAS::opencl_constant || AS == LangAS::opencl_local || AS >= LangAS::FirstTargetAddressSpace"
, "clang/lib/CodeGen/CodeGenModule.cpp", 4382, __extension__ __PRETTY_FUNCTION__
))
4381 AS == LangAS::opencl_local ||(static_cast <bool> (AS == LangAS::opencl_global || AS ==
LangAS::opencl_global_device || AS == LangAS::opencl_global_host
|| AS == LangAS::opencl_constant || AS == LangAS::opencl_local
|| AS >= LangAS::FirstTargetAddressSpace) ? void (0) : __assert_fail
("AS == LangAS::opencl_global || AS == LangAS::opencl_global_device || AS == LangAS::opencl_global_host || AS == LangAS::opencl_constant || AS == LangAS::opencl_local || AS >= LangAS::FirstTargetAddressSpace"
, "clang/lib/CodeGen/CodeGenModule.cpp", 4382, __extension__ __PRETTY_FUNCTION__
))
4382 AS >= LangAS::FirstTargetAddressSpace)(static_cast <bool> (AS == LangAS::opencl_global || AS ==
LangAS::opencl_global_device || AS == LangAS::opencl_global_host
|| AS == LangAS::opencl_constant || AS == LangAS::opencl_local
|| AS >= LangAS::FirstTargetAddressSpace) ? void (0) : __assert_fail
("AS == LangAS::opencl_global || AS == LangAS::opencl_global_device || AS == LangAS::opencl_global_host || AS == LangAS::opencl_constant || AS == LangAS::opencl_local || AS >= LangAS::FirstTargetAddressSpace"
, "clang/lib/CodeGen/CodeGenModule.cpp", 4382, __extension__ __PRETTY_FUNCTION__
))
;
4383 return AS;
4384 }
4385
4386 if (LangOpts.SYCLIsDevice &&
4387 (!D || D->getType().getAddressSpace() == LangAS::Default))
4388 return LangAS::sycl_global;
4389
4390 if (LangOpts.CUDA && LangOpts.CUDAIsDevice) {
4391 if (D && D->hasAttr<CUDAConstantAttr>())
4392 return LangAS::cuda_constant;
4393 else if (D && D->hasAttr<CUDASharedAttr>())
4394 return LangAS::cuda_shared;
4395 else if (D && D->hasAttr<CUDADeviceAttr>())
4396 return LangAS::cuda_device;
4397 else if (D && D->getType().isConstQualified())
4398 return LangAS::cuda_constant;
4399 else
4400 return LangAS::cuda_device;
4401 }
4402
4403 if (LangOpts.OpenMP) {
4404 LangAS AS;
4405 if (OpenMPRuntime->hasAllocateAttributeForGlobalVar(D, AS))
4406 return AS;
4407 }
4408 return getTargetCodeGenInfo().getGlobalVarAddressSpace(*this, D);
4409}
4410
4411LangAS CodeGenModule::GetGlobalConstantAddressSpace() const {
4412 // OpenCL v1.2 s6.5.3: a string literal is in the constant address space.
4413 if (LangOpts.OpenCL)
4414 return LangAS::opencl_constant;
4415 if (LangOpts.SYCLIsDevice)
4416 return LangAS::sycl_global;
4417 if (LangOpts.HIP && LangOpts.CUDAIsDevice && getTriple().isSPIRV())
4418 // For HIPSPV map literals to cuda_device (maps to CrossWorkGroup in SPIR-V)
4419 // instead of default AS (maps to Generic in SPIR-V). Otherwise, we end up
4420 // with OpVariable instructions with Generic storage class which is not
4421 // allowed (SPIR-V V1.6 s3.42.8). Also, mapping literals to SPIR-V
4422 // UniformConstant storage class is not viable as pointers to it may not be
4423 // casted to Generic pointers which are used to model HIP's "flat" pointers.
4424 return LangAS::cuda_device;
4425 if (auto AS = getTarget().getConstantAddressSpace())
4426 return AS.getValue();
4427 return LangAS::Default;
4428}
4429
4430// In address space agnostic languages, string literals are in default address
4431// space in AST. However, certain targets (e.g. amdgcn) request them to be
4432// emitted in constant address space in LLVM IR. To be consistent with other
4433// parts of AST, string literal global variables in constant address space
4434// need to be casted to default address space before being put into address
4435// map and referenced by other part of CodeGen.
4436// In OpenCL, string literals are in constant address space in AST, therefore
4437// they should not be casted to default address space.
4438static llvm::Constant *
4439castStringLiteralToDefaultAddressSpace(CodeGenModule &CGM,
4440 llvm::GlobalVariable *GV) {
4441 llvm::Constant *Cast = GV;
4442 if (!CGM.getLangOpts().OpenCL) {
4443 auto AS = CGM.GetGlobalConstantAddressSpace();
4444 if (AS != LangAS::Default)
4445 Cast = CGM.getTargetCodeGenInfo().performAddrSpaceCast(
4446 CGM, GV, AS, LangAS::Default,
4447 GV->getValueType()->getPointerTo(
4448 CGM.getContext().getTargetAddressSpace(LangAS::Default)));
4449 }
4450 return Cast;
4451}
4452
4453template<typename SomeDecl>
4454void CodeGenModule::MaybeHandleStaticInExternC(const SomeDecl *D,
4455 llvm::GlobalValue *GV) {
4456 if (!getLangOpts().CPlusPlus)
4457 return;
4458
4459 // Must have 'used' attribute, or else inline assembly can't rely on
4460 // the name existing.
4461 if (!D->template hasAttr<UsedAttr>())
4462 return;
4463
4464 // Must have internal linkage and an ordinary name.
4465 if (!D->getIdentifier() || D->getFormalLinkage() != InternalLinkage)
4466 return;
4467
4468 // Must be in an extern "C" context. Entities declared directly within
4469 // a record are not extern "C" even if the record is in such a context.
4470 const SomeDecl *First = D->getFirstDecl();
4471 if (First->getDeclContext()->isRecord() || !First->isInExternCContext())
4472 return;
4473
4474 // OK, this is an internal linkage entity inside an extern "C" linkage
4475 // specification. Make a note of that so we can give it the "expected"
4476 // mangled name if nothing else is using that name.
4477 std::pair<StaticExternCMap::iterator, bool> R =
4478 StaticExternCValues.insert(std::make_pair(D->getIdentifier(), GV));
4479
4480 // If we have multiple internal linkage entities with the same name
4481 // in extern "C" regions, none of them gets that name.
4482 if (!R.second)
4483 R.first->second = nullptr;
4484}
4485
/// Return true if the given decl should be emitted into a COMDAT group so the
/// linker can deduplicate copies across translation units.
static bool shouldBeInCOMDAT(CodeGenModule &CGM, const Decl &D) {
  if (!CGM.supportsCOMDAT())
    return false;

  // 'selectany' explicitly requests COMDAT semantics.
  if (D.hasAttr<SelectAnyAttr>())
    return true;

  GVALinkage Linkage;
  if (auto *VD = dyn_cast<VarDecl>(&D))
    Linkage = CGM.getContext().GetGVALinkageForVariable(VD);
  else
    Linkage = CGM.getContext().GetGVALinkageForFunction(cast<FunctionDecl>(&D));

  switch (Linkage) {
  case GVA_Internal:
  case GVA_AvailableExternally:
  case GVA_StrongExternal:
    return false;
  case GVA_DiscardableODR:
  case GVA_StrongODR:
    // ODR linkage entities may be emitted in multiple TUs; COMDAT lets the
    // linker keep a single copy.
    return true;
  }
  llvm_unreachable("No such linkage");
}
4510
4511void CodeGenModule::maybeSetTrivialComdat(const Decl &D,
4512 llvm::GlobalObject &GO) {
4513 if (!shouldBeInCOMDAT(*this, D))
4514 return;
4515 GO.setComdat(TheModule.getOrInsertComdat(GO.getName()));
4516}
4517
/// Pass IsTentative as true if you want to create a tentative definition.
void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
                                            bool IsTentative) {
  // OpenCL global variables of sampler type are translated to function calls,
  // therefore no need to be translated.
  QualType ASTTy = D->getType();
  if (getLangOpts().OpenCL && ASTTy->isSamplerT())
    return;

  // If this is OpenMP device, check if it is legal to emit this global
  // normally.
  if (LangOpts.OpenMPIsDevice && OpenMPRuntime &&
      OpenMPRuntime->emitTargetGlobalVariable(D))
    return;

  llvm::TrackingVH<llvm::Constant> Init;
  bool NeedsGlobalCtor = false;
  // A non-trivial C++ destructor requires a registered global dtor.
  bool NeedsGlobalDtor =
      D->needsDestruction(getContext()) == QualType::DK_cxx_destructor;

  const VarDecl *InitDecl;
  const Expr *InitExpr = D->getAnyInitializer(InitDecl);

  Optional<ConstantEmitter> emitter;

  // CUDA E.2.4.1 "__shared__ variables cannot have an initialization
  // as part of their declaration." Sema has already checked for
  // error cases, so we just need to set Init to UndefValue.
  bool IsCUDASharedVar =
      getLangOpts().CUDAIsDevice && D->hasAttr<CUDASharedAttr>();
  // Shadows of initialized device-side global variables are also left
  // undefined.
  // Managed Variables should be initialized on both host side and device side.
  bool IsCUDAShadowVar =
      !getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() &&
      (D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
       D->hasAttr<CUDASharedAttr>());
  bool IsCUDADeviceShadowVar =
      getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() &&
      (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
       D->getType()->isCUDADeviceBuiltinTextureType());
  if (getLangOpts().CUDA &&
      (IsCUDASharedVar || IsCUDAShadowVar || IsCUDADeviceShadowVar))
    Init = llvm::UndefValue::get(getTypes().ConvertTypeForMem(ASTTy));
  else if (D->hasAttr<LoaderUninitializedAttr>())
    // [[clang::loader_uninitialized]]: explicitly uninitialized storage.
    Init = llvm::UndefValue::get(getTypes().ConvertTypeForMem(ASTTy));
  else if (!InitExpr) {
    // This is a tentative definition; tentative definitions are
    // implicitly initialized with { 0 }.
    //
    // Note that tentative definitions are only emitted at the end of
    // a translation unit, so they should never have incomplete
    // type. In addition, EmitTentativeDefinition makes sure that we
    // never attempt to emit a tentative definition if a real one
    // exists. A use may still exists, however, so we still may need
    // to do a RAUW.
    assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
    Init = EmitNullConstant(D->getType());
  } else {
    initializedGlobalDecl = GlobalDecl(D);
    emitter.emplace(*this);
    llvm::Constant *Initializer = emitter->tryEmitForInitializer(*InitDecl);
    if (!Initializer) {
      // The initializer is not a constant expression.
      QualType T = InitExpr->getType();
      if (D->getType()->isReferenceType())
        T = D->getType();

      if (getLangOpts().CPlusPlus) {
        // In C++, emit a zero initializer now and run the real initializer
        // dynamically via a global constructor.
        Init = EmitNullConstant(T);
        NeedsGlobalCtor = true;
      } else {
        // C requires static initializers; this is a hard error.
        ErrorUnsupported(D, "static initializer");
        Init = llvm::UndefValue::get(getTypes().ConvertType(T));
      }
    } else {
      Init = Initializer;
      // We don't need an initializer, so remove the entry for the delayed
      // initializer position (just in case this entry was delayed) if we
      // also don't need to register a destructor.
      if (getLangOpts().CPlusPlus && !NeedsGlobalDtor)
        DelayedCXXInitPosition.erase(D);
    }
  }

  llvm::Type* InitType = Init->getType();
  llvm::Constant *Entry =
      GetAddrOfGlobalVar(D, InitType, ForDefinition_t(!IsTentative));

  // Strip off pointer casts if we got them.
  Entry = Entry->stripPointerCasts();

  // Entry is now either a Function or GlobalVariable.
  auto *GV = dyn_cast<llvm::GlobalVariable>(Entry);

  // We have a definition after a declaration with the wrong type.
  // We must make a new GlobalVariable* and update everything that used OldGV
  // (a declaration or tentative definition) with the new GlobalVariable*
  // (which will be a definition).
  //
  // This happens if there is a prototype for a global (e.g.
  // "extern int x[];") and then a definition of a different type (e.g.
  // "int x[10];"). This also happens when an initializer has a different type
  // from the type of the global (this happens with unions).
  if (!GV || GV->getValueType() != InitType ||
      GV->getType()->getAddressSpace() !=
          getContext().getTargetAddressSpace(GetGlobalVarAddressSpace(D))) {

    // Move the old entry aside so that we'll create a new one.
    Entry->setName(StringRef());

    // Make a new global with the correct type, this is now guaranteed to work.
    GV = cast<llvm::GlobalVariable>(
        GetAddrOfGlobalVar(D, InitType, ForDefinition_t(!IsTentative))
            ->stripPointerCasts());

    // Replace all uses of the old global with the new global
    llvm::Constant *NewPtrForOldDecl =
        llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
                                                             Entry->getType());
    Entry->replaceAllUsesWith(NewPtrForOldDecl);

    // Erase the old global, since it is no longer used.
    cast<llvm::GlobalValue>(Entry)->eraseFromParent();
  }

  MaybeHandleStaticInExternC(D, GV);

  if (D->hasAttr<AnnotateAttr>())
    AddGlobalAnnotations(D, GV);

  // Set the llvm linkage type as appropriate.
  llvm::GlobalValue::LinkageTypes Linkage =
      getLLVMLinkageVarDefinition(D, GV->isConstant());

  // CUDA B.2.1 "The __device__ qualifier declares a variable that resides on
  // the device. [...]"
  // CUDA B.2.2 "The __constant__ qualifier, optionally used together with
  // __device__, declares a variable that: [...]
  // Is accessible from all the threads within the grid and from the host
  // through the runtime library (cudaGetSymbolAddress() / cudaGetSymbolSize()
  // / cudaMemcpyToSymbol() / cudaMemcpyFromSymbol())."
  if (GV && LangOpts.CUDA) {
    if (LangOpts.CUDAIsDevice) {
      if (Linkage != llvm::GlobalValue::InternalLinkage &&
          (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
           D->getType()->isCUDADeviceBuiltinSurfaceType() ||
           D->getType()->isCUDADeviceBuiltinTextureType()))
        GV->setExternallyInitialized(true);
    } else {
      getCUDARuntime().internalizeDeviceSideVar(D, Linkage);
    }
    getCUDARuntime().handleVarRegistration(D, *GV);
  }

  GV->setInitializer(Init);
  if (emitter)
    emitter->finalize(GV);

  // If it is safe to mark the global 'constant', do so now.
  GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
                  isTypeConstant(D->getType(), true));

  // If it is in a read-only section, mark it 'constant'.
  if (const SectionAttr *SA = D->getAttr<SectionAttr>()) {
    const ASTContext::SectionInfo &SI = Context.SectionInfos[SA->getName()];
    if ((SI.SectionFlags & ASTContext::PSF_Write) == 0)
      GV->setConstant(true);
  }

  GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());

  // On Darwin, unlike other Itanium C++ ABI platforms, the thread-wrapper
  // function is only defined alongside the variable, not also alongside
  // callers. Normally, all accesses to a thread_local go through the
  // thread-wrapper in order to ensure initialization has occurred, underlying
  // variable will never be used other than the thread-wrapper, so it can be
  // converted to internal linkage.
  //
  // However, if the variable has the 'constinit' attribute, it _can_ be
  // referenced directly, without calling the thread-wrapper, so the linkage
  // must not be changed.
  //
  // Additionally, if the variable isn't plain external linkage, e.g. if it's
  // weak or linkonce, the de-duplication semantics are important to preserve,
  // so we don't change the linkage.
  if (D->getTLSKind() == VarDecl::TLS_Dynamic &&
      Linkage == llvm::GlobalValue::ExternalLinkage &&
      Context.getTargetInfo().getTriple().isOSDarwin() &&
      !D->hasAttr<ConstInitAttr>())
    Linkage = llvm::GlobalValue::InternalLinkage;

  GV->setLinkage(Linkage);
  if (D->hasAttr<DLLImportAttr>())
    GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
  else if (D->hasAttr<DLLExportAttr>())
    GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
  else
    GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);

  if (Linkage == llvm::GlobalVariable::CommonLinkage) {
    // common vars aren't constant even if declared const.
    GV->setConstant(false);
    // Tentative definition of global variables may be initialized with
    // non-zero null pointers. In this case they should have weak linkage
    // since common linkage must have zero initializer and must not have
    // explicit section therefore cannot have non-zero initial value.
    if (!GV->getInitializer()->isNullValue())
      GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
  }

  setNonAliasAttributes(D, GV);

  if (D->getTLSKind() && !GV->isThreadLocal()) {
    // Dynamic TLS variables need their wrappers emitted later.
    if (D->getTLSKind() == VarDecl::TLS_Dynamic)
      CXXThreadLocals.push_back(D);
    setTLSMode(GV, *D);
  }

  maybeSetTrivialComdat(*D, *GV);

  // Emit the initializer function if necessary.
  if (NeedsGlobalCtor || NeedsGlobalDtor)
    EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);

  SanitizerMD->reportGlobalToASan(GV, *D, NeedsGlobalCtor);

  // Emit global variable debug information.
  if (CGDebugInfo *DI = getModuleDebugInfo())
    if (getCodeGenOpts().hasReducedDebugInfo())
      DI->EmitGlobalVariable(GV, D);
}
4749
4750void CodeGenModule::EmitExternalVarDeclaration(const VarDecl *D) {
4751 if (CGDebugInfo *DI = getModuleDebugInfo())
4752 if (getCodeGenOpts().hasReducedDebugInfo()) {
4753 QualType ASTTy = D->getType();
4754 llvm::Type *Ty = getTypes().ConvertTypeForMem(D->getType());
4755 llvm::Constant *GV =
4756 GetOrCreateLLVMGlobal(D->getName(), Ty, ASTTy.getAddressSpace(), D);
4757 DI->EmitExternalVariable(
4758 cast<llvm::GlobalVariable>(GV->stripPointerCasts()), D);
4759 }
4760}
4761
4762static bool isVarDeclStrongDefinition(const ASTContext &Context,
4763 CodeGenModule &CGM, const VarDecl *D,
4764 bool NoCommon) {
4765 // Don't give variables common linkage if -fno-common was specified unless it
4766 // was overridden by a NoCommon attribute.
4767 if ((NoCommon || D->hasAttr<NoCommonAttr>()) && !D->hasAttr<CommonAttr>())
4768 return true;
4769
4770 // C11 6.9.2/2:
4771 // A declaration of an identifier for an object that has file scope without
4772 // an initializer, and without a storage-class specifier or with the
4773 // storage-class specifier static, constitutes a tentative definition.
4774 if (D->getInit() || D->hasExternalStorage())
4775 return true;
4776
4777 // A variable cannot be both common and exist in a section.
4778 if (D->hasAttr<SectionAttr>())
4779 return true;
4780
4781 // A variable cannot be both common and exist in a section.
4782 // We don't try to determine which is the right section in the front-end.
4783 // If no specialized section name is applicable, it will resort to default.
4784 if (D->hasAttr<PragmaClangBSSSectionAttr>() ||
4785 D->hasAttr<PragmaClangDataSectionAttr>() ||
4786 D->hasAttr<PragmaClangRelroSectionAttr>() ||
4787 D->hasAttr<PragmaClangRodataSectionAttr>())
4788 return true;
4789
4790 // Thread local vars aren't considered common linkage.
4791 if (D->getTLSKind())
4792 return true;
4793
4794 // Tentative definitions marked with WeakImportAttr are true definitions.
4795 if (D->hasAttr<WeakImportAttr>())
4796 return true;
4797
4798 // A variable cannot be both common and exist in a comdat.
4799 if (shouldBeInCOMDAT(CGM, *D))
4800 return true;
4801
4802 // Declarations with a required alignment do not have common linkage in MSVC
4803 // mode.
4804 if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
4805 if (D->hasAttr<AlignedAttr>())
4806 return true;
4807 QualType VarType = D->getType();
4808 if (Context.isAlignmentRequired(VarType))
4809 return true;
4810
4811 if (const auto *RT = VarType->getAs<RecordType>()) {
4812 const RecordDecl *RD = RT->getDecl();
4813 for (const FieldDecl *FD : RD->fields()) {
4814 if (FD->isBitField())
4815 continue;
4816 if (FD->hasAttr<AlignedAttr>())
4817 return true;
4818 if (Context.isAlignmentRequired(FD->getType()))
4819 return true;
4820 }
4821 }
4822 }
4823
4824 // Microsoft's link.exe doesn't support alignments greater than 32 bytes for
4825 // common symbols, so symbols with greater alignment requirements cannot be
4826 // common.
4827 // Other COFF linkers (ld.bfd and LLD) support arbitrary power-of-two
4828 // alignments for common symbols via the aligncomm directive, so this
4829 // restriction only applies to MSVC environments.
4830 if (Context.getTargetInfo().getTriple().isKnownWindowsMSVCEnvironment() &&
4831 Context.getTypeAlignIfKnown(D->getType()) >
4832 Context.toBits(CharUnits::fromQuantity(32)))
4833 return true;
4834
4835 return false;
4836}
4837
llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
    const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable) {
  // Map the AST-level GVALinkage (plus attributes on the declaration) onto
  // an LLVM linkage for the emitted global. Checks are ordered from most to
  // least specific.
  if (Linkage == GVA_Internal)
    return llvm::Function::InternalLinkage;

  // __attribute__((weak)): constants can use weak_odr (all definitions must
  // agree); mutable variables get plain weak linkage.
  if (D->hasAttr<WeakAttr>()) {
    if (IsConstantVariable)
      return llvm::GlobalVariable::WeakODRLinkage;
    else
      return llvm::GlobalVariable::WeakAnyLinkage;
  }

  // Multiversioned functions that would otherwise be available_externally
  // still need an emittable, mergeable definition in this TU.
  if (const auto *FD = D->getAsFunction())
    if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally)
      return llvm::GlobalVariable::LinkOnceAnyLinkage;

  // We are guaranteed to have a strong definition somewhere else,
  // so we can use available_externally linkage.
  if (Linkage == GVA_AvailableExternally)
    return llvm::GlobalValue::AvailableExternallyLinkage;

  // Note that Apple's kernel linker doesn't support symbol
  // coalescing, so we need to avoid linkonce and weak linkages there.
  // Normally, this means we just map to internal, but for explicit
  // instantiations we'll map to external.

  // In C++, the compiler has to emit a definition in every translation unit
  // that references the function.  We should use linkonce_odr because
  // a) if all references in this translation unit are optimized away, we
  // don't need to codegen it.  b) if the function persists, it needs to be
  // merged with other definitions. c) C++ has the ODR, so we know the
  // definition is dependable.
  if (Linkage == GVA_DiscardableODR)
    return !Context.getLangOpts().AppleKext ? llvm::Function::LinkOnceODRLinkage
                                            : llvm::Function::InternalLinkage;

  // An explicit instantiation of a template has weak linkage, since
  // explicit instantiations can occur in multiple translation units
  // and must all be equivalent. However, we are not allowed to
  // throw away these explicit instantiations.
  //
  // CUDA/HIP: For -fno-gpu-rdc case, device code is limited to one TU,
  // so say that CUDA templates are either external (for kernels) or internal.
  // This lets llvm perform aggressive inter-procedural optimizations. For
  // -fgpu-rdc case, device function calls across multiple TU's are allowed,
  // therefore we need to follow the normal linkage paradigm.
  if (Linkage == GVA_StrongODR) {
    if (getLangOpts().AppleKext)
      return llvm::Function::ExternalLinkage;
    if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
        !getLangOpts().GPURelocatableDeviceCode)
      return D->hasAttr<CUDAGlobalAttr>() ? llvm::Function::ExternalLinkage
                                          : llvm::Function::InternalLinkage;
    return llvm::Function::WeakODRLinkage;
  }

  // C++ doesn't have tentative definitions and thus cannot have common
  // linkage.
  if (!getLangOpts().CPlusPlus && isa<VarDecl>(D) &&
      !isVarDeclStrongDefinition(Context, *this, cast<VarDecl>(D),
                                 CodeGenOpts.NoCommon))
    return llvm::GlobalVariable::CommonLinkage;

  // selectany symbols are externally visible, so use weak instead of
  // linkonce.  MSVC optimizes away references to const selectany globals, so
  // all definitions should be the same and ODR linkage should be used.
  // http://msdn.microsoft.com/en-us/library/5tkz6s71.aspx
  if (D->hasAttr<SelectAnyAttr>())
    return llvm::GlobalVariable::WeakODRLinkage;

  // Otherwise, we have strong external linkage.
  assert(Linkage == GVA_StrongExternal);
  return llvm::GlobalVariable::ExternalLinkage;
}
4912
4913llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageVarDefinition(
4914 const VarDecl *VD, bool IsConstant) {
4915 GVALinkage Linkage = getContext().GetGVALinkageForVariable(VD);
4916 return getLLVMLinkageForDeclarator(VD, Linkage, IsConstant);
4917}
4918
/// Replace the uses of a function that was declared with a non-proto type.
/// We want to silently drop extra arguments from call sites
static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
                                          llvm::Function *newFn) {
  // Fast path.
  if (old->use_empty()) return;

  llvm::Type *newRetTy = newFn->getReturnType();
  SmallVector<llvm::Value*, 4> newArgs;

  // Walk every use of the old constant. The iterator is advanced before any
  // mutation so that erasing the current user is safe.
  for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end();
       ui != ue; ) {
    llvm::Value::use_iterator use = ui++; // Increment before the use is erased.
    llvm::User *user = use->getUser();

    // Recognize and replace uses of bitcasts.  Most calls to
    // unprototyped functions will use bitcasts.
    if (auto *bitcast = dyn_cast<llvm::ConstantExpr>(user)) {
      if (bitcast->getOpcode() == llvm::Instruction::BitCast)
        replaceUsesOfNonProtoConstant(bitcast, newFn);
      continue;
    }

    // Recognize calls to the function; any other kind of use (and any use
    // where the function is an argument rather than the callee) is left
    // untouched.
    llvm::CallBase *callSite = dyn_cast<llvm::CallBase>(user);
    if (!callSite) continue;
    if (!callSite->isCallee(&*use))
      continue;

    // If the return types don't match exactly, then we can't
    // transform this call unless it's dead.
    if (callSite->getType() != newRetTy && !callSite->use_empty())
      continue;

    // Get the call site's attribute list.
    SmallVector<llvm::AttributeSet, 8> newArgAttrs;
    llvm::AttributeList oldAttrs = callSite->getAttributes();

    // If the function was passed too few arguments, don't transform.
    unsigned newNumArgs = newFn->arg_size();
    if (callSite->arg_size() < newNumArgs)
      continue;

    // If extra arguments were passed, we silently drop them.
    // If any of the types mismatch, we don't transform.
    unsigned argNo = 0;
    bool dontTransform = false;
    for (llvm::Argument &A : newFn->args()) {
      if (callSite->getArgOperand(argNo)->getType() != A.getType()) {
        dontTransform = true;
        break;
      }

      // Add any parameter attributes.
      newArgAttrs.push_back(oldAttrs.getParamAttrs(argNo));
      argNo++;
    }
    if (dontTransform)
      continue;

    // Okay, we can transform this.  Create the new call instruction and copy
    // over the required information. Only the first argNo operands are kept;
    // the surplus arguments are dropped.
    newArgs.append(callSite->arg_begin(), callSite->arg_begin() + argNo);

    // Copy over any operand bundles.
    SmallVector<llvm::OperandBundleDef, 1> newBundles;
    callSite->getOperandBundlesAsDefs(newBundles);

    // Recreate the call with the matching flavor (call vs. invoke),
    // preserving the unwind destinations for invokes.
    llvm::CallBase *newCall;
    if (isa<llvm::CallInst>(callSite)) {
      newCall =
          llvm::CallInst::Create(newFn, newArgs, newBundles, "", callSite);
    } else {
      auto *oldInvoke = cast<llvm::InvokeInst>(callSite);
      newCall = llvm::InvokeInst::Create(newFn, oldInvoke->getNormalDest(),
                                         oldInvoke->getUnwindDest(), newArgs,
                                         newBundles, "", callSite);
    }
    newArgs.clear(); // for the next iteration

    if (!newCall->getType()->isVoidTy())
      newCall->takeName(callSite);
    newCall->setAttributes(
        llvm::AttributeList::get(newFn->getContext(), oldAttrs.getFnAttrs(),
                                 oldAttrs.getRetAttrs(), newArgAttrs));
    newCall->setCallingConv(callSite->getCallingConv());

    // Finally, remove the old call, replacing any uses with the new one.
    if (!callSite->use_empty())
      callSite->replaceAllUsesWith(newCall);

    // Copy debug location attached to CI.
    if (callSite->getDebugLoc())
      newCall->setDebugLoc(callSite->getDebugLoc());

    callSite->eraseFromParent();
  }
}
5017
5018/// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
5019/// implement a function with no prototype, e.g. "int foo() {}". If there are
5020/// existing call uses of the old function in the module, this adjusts them to
5021/// call the new function directly.
5022///
5023/// This is not just a cleanup: the always_inline pass requires direct calls to
5024/// functions to be able to inline them. If there is a bitcast in the way, it
5025/// won't inline them. Instcombine normally deletes these calls, but it isn't
5026/// run at -O0.
5027static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
5028 llvm::Function *NewFn) {
5029 // If we're redefining a global as a function, don't transform it.
5030 if (!isa<llvm::Function>(Old)) return;
5031
5032 replaceUsesOfNonProtoConstant(Old, NewFn);
5033}
5034
5035void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
5036 auto DK = VD->isThisDeclarationADefinition();
5037 if (DK == VarDecl::Definition && VD->hasAttr<DLLImportAttr>())
5038 return;
5039
5040 TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind();
5041 // If we have a definition, this might be a deferred decl. If the
5042 // instantiation is explicit, make sure we emit it at the end.
5043 if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition)
5044 GetAddrOfGlobalVar(VD);
5045
5046 EmitTopLevelDecl(VD);
5047}
5048
void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
                                                 llvm::GlobalValue *GV) {
  const auto *D = cast<FunctionDecl>(GD.getDecl());

  // Compute the function info and LLVM type.
  const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
  llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);

  // Get or create the prototype for the function. If GV was supplied but has
  // the wrong value type, GetAddrOfFunction produces a correctly-typed one.
  if (!GV || (GV->getValueType() != Ty))
    GV = cast<llvm::GlobalValue>(GetAddrOfFunction(GD, Ty, /*ForVTable=*/false,
                                                   /*DontDefer=*/true,
                                                   ForDefinition));

  // Already emitted.
  if (!GV->isDeclaration())
    return;

  // We need to set linkage and visibility on the function before
  // generating code for it because various parts of IR generation
  // want to propagate this information down (e.g. to local static
  // declarations).
  auto *Fn = cast<llvm::Function>(GV);
  setFunctionLinkage(GD, Fn);

  // FIXME: this is redundant with part of setFunctionDefinitionAttributes
  setGVProperties(Fn, GD);

  MaybeHandleStaticInExternC(D, Fn);

  maybeSetTrivialComdat(*D, *Fn);

  // Set CodeGen attributes that represent floating point environment.
  setLLVMFunctionFEnvAttributes(D, Fn);

  // Generate the body of the function.
  CodeGenFunction(*this).GenerateCode(GD, Fn, FI);

  // Attributes that depend on the emitted body are applied afterwards.
  setNonAliasAttributes(GD, Fn);
  SetLLVMFunctionAttributesForDefinition(D, Fn);

  // Register global constructor/destructor priorities and annotations
  // declared via attributes.
  if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
    AddGlobalCtor(Fn, CA->getPriority());
  if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
    AddGlobalDtor(Fn, DA->getPriority(), true);
  if (D->hasAttr<AnnotateAttr>())
    AddGlobalAnnotations(D, Fn);
}
5096
void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
  const auto *D = cast<ValueDecl>(GD.getDecl());
  const AliasAttr *AA = D->getAttr<AliasAttr>();
  assert(AA && "Not an alias?");

  StringRef MangledName = getMangledName(GD);

  // A directly self-referential alias is a hard error.
  if (AA->getAliasee() == MangledName) {
    Diags.Report(AA->getLocation(), diag::err_cyclic_alias) << 0;
    return;
  }

  // If there is a definition in the module, then it wins over the alias.
  // This is dubious, but allow it to be safe.  Just ignore the alias.
  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
  if (Entry && !Entry->isDeclaration())
    return;

  Aliases.push_back(GD);

  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());

  // Create a reference to the named value.  This ensures that it is emitted
  // if a deferred decl.
  llvm::Constant *Aliasee;
  llvm::GlobalValue::LinkageTypes LT;
  if (isa<llvm::FunctionType>(DeclTy)) {
    Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GD,
                                      /*ForVTable=*/false);
    LT = getFunctionLinkage(GD);
  } else {
    Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(), DeclTy, LangAS::Default,
                                    /*D=*/nullptr);
    if (const auto *VD = dyn_cast<VarDecl>(GD.getDecl()))
      LT = getLLVMLinkageVarDefinition(VD, D->getType().isConstQualified());
    else
      LT = getFunctionLinkage(GD);
  }

  // Create the new alias itself, but don't set a name yet.
  unsigned AS = Aliasee->getType()->getPointerAddressSpace();
  auto *GA =
      llvm::GlobalAlias::create(DeclTy, AS, LT, "", Aliasee, &getModule());

  if (Entry) {
    // If the aliasee resolved to the previously-seen declaration of the same
    // symbol, the alias would refer to itself.
    if (GA->getAliasee() == Entry) {
      Diags.Report(AA->getLocation(), diag::err_cyclic_alias) << 0;
      return;
    }

    assert(Entry->isDeclaration());

    // If there is a declaration in the module, then we had an extern followed
    // by the alias, as in:
    //   extern int test6();
    //   ...
    //   int test6() __attribute__((alias("test7")));
    //
    // Remove it and replace uses of it with the alias.
    GA->takeName(Entry);

    Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
                                                             Entry->getType()));
    Entry->eraseFromParent();
  } else {
    GA->setName(MangledName);
  }

  // Set attributes which are particular to an alias; this is a
  // specialization of the attributes which may be set on a global
  // variable/function.
  if (D->hasAttr<WeakAttr>() || D->hasAttr<WeakRefAttr>() ||
      D->isWeakImported()) {
    GA->setLinkage(llvm::Function::WeakAnyLinkage);
  }

  // Propagate the TLS mode to the alias if the aliased variable is
  // thread-local.
  if (const auto *VD = dyn_cast<VarDecl>(D))
    if (VD->getTLSKind())
      setTLSMode(GA, *VD);

  SetCommonAttributes(GD, GA);
}
5179
void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) {
  const auto *D = cast<ValueDecl>(GD.getDecl());
  const IFuncAttr *IFA = D->getAttr<IFuncAttr>();
  assert(IFA && "Not an ifunc?");

  StringRef MangledName = getMangledName(GD);

  // An ifunc whose resolver is itself is a hard error.
  if (IFA->getResolver() == MangledName) {
    Diags.Report(IFA->getLocation(), diag::err_cyclic_alias) << 1;
    return;
  }

  // Report an error if some definition overrides ifunc.
  llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
  if (Entry && !Entry->isDeclaration()) {
    GlobalDecl OtherGD;
    // Only diagnose each conflicting pair once.
    if (lookupRepresentativeDecl(MangledName, OtherGD) &&
        DiagnosedConflictingDefinitions.insert(GD).second) {
      Diags.Report(D->getLocation(), diag::err_duplicate_mangled_name)
          << MangledName;
      Diags.Report(OtherGD.getDecl()->getLocation(),
                   diag::note_previous_definition);
    }
    return;
  }

  Aliases.push_back(GD);

  // The resolver takes no arguments and returns a pointer to a function of
  // the ifunc's type.
  llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
  llvm::Type *ResolverTy = llvm::GlobalIFunc::getResolverFunctionType(DeclTy);
  llvm::Constant *Resolver =
      GetOrCreateLLVMFunction(IFA->getResolver(), ResolverTy, {},
                              /*ForVTable=*/false);
  llvm::GlobalIFunc *GIF =
      llvm::GlobalIFunc::create(DeclTy, 0, llvm::Function::ExternalLinkage,
                                "", Resolver, &getModule());
  if (Entry) {
    // The resolver resolved to the previously-seen declaration of this very
    // symbol: that is a cycle.
    if (GIF->getResolver() == Entry) {
      Diags.Report(IFA->getLocation(), diag::err_cyclic_alias) << 1;
      return;
    }
    assert(Entry->isDeclaration());

    // If there is a declaration in the module, then we had an extern followed
    // by the ifunc, as in:
    //   extern int test();
    //   ...
    //   int test() __attribute__((ifunc("resolver")));
    //
    // Remove it and replace uses of it with the ifunc.
    GIF->takeName(Entry);

    Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GIF,
                                                             Entry->getType()));
    Entry->eraseFromParent();
  } else
    GIF->setName(MangledName);

  SetCommonAttributes(GD, GIF);
}
5240
5241llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
5242 ArrayRef<llvm::Type*> Tys) {
5243 return llvm::Intrinsic::getDeclaration(&getModule(), (llvm::Intrinsic::ID)IID,
5244 Tys);
5245}
5246
/// Find (or create) the map entry caching the backing store for a CFString
/// literal. On return, StringLength holds the length in code units, and
/// IsUTF16 is set to true when the literal required UTF-16 conversion.
static llvm::StringMapEntry<llvm::GlobalVariable *> &
GetConstantCFStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
                         const StringLiteral *Literal, bool TargetIsLSB,
                         bool &IsUTF16, unsigned &StringLength) {
  // NOTE(review): TargetIsLSB is not consulted anywhere in this function.
  StringRef String = Literal->getString();
  unsigned NumBytes = String.size();

  // Check for simple case: pure ASCII (no NULs, no high bytes) keys the map
  // with the raw UTF-8 bytes directly.
  if (!Literal->containsNonAsciiOrNull()) {
    StringLength = NumBytes;
    return *Map.insert(std::make_pair(String, nullptr)).first;
  }

  // Otherwise, convert the UTF8 literals into a string of shorts.
  IsUTF16 = true;

  SmallVector<llvm::UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls.
  const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
  llvm::UTF16 *ToPtr = &ToBuf[0];

  // The conversion result is deliberately ignored; ToPtr still marks the end
  // of whatever was successfully converted.
  (void)llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
                                 ToPtr + NumBytes, llvm::strictConversion);

  // ConvertUTF8toUTF16 returns the length in ToPtr.
  StringLength = ToPtr - &ToBuf[0];

  // Add an explicit null.
  *ToPtr = 0;

  // Key the map with the UTF-16 code units reinterpreted as raw bytes,
  // including the trailing null (hence StringLength + 1 units of 2 bytes).
  return *Map.insert(std::make_pair(
                         StringRef(reinterpret_cast<const char *>(ToBuf.data()),
                                   (StringLength + 1) * 2),
                         nullptr)).first;
}
5280
ConstantAddress
CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
  unsigned StringLength = 0;
  bool isUTF16 = false;
  // Look up (or create) the cache entry for this literal's bytes.
  llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
      GetConstantCFStringEntry(CFConstantStringMap, Literal,
                               getDataLayout().isLittleEndian(), isUTF16,
                               StringLength);

  // If an identical literal has already been emitted, reuse its global.
  if (auto *C = Entry.second)
    return ConstantAddress(
        C, C->getValueType(), CharUnits::fromQuantity(C->getAlignment()));

  llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
  llvm::Constant *Zeros[] = { Zero, Zero };

  const ASTContext &Context = getContext();
  const llvm::Triple &Triple = getTriple();

  // The Swift-hosted CoreFoundation runtimes use different class symbols,
  // field layouts, and mangling schemes than the C runtime.
  const auto CFRuntime = getLangOpts().CFRuntime;
  const bool IsSwiftABI =
      static_cast<unsigned>(CFRuntime) >=
      static_cast<unsigned>(LangOptions::CoreFoundationABI::Swift);
  const bool IsSwift4_1 = CFRuntime == LangOptions::CoreFoundationABI::Swift4_1;

  // If we don't already have it, get __CFConstantStringClassReference.
  if (!CFConstantStringClassRef) {
    const char *CFConstantStringClassName = "__CFConstantStringClassReference";
    llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
    Ty = llvm::ArrayType::get(Ty, 0);

    // Swift runtimes reference a mangled Swift class symbol instead; the
    // mangling prefix differs per Swift version, and the symbol name differs
    // between Darwin (SwiftFoundation) and other OSes (Foundation).
    switch (CFRuntime) {
    default: break;
    case LangOptions::CoreFoundationABI::Swift: LLVM_FALLTHROUGH;
    case LangOptions::CoreFoundationABI::Swift5_0:
      CFConstantStringClassName =
          Triple.isOSDarwin() ? "$s15SwiftFoundation19_NSCFConstantStringCN"
                              : "$s10Foundation19_NSCFConstantStringCN";
      Ty = IntPtrTy;
      break;
    case LangOptions::CoreFoundationABI::Swift4_2:
      CFConstantStringClassName =
          Triple.isOSDarwin() ? "$S15SwiftFoundation19_NSCFConstantStringCN"
                              : "$S10Foundation19_NSCFConstantStringCN";
      Ty = IntPtrTy;
      break;
    case LangOptions::CoreFoundationABI::Swift4_1:
      CFConstantStringClassName =
          Triple.isOSDarwin() ? "__T015SwiftFoundation19_NSCFConstantStringCN"
                              : "__T010Foundation19_NSCFConstantStringCN";
      Ty = IntPtrTy;
      break;
    }

    llvm::Constant *C = CreateRuntimeVariable(Ty, CFConstantStringClassName);

    // On ELF/COFF, fix up the linkage and DLL storage class of the class
    // reference depending on whether a matching VarDecl is visible in the TU.
    if (Triple.isOSBinFormatELF() || Triple.isOSBinFormatCOFF()) {
      llvm::GlobalValue *GV = nullptr;

      if ((GV = dyn_cast<llvm::GlobalValue>(C))) {
        IdentifierInfo &II = Context.Idents.get(GV->getName());
        TranslationUnitDecl *TUDecl = Context.getTranslationUnitDecl();
        DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);

        const VarDecl *VD = nullptr;
        for (const auto *Result : DC->lookup(&II))
          if ((VD = dyn_cast<VarDecl>(Result)))
            break;

        if (Triple.isOSBinFormatELF()) {
          if (!VD)
            GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
        } else {
          GV->setLinkage(llvm::GlobalValue::ExternalLinkage);
          if (!VD || !VD->hasAttr<DLLExportAttr>())
            GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
          else
            GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
        }

        setDSOLocal(GV);
      }
    }

    // Decay array -> ptr
    CFConstantStringClassRef =
        IsSwiftABI ? llvm::ConstantExpr::getPtrToInt(C, Ty)
                   : llvm::ConstantExpr::getGetElementPtr(Ty, C, Zeros);
  }

  QualType CFTy = Context.getCFConstantStringType();

  auto *STy = cast<llvm::StructType>(getTypes().ConvertType(CFTy));

  ConstantInitBuilder Builder(*this);
  auto Fields = Builder.beginStruct(STy);

  // Class pointer.
  Fields.add(cast<llvm::ConstantExpr>(CFConstantStringClassRef));

  // Flags.
  if (IsSwiftABI) {
    Fields.addInt(IntPtrTy, IsSwift4_1 ? 0x05 : 0x01);
    Fields.addInt(Int64Ty, isUTF16 ? 0x07d0 : 0x07c8);
  } else {
    Fields.addInt(IntTy, isUTF16 ? 0x07d0 : 0x07C8);
  }

  // String pointer. For UTF-16, reinterpret the cached key bytes as uint16_t
  // code units; otherwise emit the raw byte string.
  llvm::Constant *C = nullptr;
  if (isUTF16) {
    auto Arr = llvm::makeArrayRef(
        reinterpret_cast<uint16_t *>(const_cast<char *>(Entry.first().data())),
        Entry.first().size() / 2);
    C = llvm::ConstantDataArray::get(VMContext, Arr);
  } else {
    C = llvm::ConstantDataArray::getString(VMContext, Entry.first());
  }

  // Note: -fwritable-strings doesn't make the backing store strings of
  // CFStrings writable. (See <rdar://problem/10657500>)
  auto *GV =
      new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true,
                               llvm::GlobalValue::PrivateLinkage, C, ".str");
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  // Don't enforce the target's minimum global alignment, since the only use
  // of the string is via this class initializer.
  CharUnits Align = isUTF16 ? Context.getTypeAlignInChars(Context.ShortTy)
                            : Context.getTypeAlignInChars(Context.CharTy);
  GV->setAlignment(Align.getAsAlign());

  // FIXME: We set the section explicitly to avoid a bug in ld64 224.1.
  // Without it LLVM can merge the string with a non unnamed_addr one during
  // LTO.  Doing that changes the section it ends in, which surprises ld64.
  if (Triple.isOSBinFormatMachO())
    GV->setSection(isUTF16 ? "__TEXT,__ustring"
                           : "__TEXT,__cstring,cstring_literals");
  // Make sure the literal ends up in .rodata to allow for safe ICF and for
  // the static linker to adjust permissions to read-only later on.
  else if (Triple.isOSBinFormatELF())
    GV->setSection(".rodata");

  // String.
  llvm::Constant *Str =
      llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);

  if (isUTF16)
    // Cast the UTF16 string to the correct type.
    Str = llvm::ConstantExpr::getBitCast(Str, Int8PtrTy);
  Fields.add(Str);

  // String length. The field is 'long'-sized in the C runtime; Swift 4.x
  // runtimes use a 32-bit field, later Swift runtimes a pointer-sized one.
  llvm::IntegerType *LengthTy =
      llvm::IntegerType::get(getModule().getContext(),
                             Context.getTargetInfo().getLongWidth());
  if (IsSwiftABI) {
    if (CFRuntime == LangOptions::CoreFoundationABI::Swift4_1 ||
        CFRuntime == LangOptions::CoreFoundationABI::Swift4_2)
      LengthTy = Int32Ty;
    else
      LengthTy = IntPtrTy;
  }
  Fields.addInt(LengthTy, StringLength);

  // Swift ABI requires 8-byte alignment to ensure that the _Atomic(uint64_t)
  // is properly aligned on 32-bit platforms.
  CharUnits Alignment =
      IsSwiftABI ? Context.toCharUnitsFromBits(64) : getPointerAlign();

  // The struct.
  GV = Fields.finishAndCreateGlobal("_unnamed_cfstring_", Alignment,
                                    /*isConstant=*/false,
                                    llvm::GlobalVariable::PrivateLinkage);
  GV->addAttribute("objc_arc_inert");
  switch (Triple.getObjectFormat()) {
  case llvm::Triple::UnknownObjectFormat:
    llvm_unreachable("unknown file format");
  case llvm::Triple::GOFF:
    llvm_unreachable("GOFF is not yet implemented");
  case llvm::Triple::XCOFF:
    llvm_unreachable("XCOFF is not yet implemented");
  case llvm::Triple::COFF:
  case llvm::Triple::ELF:
  case llvm::Triple::Wasm:
    GV->setSection("cfstring");
    break;
  case llvm::Triple::MachO:
    GV->setSection("__DATA,__cfstring");
    break;
  }
  // Cache the finished CFString for subsequent identical literals.
  Entry.second = GV;

  return ConstantAddress(GV, GV->getValueType(), Alignment);
}
5475
5476bool CodeGenModule::getExpressionLocationsEnabled() const {
5477 return !CodeGenOpts.EmitCodeView || CodeGenOpts.DebugColumnInfo;
5478}
5479
5480QualType CodeGenModule::getObjCFastEnumerationStateType() {
5481 if (ObjCFastEnumerationStateType.isNull()) {
5482 RecordDecl *D = Context.buildImplicitRecord("__objcFastEnumerationState");
5483 D->startDefinition();
5484
5485 QualType FieldTypes[] = {
5486 Context.UnsignedLongTy,
5487 Context.getPointerType(Context.getObjCIdType()),
5488 Context.getPointerType(Context.UnsignedLongTy),
5489 Context.getConstantArrayType(Context.UnsignedLongTy,
5490 llvm::APInt(32, 5), nullptr, ArrayType::Normal, 0)
5491 };
5492
5493 for (size_t i = 0; i < 4; ++i) {
5494 FieldDecl *Field = FieldDecl::Create(Context,
5495 D,
5496 SourceLocation(),
5497 SourceLocation(), nullptr,
5498 FieldTypes[i], /*TInfo=*/nullptr,
5499 /*BitWidth=*/nullptr,
5500 /*Mutable=*/false,
5501 ICIS_NoInit);
5502 Field->setAccess(AS_public);
5503 D->addDecl(Field);
5504 }
5505
5506 D->completeDefinition();
5507 ObjCFastEnumerationStateType = Context.getTagDeclType(D);
5508 }
5509
5510 return ObjCFastEnumerationStateType;
5511}
5512
5513llvm::Constant *
5514CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
5515 assert(!E->getType()->isPointerType() && "Strings are always arrays")(static_cast <bool> (!E->getType()->isPointerType
() && "Strings are always arrays") ? void (0) : __assert_fail
("!E->getType()->isPointerType() && \"Strings are always arrays\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 5515, __extension__ __PRETTY_FUNCTION__
))
;
5516
5517 // Don't emit it as the address of the string, emit the string data itself
5518 // as an inline array.
5519 if (E->getCharByteWidth() == 1) {
5520 SmallString<64> Str(E->getString());
5521
5522 // Resize the string to the right size, which is indicated by its type.
5523 const ConstantArrayType *CAT = Context.getAsConstantArrayType(E->getType());
5524 Str.resize(CAT->getSize().getZExtValue());
5525 return llvm::ConstantDataArray::getString(VMContext, Str, false);
5526 }
5527
5528 auto *AType = cast<llvm::ArrayType>(getTypes().ConvertType(E->getType()));
5529 llvm::Type *ElemTy = AType->getElementType();
5530 unsigned NumElements = AType->getNumElements();
5531
5532 // Wide strings have either 2-byte or 4-byte elements.
5533 if (ElemTy->getPrimitiveSizeInBits() == 16) {
5534 SmallVector<uint16_t, 32> Elements;
5535 Elements.reserve(NumElements);
5536
5537 for(unsigned i = 0, e = E->getLength(); i != e; ++i)
5538 Elements.push_back(E->getCodeUnit(i));
5539 Elements.resize(NumElements);
5540 return llvm::ConstantDataArray::get(VMContext, Elements);
5541 }
5542
5543 assert(ElemTy->getPrimitiveSizeInBits() == 32)(static_cast <bool> (ElemTy->getPrimitiveSizeInBits(
) == 32) ? void (0) : __assert_fail ("ElemTy->getPrimitiveSizeInBits() == 32"
, "clang/lib/CodeGen/CodeGenModule.cpp", 5543, __extension__ __PRETTY_FUNCTION__
))
;
5544 SmallVector<uint32_t, 32> Elements;
5545 Elements.reserve(NumElements);
5546
5547 for(unsigned i = 0, e = E->getLength(); i != e; ++i)
5548 Elements.push_back(E->getCodeUnit(i));
5549 Elements.resize(NumElements);
5550 return llvm::ConstantDataArray::get(VMContext, Elements);
5551}
5552
5553static llvm::GlobalVariable *
5554GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
5555 CodeGenModule &CGM, StringRef GlobalName,
5556 CharUnits Alignment) {
5557 unsigned AddrSpace = CGM.getContext().getTargetAddressSpace(
5558 CGM.GetGlobalConstantAddressSpace());
5559
5560 llvm::Module &M = CGM.getModule();
5561 // Create a global variable for this string
5562 auto *GV = new llvm::GlobalVariable(
5563 M, C->getType(), !CGM.getLangOpts().WritableStrings, LT, C, GlobalName,
5564 nullptr, llvm::GlobalVariable::NotThreadLocal, AddrSpace);
5565 GV->setAlignment(Alignment.getAsAlign());
5566 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
5567 if (GV->isWeakForLinker()) {
5568 assert(CGM.supportsCOMDAT() && "Only COFF uses weak string literals")(static_cast <bool> (CGM.supportsCOMDAT() && "Only COFF uses weak string literals"
) ? void (0) : __assert_fail ("CGM.supportsCOMDAT() && \"Only COFF uses weak string literals\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 5568, __extension__ __PRETTY_FUNCTION__
))
;
5569 GV->setComdat(M.getOrInsertComdat(GV->getName()));
5570 }
5571 CGM.setDSOLocal(GV);
5572
5573 return GV;
5574}
5575
5576/// GetAddrOfConstantStringFromLiteral - Return a pointer to a
5577/// constant array for the given string literal.
5578ConstantAddress
5579CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
5580 StringRef Name) {
5581 CharUnits Alignment = getContext().getAlignOfGlobalVarInChars(S->getType());
5582
5583 llvm::Constant *C = GetConstantArrayFromStringLiteral(S);
5584 llvm::GlobalVariable **Entry = nullptr;
5585 if (!LangOpts.WritableStrings) {
5586 Entry = &ConstantStringMap[C];
5587 if (auto GV = *Entry) {
5588 if (uint64_t(Alignment.getQuantity()) > GV->getAlignment())
5589 GV->setAlignment(Alignment.getAsAlign());
5590 return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
5591 GV->getValueType(), Alignment);
5592 }
5593 }
5594
5595 SmallString<256> MangledNameBuffer;
5596 StringRef GlobalVariableName;
5597 llvm::GlobalValue::LinkageTypes LT;
5598
5599 // Mangle the string literal if that's how the ABI merges duplicate strings.
5600 // Don't do it if they are writable, since we don't want writes in one TU to
5601 // affect strings in another.
5602 if (getCXXABI().getMangleContext().shouldMangleStringLiteral(S) &&
5603 !LangOpts.WritableStrings) {
5604 llvm::raw_svector_ostream Out(MangledNameBuffer);
5605 getCXXABI().getMangleContext().mangleStringLiteral(S, Out);
5606 LT = llvm::GlobalValue::LinkOnceODRLinkage;
5607 GlobalVariableName = MangledNameBuffer;
5608 } else {
5609 LT = llvm::GlobalValue::PrivateLinkage;
5610 GlobalVariableName = Name;
5611 }
5612
5613 auto GV = GenerateStringLiteral(C, LT, *this, GlobalVariableName, Alignment);
5614 if (Entry)
5615 *Entry = GV;
5616
5617 SanitizerMD->reportGlobalToASan(GV, S->getStrTokenLoc(0), "<string literal>",
5618 QualType());
5619
5620 return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
5621 GV->getValueType(), Alignment);
5622}
5623
5624/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
5625/// array for the given ObjCEncodeExpr node.
5626ConstantAddress
5627CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
5628 std::string Str;
5629 getContext().getObjCEncodingForType(E->getEncodedType(), Str);
5630
5631 return GetAddrOfConstantCString(Str);
5632}
5633
5634/// GetAddrOfConstantCString - Returns a pointer to a character array containing
5635/// the literal and a terminating '\0' character.
5636/// The result has pointer to array type.
5637ConstantAddress CodeGenModule::GetAddrOfConstantCString(
5638 const std::string &Str, const char *GlobalName) {
5639 StringRef StrWithNull(Str.c_str(), Str.size() + 1);
5640 CharUnits Alignment =
5641 getContext().getAlignOfGlobalVarInChars(getContext().CharTy);
5642
5643 llvm::Constant *C =
5644 llvm::ConstantDataArray::getString(getLLVMContext(), StrWithNull, false);
5645
5646 // Don't share any string literals if strings aren't constant.
5647 llvm::GlobalVariable **Entry = nullptr;
5648 if (!LangOpts.WritableStrings) {
5649 Entry = &ConstantStringMap[C];
5650 if (auto GV = *Entry) {
5651 if (uint64_t(Alignment.getQuantity()) > GV->getAlignment())
5652 GV->setAlignment(Alignment.getAsAlign());
5653 return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
5654 GV->getValueType(), Alignment);
5655 }
5656 }
5657
5658 // Get the default prefix if a name wasn't specified.
5659 if (!GlobalName)
5660 GlobalName = ".str";
5661 // Create a global variable for this.
5662 auto GV = GenerateStringLiteral(C, llvm::GlobalValue::PrivateLinkage, *this,
5663 GlobalName, Alignment);
5664 if (Entry)
5665 *Entry = GV;
5666
5667 return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
5668 GV->getValueType(), Alignment);
5669}
5670
5671ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
5672 const MaterializeTemporaryExpr *E, const Expr *Init) {
5673 assert((E->getStorageDuration() == SD_Static ||(static_cast <bool> ((E->getStorageDuration() == SD_Static
|| E->getStorageDuration() == SD_Thread) && "not a global temporary"
) ? void (0) : __assert_fail ("(E->getStorageDuration() == SD_Static || E->getStorageDuration() == SD_Thread) && \"not a global temporary\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 5674, __extension__ __PRETTY_FUNCTION__
))
5674 E->getStorageDuration() == SD_Thread) && "not a global temporary")(static_cast <bool> ((E->getStorageDuration() == SD_Static
|| E->getStorageDuration() == SD_Thread) && "not a global temporary"
) ? void (0) : __assert_fail ("(E->getStorageDuration() == SD_Static || E->getStorageDuration() == SD_Thread) && \"not a global temporary\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 5674, __extension__ __PRETTY_FUNCTION__
))
;
5675 const auto *VD = cast<VarDecl>(E->getExtendingDecl());
5676
5677 // If we're not materializing a subobject of the temporary, keep the
5678 // cv-qualifiers from the type of the MaterializeTemporaryExpr.
5679 QualType MaterializedType = Init->getType();
5680 if (Init == E->getSubExpr())
5681 MaterializedType = E->getType();
5682
5683 CharUnits Align = getContext().getTypeAlignInChars(MaterializedType);
5684
5685 auto InsertResult = MaterializedGlobalTemporaryMap.insert({E, nullptr});
5686 if (!InsertResult.second) {
5687 // We've seen this before: either we already created it or we're in the
5688 // process of doing so.
5689 if (!InsertResult.first->second) {
5690 // We recursively re-entered this function, probably during emission of
5691 // the initializer. Create a placeholder. We'll clean this up in the
5692 // outer call, at the end of this function.
5693 llvm::Type *Type = getTypes().ConvertTypeForMem(MaterializedType);
5694 InsertResult.first->second = new llvm::GlobalVariable(
5695 getModule(), Type, false, llvm::GlobalVariable::InternalLinkage,
5696 nullptr);
5697 }
5698 return ConstantAddress(InsertResult.first->second,
5699 llvm::cast<llvm::GlobalVariable>(
5700 InsertResult.first->second->stripPointerCasts())
5701 ->getValueType(),
5702 Align);
5703 }
5704
5705 // FIXME: If an externally-visible declaration extends multiple temporaries,
5706 // we need to give each temporary the same name in every translation unit (and
5707 // we also need to make the temporaries externally-visible).
5708 SmallString<256> Name;
5709 llvm::raw_svector_ostream Out(Name);
5710 getCXXABI().getMangleContext().mangleReferenceTemporary(
5711 VD, E->getManglingNumber(), Out);
5712
5713 APValue *Value = nullptr;
5714 if (E->getStorageDuration() == SD_Static && VD && VD->evaluateValue()) {
5715 // If the initializer of the extending declaration is a constant
5716 // initializer, we should have a cached constant initializer for this
5717 // temporary. Note that this might have a different value from the value
5718 // computed by evaluating the initializer if the surrounding constant
5719 // expression modifies the temporary.
5720 Value = E->getOrCreateValue(false);
5721 }
5722
5723 // Try evaluating it now, it might have a constant initializer.
5724 Expr::EvalResult EvalResult;
5725 if (!Value && Init->EvaluateAsRValue(EvalResult, getContext()) &&
5726 !EvalResult.hasSideEffects())
5727 Value = &EvalResult.Val;
5728
5729 LangAS AddrSpace =
5730 VD ? GetGlobalVarAddressSpace(VD) : MaterializedType.getAddressSpace();
5731
5732 Optional<ConstantEmitter> emitter;
5733 llvm::Constant *InitialValue = nullptr;
5734 bool Constant = false;
5735 llvm::Type *Type;
5736 if (Value) {
5737 // The temporary has a constant initializer, use it.
5738 emitter.emplace(*this);
5739 InitialValue = emitter->emitForInitializer(*Value, AddrSpace,
5740 MaterializedType);
5741 Constant = isTypeConstant(MaterializedType, /*ExcludeCtor*/Value);
5742 Type = InitialValue->getType();
5743 } else {
5744 // No initializer, the initialization will be provided when we
5745 // initialize the declaration which performed lifetime extension.
5746 Type = getTypes().ConvertTypeForMem(MaterializedType);
5747 }
5748
5749 // Create a global variable for this lifetime-extended temporary.
5750 llvm::GlobalValue::LinkageTypes Linkage =
5751 getLLVMLinkageVarDefinition(VD, Constant);
5752 if (Linkage == llvm::GlobalVariable::ExternalLinkage) {
5753 const VarDecl *InitVD;
5754 if (VD->isStaticDataMember() && VD->getAnyInitializer(InitVD) &&
5755 isa<CXXRecordDecl>(InitVD->getLexicalDeclContext())) {
5756 // Temporaries defined inside a class get linkonce_odr linkage because the
5757 // class can be defined in multiple translation units.
5758 Linkage = llvm::GlobalVariable::LinkOnceODRLinkage;
5759 } else {
5760 // There is no need for this temporary to have external linkage if the
5761 // VarDecl has external linkage.
5762 Linkage = llvm::GlobalVariable::InternalLinkage;
5763 }
5764 }
5765 auto TargetAS = getContext().getTargetAddressSpace(AddrSpace);
5766 auto *GV = new llvm::GlobalVariable(
5767 getModule(), Type, Constant, Linkage, InitialValue, Name.c_str(),
5768 /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
5769 if (emitter) emitter->finalize(GV);
5770 setGVProperties(GV, VD);
5771 if (GV->getDLLStorageClass() == llvm::GlobalVariable::DLLExportStorageClass)
5772 // The reference temporary should never be dllexport.
5773 GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
5774 GV->setAlignment(Align.getAsAlign());
5775 if (supportsCOMDAT() && GV->isWeakForLinker())
5776 GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
5777 if (VD->getTLSKind())
5778 setTLSMode(GV, *VD);
5779 llvm::Constant *CV = GV;
5780 if (AddrSpace != LangAS::Default)
5781 CV = getTargetCodeGenInfo().performAddrSpaceCast(
5782 *this, GV, AddrSpace, LangAS::Default,
5783 Type->getPointerTo(
5784 getContext().getTargetAddressSpace(LangAS::Default)));
5785
5786 // Update the map with the new temporary. If we created a placeholder above,
5787 // replace it with the new global now.
5788 llvm::Constant *&Entry = MaterializedGlobalTemporaryMap[E];
5789 if (Entry) {
5790 Entry->replaceAllUsesWith(
5791 llvm::ConstantExpr::getBitCast(CV, Entry->getType()));
5792 llvm::cast<llvm::GlobalVariable>(Entry)->eraseFromParent();
5793 }
5794 Entry = CV;
5795
5796 return ConstantAddress(CV, Type, Align);
5797}
5798
5799/// EmitObjCPropertyImplementations - Emit information for synthesized
5800/// properties for an implementation.
5801void CodeGenModule::EmitObjCPropertyImplementations(const
5802 ObjCImplementationDecl *D) {
5803 for (const auto *PID : D->property_impls()) {
5804 // Dynamic is just for type-checking.
5805 if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
5806 ObjCPropertyDecl *PD = PID->getPropertyDecl();
5807
5808 // Determine which methods need to be implemented, some may have
5809 // been overridden. Note that ::isPropertyAccessor is not the method
5810 // we want, that just indicates if the decl came from a
5811 // property. What we want to know is if the method is defined in
5812 // this implementation.
5813 auto *Getter = PID->getGetterMethodDecl();
5814 if (!Getter || Getter->isSynthesizedAccessorStub())
5815 CodeGenFunction(*this).GenerateObjCGetter(
5816 const_cast<ObjCImplementationDecl *>(D), PID);
5817 auto *Setter = PID->getSetterMethodDecl();
5818 if (!PD->isReadOnly() && (!Setter || Setter->isSynthesizedAccessorStub()))
5819 CodeGenFunction(*this).GenerateObjCSetter(
5820 const_cast<ObjCImplementationDecl *>(D), PID);
5821 }
5822 }
5823}
5824
5825static bool needsDestructMethod(ObjCImplementationDecl *impl) {
5826 const ObjCInterfaceDecl *iface = impl->getClassInterface();
5827 for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
5828 ivar; ivar = ivar->getNextIvar())
5829 if (ivar->getType().isDestructedType())
5830 return true;
5831
5832 return false;
5833}
5834
5835static bool AllTrivialInitializers(CodeGenModule &CGM,
5836 ObjCImplementationDecl *D) {
5837 CodeGenFunction CGF(CGM);
5838 for (ObjCImplementationDecl::init_iterator B = D->init_begin(),
5839 E = D->init_end(); B != E; ++B) {
5840 CXXCtorInitializer *CtorInitExp = *B;
5841 Expr *Init = CtorInitExp->getInit();
5842 if (!CGF.isTrivialInitializer(Init))
5843 return false;
5844 }
5845 return true;
5846}
5847
5848/// EmitObjCIvarInitializations - Emit information for ivar initialization
5849/// for an implementation.
5850void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
5851 // We might need a .cxx_destruct even if we don't have any ivar initializers.
5852 if (needsDestructMethod(D)) {
5853 IdentifierInfo *II = &getContext().Idents.get(".cxx_destruct");
5854 Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
5855 ObjCMethodDecl *DTORMethod = ObjCMethodDecl::Create(
5856 getContext(), D->getLocation(), D->getLocation(), cxxSelector,
5857 getContext().VoidTy, nullptr, D,
5858 /*isInstance=*/true, /*isVariadic=*/false,
5859 /*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false,
5860 /*isImplicitlyDeclared=*/true,
5861 /*isDefined=*/false, ObjCMethodDecl::Required);
5862 D->addInstanceMethod(DTORMethod);
5863 CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
5864 D->setHasDestructors(true);
5865 }
5866
5867 // If the implementation doesn't have any ivar initializers, we don't need
5868 // a .cxx_construct.
5869 if (D->getNumIvarInitializers() == 0 ||
5870 AllTrivialInitializers(*this, D))
5871 return;
5872
5873 IdentifierInfo *II = &getContext().Idents.get(".cxx_construct");
5874 Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
5875 // The constructor returns 'self'.
5876 ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create(
5877 getContext(), D->getLocation(), D->getLocation(), cxxSelector,
5878 getContext().getObjCIdType(), nullptr, D, /*isInstance=*/true,
5879 /*isVariadic=*/false,
5880 /*isPropertyAccessor=*/true, /*isSynthesizedAccessorStub=*/false,
5881 /*isImplicitlyDeclared=*/true,
5882 /*isDefined=*/false, ObjCMethodDecl::Required);
5883 D->addInstanceMethod(CTORMethod);
5884 CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
5885 D->setHasNonZeroConstructors(true);
5886}
5887
5888// EmitLinkageSpec - Emit all declarations in a linkage spec.
5889void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
5890 if (LSD->getLanguage() != LinkageSpecDecl::lang_c &&
5891 LSD->getLanguage() != LinkageSpecDecl::lang_cxx) {
5892 ErrorUnsupported(LSD, "linkage spec");
5893 return;
5894 }
5895
5896 EmitDeclContext(LSD);
5897}
5898
5899void CodeGenModule::EmitDeclContext(const DeclContext *DC) {
5900 for (auto *I : DC->decls()) {
5901 // Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope
5902 // are themselves considered "top-level", so EmitTopLevelDecl on an
5903 // ObjCImplDecl does not recursively visit them. We need to do that in
5904 // case they're nested inside another construct (LinkageSpecDecl /
5905 // ExportDecl) that does stop them from being considered "top-level".
5906 if (auto *OID = dyn_cast<ObjCImplDecl>(I)) {
5907 for (auto *M : OID->methods())
5908 EmitTopLevelDecl(M);
5909 }
5910
5911 EmitTopLevelDecl(I);
5912 }
5913}
5914
5915/// EmitTopLevelDecl - Emit code for a single top level declaration.
5916void CodeGenModule::EmitTopLevelDecl(Decl *D) {
5917 // Ignore dependent declarations.
5918 if (D->isTemplated())
5919 return;
5920
5921 // Consteval function shouldn't be emitted.
5922 if (auto *FD = dyn_cast<FunctionDecl>(D))
5923 if (FD->isConsteval())
5924 return;
5925
5926 switch (D->getKind()) {
5927 case Decl::CXXConversion:
5928 case Decl::CXXMethod:
5929 case Decl::Function:
5930 EmitGlobal(cast<FunctionDecl>(D));
5931 // Always provide some coverage mapping
5932 // even for the functions that aren't emitted.
5933 AddDeferredUnusedCoverageMapping(D);
5934 break;
5935
5936 case Decl::CXXDeductionGuide:
5937 // Function-like, but does not result in code emission.
5938 break;
5939
5940 case Decl::Var:
5941 case Decl::Decomposition:
5942 case Decl::VarTemplateSpecialization:
5943 EmitGlobal(cast<VarDecl>(D));
5944 if (auto *DD = dyn_cast<DecompositionDecl>(D))
5945 for (auto *B : DD->bindings())
5946 if (auto *HD = B->getHoldingVar())
5947 EmitGlobal(HD);
5948 break;
5949
5950 // Indirect fields from global anonymous structs and unions can be
5951 // ignored; only the actual variable requires IR gen support.
5952 case Decl::IndirectField:
5953 break;
5954
5955 // C++ Decls
5956 case Decl::Namespace:
5957 EmitDeclContext(cast<NamespaceDecl>(D));
5958 break;
5959 case Decl::ClassTemplateSpecialization: {
5960 const auto *Spec = cast<ClassTemplateSpecializationDecl>(D);
5961 if (CGDebugInfo *DI = getModuleDebugInfo())
5962 if (Spec->getSpecializationKind() ==
5963 TSK_ExplicitInstantiationDefinition &&
5964 Spec->hasDefinition())
5965 DI->completeTemplateDefinition(*Spec);
5966 } LLVM_FALLTHROUGH[[gnu::fallthrough]];
5967 case Decl::CXXRecord: {
5968 CXXRecordDecl *CRD = cast<CXXRecordDecl>(D);
5969 if (CGDebugInfo *DI = getModuleDebugInfo()) {
5970 if (CRD->hasDefinition())
5971 DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(D)));
5972 if (auto *ES = D->getASTContext().getExternalSource())
5973 if (ES->hasExternalDefinitions(D) == ExternalASTSource::EK_Never)
5974 DI->completeUnusedClass(*CRD);
5975 }
5976 // Emit any static data members, they may be definitions.
5977 for (auto *I : CRD->decls())
5978 if (isa<VarDecl>(I) || isa<CXXRecordDecl>(I))
5979 EmitTopLevelDecl(I);
5980 break;
5981 }
5982 // No code generation needed.
5983 case Decl::UsingShadow:
5984 case Decl::ClassTemplate:
5985 case Decl::VarTemplate:
5986 case Decl::Concept:
5987 case Decl::VarTemplatePartialSpecialization:
5988 case Decl::FunctionTemplate:
5989 case Decl::TypeAliasTemplate:
5990 case Decl::Block:
5991 case Decl::Empty:
5992 case Decl::Binding:
5993 break;
5994 case Decl::Using: // using X; [C++]
5995 if (CGDebugInfo *DI = getModuleDebugInfo())
5996 DI->EmitUsingDecl(cast<UsingDecl>(*D));
5997 break;
5998 case Decl::UsingEnum: // using enum X; [C++]
5999 if (CGDebugInfo *DI = getModuleDebugInfo())
6000 DI->EmitUsingEnumDecl(cast<UsingEnumDecl>(*D));
6001 break;
6002 case Decl::NamespaceAlias:
6003 if (CGDebugInfo *DI = getModuleDebugInfo())
6004 DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(*D));
6005 break;
6006 case Decl::UsingDirective: // using namespace X; [C++]
6007 if (CGDebugInfo *DI = getModuleDebugInfo())
6008 DI->EmitUsingDirective(cast<UsingDirectiveDecl>(*D));
6009 break;
6010 case Decl::CXXConstructor:
6011 getCXXABI().EmitCXXConstructors(cast<CXXConstructorDecl>(D));
6012 break;
6013 case Decl::CXXDestructor:
6014 getCXXABI().EmitCXXDestructors(cast<CXXDestructorDecl>(D));
6015 break;
6016
6017 case Decl::StaticAssert:
6018 // Nothing to do.
6019 break;
6020
6021 // Objective-C Decls
6022
6023 // Forward declarations, no (immediate) code generation.
6024 case Decl::ObjCInterface:
6025 case Decl::ObjCCategory:
6026 break;
6027
6028 case Decl::ObjCProtocol: {
6029 auto *Proto = cast<ObjCProtocolDecl>(D);
6030 if (Proto->isThisDeclarationADefinition())
6031 ObjCRuntime->GenerateProtocol(Proto);
6032 break;
6033 }
6034
6035 case Decl::ObjCCategoryImpl:
6036 // Categories have properties but don't support synthesize so we
6037 // can ignore them here.
6038 ObjCRuntime->GenerateCategory(cast<ObjCCategoryImplDecl>(D));
6039 break;
6040
6041 case Decl::ObjCImplementation: {
6042 auto *OMD = cast<ObjCImplementationDecl>(D);
6043 EmitObjCPropertyImplementations(OMD);
6044 EmitObjCIvarInitializations(OMD);
6045 ObjCRuntime->GenerateClass(OMD);
6046 // Emit global variable debug information.
6047 if (CGDebugInfo *DI = getModuleDebugInfo())
6048 if (getCodeGenOpts().hasReducedDebugInfo())
6049 DI->getOrCreateInterfaceType(getContext().getObjCInterfaceType(
6050 OMD->getClassInterface()), OMD->getLocation());
6051 break;
6052 }
6053 case Decl::ObjCMethod: {
6054 auto *OMD = cast<ObjCMethodDecl>(D);
6055 // If this is not a prototype, emit the body.
6056 if (OMD->getBody())
6057 CodeGenFunction(*this).GenerateObjCMethod(OMD);
6058 break;
6059 }
6060 case Decl::ObjCCompatibleAlias:
6061 ObjCRuntime->RegisterAlias(cast<ObjCCompatibleAliasDecl>(D));
6062 break;
6063
6064 case Decl::PragmaComment: {
6065 const auto *PCD = cast<PragmaCommentDecl>(D);
6066 switch (PCD->getCommentKind()) {
6067 case PCK_Unknown:
6068 llvm_unreachable("unexpected pragma comment kind")::llvm::llvm_unreachable_internal("unexpected pragma comment kind"
, "clang/lib/CodeGen/CodeGenModule.cpp", 6068)
;
6069 case PCK_Linker:
6070 AppendLinkerOptions(PCD->getArg());
6071 break;
6072 case PCK_Lib:
6073 AddDependentLib(PCD->getArg());
6074 break;
6075 case PCK_Compiler:
6076 case PCK_ExeStr:
6077 case PCK_User:
6078 break; // We ignore all of these.
6079 }
6080 break;
6081 }
6082
6083 case Decl::PragmaDetectMismatch: {
6084 const auto *PDMD = cast<PragmaDetectMismatchDecl>(D);
6085 AddDetectMismatch(PDMD->getName(), PDMD->getValue());
6086 break;
6087 }
6088
6089 case Decl::LinkageSpec:
6090 EmitLinkageSpec(cast<LinkageSpecDecl>(D));
6091 break;
6092
6093 case Decl::FileScopeAsm: {
6094 // File-scope asm is ignored during device-side CUDA compilation.
6095 if (LangOpts.CUDA && LangOpts.CUDAIsDevice)
6096 break;
6097 // File-scope asm is ignored during device-side OpenMP compilation.
6098 if (LangOpts.OpenMPIsDevice)
6099 break;
6100 // File-scope asm is ignored during device-side SYCL compilation.
6101 if (LangOpts.SYCLIsDevice)
6102 break;
6103 auto *AD = cast<FileScopeAsmDecl>(D);
6104 getModule().appendModuleInlineAsm(AD->getAsmString()->getString());
6105 break;
6106 }
6107
6108 case Decl::Import: {
6109 auto *Import = cast<ImportDecl>(D);
6110
6111 // If we've already imported this module, we're done.
6112 if (!ImportedModules.insert(Import->getImportedModule()))
6113 break;
6114
6115 // Emit debug information for direct imports.
6116 if (!Import->getImportedOwningModule()) {
6117 if (CGDebugInfo *DI = getModuleDebugInfo())
6118 DI->EmitImportDecl(*Import);
6119 }
6120
6121 // Find all of the submodules and emit the module initializers.
6122 llvm::SmallPtrSet<clang::Module *, 16> Visited;
6123 SmallVector<clang::Module *, 16> Stack;
6124 Visited.insert(Import->getImportedModule());
6125 Stack.push_back(Import->getImportedModule());
6126
6127 while (!Stack.empty()) {
6128 clang::Module *Mod = Stack.pop_back_val();
6129 if (!EmittedModuleInitializers.insert(Mod).second)
6130 continue;
6131
6132 for (auto *D : Context.getModuleInitializers(Mod))
6133 EmitTopLevelDecl(D);
6134
6135 // Visit the submodules of this module.
6136 for (clang::Module::submodule_iterator Sub = Mod->submodule_begin(),
6137 SubEnd = Mod->submodule_end();
6138 Sub != SubEnd; ++Sub) {
6139 // Skip explicit children; they need to be explicitly imported to emit
6140 // the initializers.
6141 if ((*Sub)->IsExplicit)
6142 continue;
6143
6144 if (Visited.insert(*Sub).second)
6145 Stack.push_back(*Sub);
6146 }
6147 }
6148 break;
6149 }
6150
6151 case Decl::Export:
6152 EmitDeclContext(cast<ExportDecl>(D));
6153 break;
6154
6155 case Decl::OMPThreadPrivate:
6156 EmitOMPThreadPrivateDecl(cast<OMPThreadPrivateDecl>(D));
6157 break;
6158
6159 case Decl::OMPAllocate:
6160 EmitOMPAllocateDecl(cast<OMPAllocateDecl>(D));
6161 break;
6162
6163 case Decl::OMPDeclareReduction:
6164 EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(D));
6165 break;
6166
6167 case Decl::OMPDeclareMapper:
6168 EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(D));
6169 break;
6170
6171 case Decl::OMPRequires:
6172 EmitOMPRequiresDecl(cast<OMPRequiresDecl>(D));
6173 break;
6174
6175 case Decl::Typedef:
6176 case Decl::TypeAlias: // using foo = bar; [C++11]
6177 if (CGDebugInfo *DI = getModuleDebugInfo())
6178 DI->EmitAndRetainType(
6179 getContext().getTypedefType(cast<TypedefNameDecl>(D)));
6180 break;
6181
6182 case Decl::Record:
6183 if (CGDebugInfo *DI = getModuleDebugInfo())
6184 if (cast<RecordDecl>(D)->getDefinition())
6185 DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(D)));
6186 break;
6187
6188 case Decl::Enum:
6189 if (CGDebugInfo *DI = getModuleDebugInfo())
6190 if (cast<EnumDecl>(D)->getDefinition())
6191 DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(D)));
6192 break;
6193
6194 default:
6195 // Make sure we handled everything we should, every other kind is a
6196 // non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
6197 // function. Need to recode Decl::Kind to do that easily.
6198 assert(isa<TypeDecl>(D) && "Unsupported decl kind")(static_cast <bool> (isa<TypeDecl>(D) && "Unsupported decl kind"
) ? void (0) : __assert_fail ("isa<TypeDecl>(D) && \"Unsupported decl kind\""
, "clang/lib/CodeGen/CodeGenModule.cpp", 6198, __extension__ __PRETTY_FUNCTION__
))
;
6199 break;
6200 }
6201}
6202
6203void CodeGenModule::AddDeferredUnusedCoverageMapping(Decl *D) {
6204 // Do we need to generate coverage mapping?
6205 if (!CodeGenOpts.CoverageMapping)
6206 return;
6207 switch (D->getKind()) {
6208 case Decl::CXXConversion:
6209 case Decl::CXXMethod:
6210 case Decl::Function:
6211 case Decl::ObjCMethod:
6212 case Decl::CXXConstructor:
6213 case Decl::CXXDestructor: {
6214 if (!cast<FunctionDecl>(D)->doesThisDeclarationHaveABody())
6215 break;
6216 SourceManager &SM = getContext().getSourceManager();
6217 if (LimitedCoverage && SM.getMainFileID() != SM.getFileID(D->getBeginLoc()))
6218 break;
6219 auto I = DeferredEmptyCoverageMappingDecls.find(D);
6220 if (I == DeferredEmptyCoverageMappingDecls.end())
6221 DeferredEmptyCoverageMappingDecls[D] = true;
6222 break;
6223 }
6224 default:
6225 break;
6226 };
6227}
6228
6229void CodeGenModule::ClearUnusedCoverageMapping(const Decl *D) {
6230 // Do we need to generate coverage mapping?
6231 if (!CodeGenOpts.CoverageMapping)
6232 return;
6233 if (const auto *Fn = dyn_cast<FunctionDecl>(D)) {
6234 if (Fn->isTemplateInstantiation())
6235 ClearUnusedCoverageMapping(Fn->getTemplateInstantiationPattern());
6236 }
6237 auto I = DeferredEmptyCoverageMappingDecls.find(D);
6238 if (I == DeferredEmptyCoverageMappingDecls.end())
6239 DeferredEmptyCoverageMappingDecls[D] = false;
6240 else
6241 I->second = false;
6242}
6243
6244void CodeGenModule::EmitDeferredUnusedCoverageMappings() {
6245 // We call takeVector() here to avoid use-after-free.
6246 // FIXME: DeferredEmptyCoverageMappingDecls is getting mutated because
6247 // we deserialize function bodies to emit coverage info for them, and that
6248 // deserializes more declarations. How should we handle that case?
6249 for (const auto &Entry : DeferredEmptyCoverageMappingDecls.takeVector()) {
6250 if (!Entry.second)
6251 continue;
6252 const Decl *D = Entry.first;
6253 switch (D->getKind()) {
6254 case Decl::CXXConversion:
6255 case Decl::CXXMethod:
6256 case Decl::Function:
6257 case Decl::ObjCMethod: {
6258 CodeGenPGO PGO(*this);
6259 GlobalDecl GD(cast<FunctionDecl>(D));
6260 PGO.emitEmptyCounterMapping(D, getMangledName(GD),
6261 getFunctionLinkage(GD));
6262 break;
6263 }
6264 case Decl::CXXConstructor: {
6265 CodeGenPGO PGO(*this);
6266 GlobalDecl GD(cast<CXXConstructorDecl>(D), Ctor_Base);
6267 PGO.emitEmptyCounterMapping(D, getMangledName(GD),
6268 getFunctionLinkage(GD));
6269 break;
6270 }
6271 case Decl::CXXDestructor: {
6272 CodeGenPGO PGO(*this);
6273 GlobalDecl GD(cast<CXXDestructorDecl>(D), Dtor_Base);
6274 PGO.emitEmptyCounterMapping(D, getMangledName(GD),
6275 getFunctionLinkage(GD));
6276 break;
6277 }
6278 default:
6279 break;
6280 };
6281 }
6282}
6283
6284void CodeGenModule::EmitMainVoidAlias() {
6285 // In order to transition away from "__original_main" gracefully, emit an
6286 // alias for "main" in the no-argument case so that libc can detect when
6287 // new-style no-argument main is in used.
6288 if (llvm::Function *F = getModule().getFunction("main")) {
6289 if (!F->isDeclaration() && F->arg_size() == 0 && !F->isVarArg() &&
6290 F->getReturnType()->isIntegerTy(Context.getTargetInfo().getIntWidth()))
6291 addUsedGlobal(llvm::GlobalAlias::create("__main_void", F));
6292 }
6293}
6294
6295/// Turns the given pointer into a constant.
6296static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
6297 const void *Ptr) {
6298 uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr);
6299 llvm::Type *i64 = llvm::Type::getInt64Ty(Context);
6300 return llvm::ConstantInt::get(i64, PtrInt);
6301}
6302
6303static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
6304 llvm::NamedMDNode *&GlobalMetadata,
6305 GlobalDecl D,
6306 llvm::GlobalValue *Addr) {
6307 if (!GlobalMetadata)
6308 GlobalMetadata =
6309 CGM.getModule().getOrInsertNamedMetadata("clang.global.decl.ptrs");
6310
6311 // TODO: should we report variant information for ctors/dtors?
6312 llvm::Metadata *Ops[] = {llvm::ConstantAsMetadata::get(Addr),
6313 llvm::ConstantAsMetadata::get(GetPointerConstant(
6314 CGM.getLLVMContext(), D.getDecl()))};
6315 GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
6316}
6317
6318/// For each function which is declared within an extern "C" region and marked
6319/// as 'used', but has internal linkage, create an alias from the unmangled
6320/// name to the mangled name if possible. People expect to be able to refer
6321/// to such functions with an unmangled name from inline assembly within the
6322/// same translation unit.
6323void CodeGenModule::EmitStaticExternCAliases() {
6324 if (!getTargetCodeGenInfo().shouldEmitStaticExternCAliases())
6325 return;
6326 for (auto &I : StaticExternCValues) {
6327 IdentifierInfo *Name = I.first;
6328 llvm::GlobalValue *Val = I.second;
6329 if (Val && !getModule().getNamedValue(Name->getName()))
6330 addCompilerUsedGlobal(llvm::GlobalAlias::create(Name->getName(), Val));
6331 }
6332}
6333
6334bool CodeGenModule::lookupRepresentativeDecl(StringRef MangledName,
6335 GlobalDecl &Result) const {
6336 auto Res = Manglings.find(MangledName);
6337 if (Res == Manglings.end())
6338 return false;
6339 Result = Res->getValue();
6340 return true;
6341}
6342
6343/// Emits metadata nodes associating all the global values in the
6344/// current module with the Decls they came from. This is useful for
6345/// projects using IR gen as a subroutine.
6346///
6347/// Since there's currently no way to associate an MDNode directly
6348/// with an llvm::GlobalValue, we create a global named metadata
6349/// with the name 'clang.global.decl.ptrs'.
6350void CodeGenModule::EmitDeclMetadata() {
6351 llvm::NamedMDNode *GlobalMetadata = nullptr;
6352
6353 for (auto &I : MangledDeclNames) {
6354 llvm::GlobalValue *Addr = getModule().getNamedValue(I.second);
6355 // Some mangled names don't necessarily have an associated GlobalValue
6356 // in this module, e.g. if we mangled it for DebugInfo.
6357 if (Addr)
6358 EmitGlobalDeclMetadata(*this, GlobalMetadata, I.first, Addr);
6359 }
6360}
6361
6362/// Emits metadata nodes for all the local variables in the current
6363/// function.
6364void CodeGenFunction::EmitDeclMetadata() {
6365 if (LocalDeclMap.empty()) return;
6366
6367 llvm::LLVMContext &Context = getLLVMContext();
6368
6369 // Find the unique metadata ID for this name.
6370 unsigned DeclPtrKind = Context.getMDKindID("clang.decl.ptr");
6371
6372 llvm::NamedMDNode *GlobalMetadata = nullptr;
6373
6374 for (auto &I : LocalDeclMap) {
6375 const Decl *D = I.first;
6376 llvm::Value *Addr = I.second.getPointer();
6377 if (auto *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
6378 llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
6379 Alloca->setMetadata(
6380 DeclPtrKind, llvm::MDNode::get(
6381 Context, llvm::ValueAsMetadata::getConstant(DAddr)));
6382 } else if (auto *GV = dyn_cast<llvm::GlobalValue>(Addr)) {
6383 GlobalDecl GD = GlobalDecl(cast<VarDecl>(D));
6384 EmitGlobalDeclMetadata(CGM, GlobalMetadata, GD, GV);
6385 }
6386 }
6387}
6388
6389void CodeGenModule::EmitVersionIdentMetadata() {
6390 llvm::NamedMDNode *IdentMetadata =
6391 TheModule.getOrInsertNamedMetadata("llvm.ident");
6392 std::string Version = getClangFullVersion();
6393 llvm::LLVMContext &Ctx = TheModule.getContext();
6394
6395 llvm::Metadata *IdentNode[] = {llvm::MDString::get(Ctx, Version)};
6396 IdentMetadata->addOperand(llvm::MDNode::get(Ctx, IdentNode));
6397}
6398
6399void CodeGenModule::EmitCommandLineMetadata() {
6400 llvm::NamedMDNode *CommandLineMetadata =
6401 TheModule.getOrInsertNamedMetadata("llvm.commandline");
6402 std::string CommandLine = getCodeGenOpts().RecordCommandLine;
6403 llvm::LLVMContext &Ctx = TheModule.getContext();
6404
6405 llvm::Metadata *CommandLineNode[] = {llvm::MDString::get(Ctx, CommandLine)};
6406 CommandLineMetadata->addOperand(llvm::MDNode::get(Ctx, CommandLineNode));
6407}
6408
6409void CodeGenModule::EmitCoverageFile() {
6410 if (getCodeGenOpts().CoverageDataFile.empty() &&
6411 getCodeGenOpts().CoverageNotesFile.empty())
6412 return;
6413
6414 llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata("llvm.dbg.cu");
6415 if (!CUNode)
6416 return;
6417
6418 llvm::NamedMDNode *GCov = TheModule.getOrInsertNamedMetadata("llvm.gcov");
6419 llvm::LLVMContext &Ctx = TheModule.getContext();
6420 auto *CoverageDataFile =
6421 llvm::MDString::get(Ctx, getCodeGenOpts().CoverageDataFile);
6422 auto *CoverageNotesFile =
6423 llvm::MDString::get(Ctx, getCodeGenOpts().CoverageNotesFile);
6424 for (int i = 0, e = CUNode->getNumOperands(); i != e; ++i) {
6425 llvm::MDNode *CU = CUNode->getOperand(i);
6426 llvm::Metadata *Elts[] = {CoverageNotesFile, CoverageDataFile, CU};
6427 GCov->addOperand(llvm::MDNode::get(Ctx, Elts));
6428 }
6429}
6430
6431llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
6432 bool ForEH) {
6433 // Return a bogus pointer if RTTI is disabled, unless it's for EH.
6434 // FIXME: should we even be calling this method if RTTI is disabled
6435 // and it's not for EH?
6436 if ((!ForEH && !getLangOpts().RTTI) || getLangOpts().CUDAIsDevice ||
6437 (getLangOpts().OpenMP && getLangOpts().OpenMPIsDevice &&
6438 getTriple().isNVPTX()))
6439 return llvm::Constant::getNullValue(Int8PtrTy);
6440
6441 if (ForEH && Ty->isObjCObjectPointerType() &&
6442 LangOpts.ObjCRuntime.isGNUFamily())
6443 return ObjCRuntime->GetEHType(Ty);
6444
6445 return getCXXABI().getAddrOfRTTIDescriptor(Ty);
6446}
6447
6448void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
6449 // Do not emit threadprivates in simd-only mode.
6450 if (LangOpts.OpenMP && LangOpts.OpenMPSimd)
6451 return;
6452 for (auto RefExpr : D->varlists()) {
6453 auto *VD = cast<VarDecl>(cast<DeclRefExpr>(RefExpr)->getDecl());
6454 bool PerformInit =
6455 VD->getAnyInitializer() &&
6456 !VD->getAnyInitializer()->isConstantInitializer(getContext(),
6457 /*ForRef=*/false);
6458
6459 Address Addr = Address::deprecated(GetAddrOfGlobalVar(VD),
6460 getContext().getDeclAlign(VD));
6461 if (auto InitFunction = getOpenMPRuntime().emitThreadPrivateVarDefinition(
6462 VD, Addr, RefExpr->getBeginLoc(), PerformInit))
6463 CXXGlobalInits.push_back(InitFunction);
6464 }
6465}
6466
6467llvm::Metadata *
6468CodeGenModule::CreateMetadataIdentifierImpl(QualType T, MetadataTypeMap &Map,
6469 StringRef Suffix) {
6470 if (auto *FnType = T->getAs<FunctionProtoType>())
6471 T = getContext().getFunctionType(
6472 FnType->getReturnType(), FnType->getParamTypes(),
6473 FnType->getExtProtoInfo().withExceptionSpec(EST_None));
6474
6475 llvm::Metadata *&InternalId = Map[T.getCanonicalType()];
6476 if (InternalId)
6477 return InternalId;
6478
6479 if (isExternallyVisible(T->getLinkage())) {
6480 std::string OutName;
6481 llvm::raw_string_ostream Out(OutName);
6482 getCXXABI().getMangleContext().mangleTypeName(T, Out);
6483 Out << Suffix;
6484
6485 InternalId = llvm::MDString::get(getLLVMContext(), Out.str());
6486 } else {
6487 InternalId = llvm::MDNode::getDistinct(getLLVMContext(),
6488 llvm::ArrayRef<llvm::Metadata *>());
6489 }
6490
6491 return InternalId;
6492}
6493
/// Return the metadata identifier for type \p T (no suffix), cached in
/// MetadataIdMap.
llvm::Metadata *CodeGenModule::CreateMetadataIdentifierForType(QualType T) {
  return CreateMetadataIdentifierImpl(T, MetadataIdMap, "");
}
6497
/// Return the metadata identifier for a virtual member-pointer use of type
/// \p T; cached in VirtualMetadataIdMap with a ".virtual" suffix.
llvm::Metadata *
CodeGenModule::CreateMetadataIdentifierForVirtualMemPtrType(QualType T) {
  return CreateMetadataIdentifierImpl(T, VirtualMetadataIdMap, ".virtual");
}
6502
6503// Generalize pointer types to a void pointer with the qualifiers of the
6504// originally pointed-to type, e.g. 'const char *' and 'char * const *'
6505// generalize to 'const void *' while 'char *' and 'const char **' generalize to
6506// 'void *'.
6507static QualType GeneralizeType(ASTContext &Ctx, QualType Ty) {
6508 if (!Ty->isPointerType())
6509 return Ty;
6510
6511 return Ctx.getPointerType(
6512 QualType(Ctx.VoidTy).withCVRQualifiers(
6513 Ty->getPointeeType().getCVRQualifiers()));
6514}
6515
// Apply type generalization to a FunctionType's return and argument types.
// Non-function types must not be passed here.
static QualType GeneralizeFunctionType(ASTContext &Ctx, QualType Ty) {
  // Prototyped functions: generalize the return type and every parameter.
  if (auto *FnType = Ty->getAs<FunctionProtoType>()) {
    SmallVector<QualType, 8> GeneralizedParams;
    for (auto &Param : FnType->param_types())
      GeneralizedParams.push_back(GeneralizeType(Ctx, Param));

    return Ctx.getFunctionType(
        GeneralizeType(Ctx, FnType->getReturnType()),
        GeneralizedParams, FnType->getExtProtoInfo());
  }

  // K&R-style functions: only the return type can be generalized.
  if (auto *FnType = Ty->getAs<FunctionNoProtoType>())
    return Ctx.getFunctionNoProtoType(
        GeneralizeType(Ctx, FnType->getReturnType()));

  llvm_unreachable("Encountered unknown FunctionType");
}
6534
/// Return the metadata identifier for \p T with its pointer return and
/// parameter types generalized to void pointers; cached in
/// GeneralizedMetadataIdMap with a ".generalized" suffix.
llvm::Metadata *CodeGenModule::CreateMetadataIdentifierGeneralized(QualType T) {
  return CreateMetadataIdentifierImpl(GeneralizeFunctionType(getContext(), T),
                                      GeneralizedMetadataIdMap, ".generalized");
}
6539
6540/// Returns whether this module needs the "all-vtables" type identifier.
6541bool CodeGenModule::NeedAllVtablesTypeId() const {
6542 // Returns true if at least one of vtable-based CFI checkers is enabled and
6543 // is not in the trapping mode.
6544 return ((LangOpts.Sanitize.has(SanitizerKind::CFIVCall) &&
6545 !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFIVCall)) ||
6546 (LangOpts.Sanitize.has(SanitizerKind::CFINVCall) &&
6547 !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFINVCall)) ||
6548 (LangOpts.Sanitize.has(SanitizerKind::CFIDerivedCast) &&
6549 !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFIDerivedCast)) ||
6550 (LangOpts.Sanitize.has(SanitizerKind::CFIUnrelatedCast) &&
6551 !CodeGenOpts.SanitizeTrap.has(SanitizerKind::CFIUnrelatedCast)));
6552}
6553
6554void CodeGenModule::AddVTableTypeMetadata(llvm::GlobalVariable *VTable,
6555 CharUnits Offset,
6556 const CXXRecordDecl *RD) {
6557 llvm::Metadata *MD =
6558 CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
6559 VTable->addTypeMetadata(Offset.getQuantity(), MD);
6560
6561 if (CodeGenOpts.SanitizeCfiCrossDso)
6562 if (auto CrossDsoTypeId = CreateCrossDsoCfiTypeId(MD))
6563 VTable->addTypeMetadata(Offset.getQuantity(),
6564 llvm::ConstantAsMetadata::get(CrossDsoTypeId));
6565
6566 if (NeedAllVtablesTypeId()) {
6567 llvm::Metadata *MD = llvm::MDString::get(getLLVMContext(), "all-vtables");
6568 VTable->addTypeMetadata(Offset.getQuantity(), MD);
6569 }
6570}
6571
/// Lazily create and return the per-module sanitizer statistics report.
llvm::SanitizerStatReport &CodeGenModule::getSanStats() {
  if (!SanStats)
    SanStats = std::make_unique<llvm::SanitizerStatReport>(&getModule());

  return *SanStats;
}
6578
6579llvm::Value *
6580CodeGenModule::createOpenCLIntToSamplerConversion(const Expr *E,
6581 CodeGenFunction &CGF) {
6582 llvm::Constant *C = ConstantEmitter(CGF).emitAbstract(E, E->getType());
6583 auto *SamplerT = getOpenCLRuntime().getSamplerType(E->getType().getTypePtr());
6584 auto *FTy = llvm::FunctionType::get(SamplerT, {C->getType()}, false);
6585 auto *Call = CGF.EmitRuntimeCall(
6586 CreateRuntimeFunction(FTy, "__translate_sampler_initializer"), {C});
6587 return Call;
6588}
6589
/// Return the natural alignment of the type \p T points to, computed in
/// pointee mode (which, per getNaturalTypeAlignment, uses the non-virtual
/// alignment for C++ class pointees).
CharUnits CodeGenModule::getNaturalPointeeTypeAlignment(
    QualType T, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo) {
  return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
                                 /* forPointeeType= */ true);
}
6595
6596CharUnits CodeGenModule::getNaturalTypeAlignment(QualType T,
6597 LValueBaseInfo *BaseInfo,
6598 TBAAAccessInfo *TBAAInfo,
6599 bool forPointeeType) {
6600 if (TBAAInfo)
6601 *TBAAInfo = getTBAAAccessInfo(T);
6602
6603 // FIXME: This duplicates logic in ASTContext::getTypeAlignIfKnown. But
6604 // that doesn't return the information we need to compute BaseInfo.
6605
6606 // Honor alignment typedef attributes even on incomplete types.
6607 // We also honor them straight for C++ class types, even as pointees;
6608 // there's an expressivity gap here.
6609 if (auto TT = T->getAs<TypedefType>()) {
6610 if (auto Align = TT->getDecl()->getMaxAlignment()) {
6611 if (BaseInfo)
6612 *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
6613 return getContext().toCharUnitsFromBits(Align);
6614 }
6615 }
6616
6617 bool AlignForArray = T->isArrayType();
6618
6619 // Analyze the base element type, so we don't get confused by incomplete
6620 // array types.
6621 T = getContext().getBaseElementType(T);
6622
6623 if (T->isIncompleteType()) {
6624 // We could try to replicate the logic from
6625 // ASTContext::getTypeAlignIfKnown, but nothing uses the alignment if the
6626 // type is incomplete, so it's impossible to test. We could try to reuse
6627 // getTypeAlignIfKnown, but that doesn't return the information we need
6628 // to set BaseInfo. So just ignore the possibility that the alignment is
6629 // greater than one.
6630 if (BaseInfo)
6631 *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
6632 return CharUnits::One();
6633 }
6634
6635 if (BaseInfo)
6636 *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
6637
6638 CharUnits Alignment;
6639 const CXXRecordDecl *RD;
6640 if (T.getQualifiers().hasUnaligned()) {
6641 Alignment = CharUnits::One();
6642 } else if (forPointeeType && !AlignForArray &&
6643 (RD = T->getAsCXXRecordDecl())) {
6644 // For C++ class pointees, we don't know whether we're pointing at a
6645 // base or a complete object, so we generally need to use the
6646 // non-virtual alignment.
6647 Alignment = getClassPointerAlignment(RD);
6648 } else {
6649 Alignment = getContext().getTypeAlignInChars(T);
6650 }
6651
6652 // Cap to the global maximum type alignment unless the alignment
6653 // was somehow explicit on the type.
6654 if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
6655 if (Alignment.getQuantity() > MaxAlign &&
6656 !getContext().isAlignmentRequired(T))
6657 Alignment = CharUnits::fromQuantity(MaxAlign);
6658 }
6659 return Alignment;
6660}
6661
6662bool CodeGenModule::stopAutoInit() {
6663 unsigned StopAfter = getContext().getLangOpts().TrivialAutoVarInitStopAfter;
6664 if (StopAfter) {
6665 // This number is positive only when -ftrivial-auto-var-init-stop-after=* is
6666 // used
6667 if (NumAutoVarInit >= StopAfter) {
6668 return true;
6669 }
6670 if (!NumAutoVarInit) {
6671 unsigned DiagID = getDiags().getCustomDiagID(
6672 DiagnosticsEngine::Warning,
6673 "-ftrivial-auto-var-init-stop-after=%0 has been enabled to limit the "
6674 "number of times ftrivial-auto-var-init=%1 gets applied.");
6675 getDiags().Report(DiagID)
6676 << StopAfter
6677 << (getContext().getLangOpts().getTrivialAutoVarInit() ==
6678 LangOptions::TrivialAutoVarInitKind::Zero
6679 ? "zero"
6680 : "pattern");
6681 }
6682 ++NumAutoVarInit;
6683 }
6684 return false;
6685}
6686
/// Write the postfix used when externalizing a static variable:
/// "__static__" followed by the compilation-unit ID hash.
void CodeGenModule::printPostfixForExternalizedStaticVar(
    llvm::raw_ostream &OS) const {
  OS << "__static__" << getContext().getCUIDHash();
}

/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/llvm/include/llvm/ADT/DenseMap.h

1//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the DenseMap class.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_DENSEMAP_H
15#define LLVM_ADT_DENSEMAP_H
16
17#include "llvm/ADT/DenseMapInfo.h"
18#include "llvm/ADT/EpochTracker.h"
19#include "llvm/Support/AlignOf.h"
20#include "llvm/Support/Compiler.h"
21#include "llvm/Support/MathExtras.h"
22#include "llvm/Support/MemAlloc.h"
23#include "llvm/Support/ReverseIteration.h"
24#include "llvm/Support/type_traits.h"
25#include <algorithm>
26#include <cassert>
27#include <cstddef>
28#include <cstring>
29#include <initializer_list>
30#include <iterator>
31#include <new>
32#include <type_traits>
33#include <utility>
34
35namespace llvm {
36
37namespace detail {
38
// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
  using std::pair<KeyT, ValueT>::pair;

  // Named key/value accessors; custom bucket types implement the same
  // interface without needing to derive from std::pair.
  KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
  const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
  ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
  const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
};
50
51} // end namespace detail
52
53template <typename KeyT, typename ValueT,
54 typename KeyInfoT = DenseMapInfo<KeyT>,
55 typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
56 bool IsConst = false>
57class DenseMapIterator;
58
59template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
60 typename BucketT>
61class DenseMapBase : public DebugEpochBase {
62 template <typename T>
63 using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
64
65public:
66 using size_type = unsigned;
67 using key_type = KeyT;
68 using mapped_type = ValueT;
69 using value_type = BucketT;
70
71 using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
72 using const_iterator =
73 DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
74
75 inline iterator begin() {
76 // When the map is empty, avoid the overhead of advancing/retreating past
77 // empty buckets.
78 if (empty())
79 return end();
80 if (shouldReverseIterate<KeyT>())
81 return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
82 return makeIterator(getBuckets(), getBucketsEnd(), *this);
83 }
84 inline iterator end() {
85 return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
86 }
87 inline const_iterator begin() const {
88 if (empty())
89 return end();
90 if (shouldReverseIterate<KeyT>())
91 return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
92 return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
93 }
94 inline const_iterator end() const {
95 return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
96 }
97
98 LLVM_NODISCARD[[clang::warn_unused_result]] bool empty() const {
99 return getNumEntries() == 0;
100 }
101 unsigned size() const { return getNumEntries(); }
102
103 /// Grow the densemap so that it can contain at least \p NumEntries items
104 /// before resizing again.
105 void reserve(size_type NumEntries) {
106 auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
107 incrementEpoch();
108 if (NumBuckets > getNumBuckets())
109 grow(NumBuckets);
110 }
111
112 void clear() {
113 incrementEpoch();
114 if (getNumEntries() == 0 && getNumTombstones() == 0) return;
115
116 // If the capacity of the array is huge, and the # elements used is small,
117 // shrink the array.
118 if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
119 shrink_and_clear();
120 return;
121 }
122
123 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
124 if (std::is_trivially_destructible<ValueT>::value) {
125 // Use a simpler loop when values don't need destruction.
126 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
127 P->getFirst() = EmptyKey;
128 } else {
129 unsigned NumEntries = getNumEntries();
130 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
131 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
132 if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
133 P->getSecond().~ValueT();
134 --NumEntries;
135 }
136 P->getFirst() = EmptyKey;
137 }
138 }
139 assert(NumEntries == 0 && "Node count imbalance!")(static_cast <bool> (NumEntries == 0 && "Node count imbalance!"
) ? void (0) : __assert_fail ("NumEntries == 0 && \"Node count imbalance!\""
, "llvm/include/llvm/ADT/DenseMap.h", 139, __extension__ __PRETTY_FUNCTION__
))
;
140 }
141 setNumEntries(0);
142 setNumTombstones(0);
143 }
144
145 /// Return 1 if the specified key is in the map, 0 otherwise.
146 size_type count(const_arg_type_t<KeyT> Val) const {
147 const BucketT *TheBucket;
148 return LookupBucketFor(Val, TheBucket) ? 1 : 0;
149 }
150
151 iterator find(const_arg_type_t<KeyT> Val) {
152 BucketT *TheBucket;
153 if (LookupBucketFor(Val, TheBucket))
154 return makeIterator(TheBucket,
155 shouldReverseIterate<KeyT>() ? getBuckets()
156 : getBucketsEnd(),
157 *this, true);
158 return end();
159 }
160 const_iterator find(const_arg_type_t<KeyT> Val) const {
161 const BucketT *TheBucket;
162 if (LookupBucketFor(Val, TheBucket))
163 return makeConstIterator(TheBucket,
164 shouldReverseIterate<KeyT>() ? getBuckets()
165 : getBucketsEnd(),
166 *this, true);
167 return end();
168 }
169
170 /// Alternate version of find() which allows a different, and possibly
171 /// less expensive, key type.
172 /// The DenseMapInfo is responsible for supplying methods
173 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
174 /// type used.
175 template<class LookupKeyT>
176 iterator find_as(const LookupKeyT &Val) {
177 BucketT *TheBucket;
178 if (LookupBucketFor(Val, TheBucket))
179 return makeIterator(TheBucket,
180 shouldReverseIterate<KeyT>() ? getBuckets()
181 : getBucketsEnd(),
182 *this, true);
183 return end();
184 }
185 template<class LookupKeyT>
186 const_iterator find_as(const LookupKeyT &Val) const {
187 const BucketT *TheBucket;
188 if (LookupBucketFor(Val, TheBucket))
189 return makeConstIterator(TheBucket,
190 shouldReverseIterate<KeyT>() ? getBuckets()
191 : getBucketsEnd(),
192 *this, true);
193 return end();
194 }
195
196 /// lookup - Return the entry for the specified key, or a default
197 /// constructed value if no such entry exists.
198 ValueT lookup(const_arg_type_t<KeyT> Val) const {
199 const BucketT *TheBucket;
200 if (LookupBucketFor(Val, TheBucket))
201 return TheBucket->getSecond();
202 return ValueT();
203 }
204
205 // Inserts key,value pair into the map if the key isn't already in the map.
206 // If the key is already in the map, it returns false and doesn't update the
207 // value.
208 std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
209 return try_emplace(KV.first, KV.second);
210 }
211
212 // Inserts key,value pair into the map if the key isn't already in the map.
213 // If the key is already in the map, it returns false and doesn't update the
214 // value.
215 std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
216 return try_emplace(std::move(KV.first), std::move(KV.second));
217 }
218
219 // Inserts key,value pair into the map if the key isn't already in the map.
220 // The value is constructed in-place if the key is not in the map, otherwise
221 // it is not moved.
222 template <typename... Ts>
223 std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&... Args) {
224 BucketT *TheBucket;
225 if (LookupBucketFor(Key, TheBucket))
226 return std::make_pair(makeIterator(TheBucket,
227 shouldReverseIterate<KeyT>()
228 ? getBuckets()
229 : getBucketsEnd(),
230 *this, true),
231 false); // Already in map.
232
233 // Otherwise, insert the new element.
234 TheBucket =
235 InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
236 return std::make_pair(makeIterator(TheBucket,
237 shouldReverseIterate<KeyT>()
238 ? getBuckets()
239 : getBucketsEnd(),
240 *this, true),
241 true);
242 }
243
244 // Inserts key,value pair into the map if the key isn't already in the map.
245 // The value is constructed in-place if the key is not in the map, otherwise
246 // it is not moved.
247 template <typename... Ts>
248 std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&... Args) {
249 BucketT *TheBucket;
250 if (LookupBucketFor(Key, TheBucket))
251 return std::make_pair(makeIterator(TheBucket,
252 shouldReverseIterate<KeyT>()
253 ? getBuckets()
254 : getBucketsEnd(),
255 *this, true),
256 false); // Already in map.
257
258 // Otherwise, insert the new element.
259 TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
260 return std::make_pair(makeIterator(TheBucket,
261 shouldReverseIterate<KeyT>()
262 ? getBuckets()
263 : getBucketsEnd(),
264 *this, true),
265 true);
266 }
267
268 /// Alternate version of insert() which allows a different, and possibly
269 /// less expensive, key type.
270 /// The DenseMapInfo is responsible for supplying methods
271 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
272 /// type used.
273 template <typename LookupKeyT>
274 std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
275 const LookupKeyT &Val) {
276 BucketT *TheBucket;
277 if (LookupBucketFor(Val, TheBucket))
278 return std::make_pair(makeIterator(TheBucket,
279 shouldReverseIterate<KeyT>()
280 ? getBuckets()
281 : getBucketsEnd(),
282 *this, true),
283 false); // Already in map.
284
285 // Otherwise, insert the new element.
286 TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
287 std::move(KV.second), Val);
288 return std::make_pair(makeIterator(TheBucket,
289 shouldReverseIterate<KeyT>()
290 ? getBuckets()
291 : getBucketsEnd(),
292 *this, true),
293 true);
294 }
295
296 /// insert - Range insertion of pairs.
297 template<typename InputIt>
298 void insert(InputIt I, InputIt E) {
299 for (; I != E; ++I)
300 insert(*I);
301 }
302
303 bool erase(const KeyT &Val) {
304 BucketT *TheBucket;
305 if (!LookupBucketFor(Val, TheBucket))
306 return false; // not in map.
307
308 TheBucket->getSecond().~ValueT();
309 TheBucket->getFirst() = getTombstoneKey();
310 decrementNumEntries();
311 incrementNumTombstones();
312 return true;
313 }
314 void erase(iterator I) {
315 BucketT *TheBucket = &*I;
316 TheBucket->getSecond().~ValueT();
317 TheBucket->getFirst() = getTombstoneKey();
318 decrementNumEntries();
319 incrementNumTombstones();
320 }
321
322 value_type& FindAndConstruct(const KeyT &Key) {
323 BucketT *TheBucket;
324 if (LookupBucketFor(Key, TheBucket))
325 return *TheBucket;
326
327 return *InsertIntoBucket(TheBucket, Key);
328 }
329
330 ValueT &operator[](const KeyT &Key) {
331 return FindAndConstruct(Key).second;
332 }
333
334 value_type& FindAndConstruct(KeyT &&Key) {
335 BucketT *TheBucket;
336 if (LookupBucketFor(Key, TheBucket))
337 return *TheBucket;
338
339 return *InsertIntoBucket(TheBucket, std::move(Key));
340 }
341
342 ValueT &operator[](KeyT &&Key) {
343 return FindAndConstruct(std::move(Key)).second;
344 }
345
346 /// isPointerIntoBucketsArray - Return true if the specified pointer points
347 /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
348 /// value in the DenseMap).
349 bool isPointerIntoBucketsArray(const void *Ptr) const {
350 return Ptr >= getBuckets() && Ptr < getBucketsEnd();
351 }
352
353 /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
354 /// array. In conjunction with the previous method, this can be used to
355 /// determine whether an insertion caused the DenseMap to reallocate.
356 const void *getPointerIntoBucketsArray() const { return getBuckets(); }
357
358protected:
359 DenseMapBase() = default;
360
361 void destroyAll() {
362 if (getNumBuckets() == 0) // Nothing to do.
363 return;
364
365 const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
366 for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
367 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
368 !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
369 P->getSecond().~ValueT();
370 P->getFirst().~KeyT();
371 }
372 }
373
374 void initEmpty() {
375 setNumEntries(0);
376 setNumTombstones(0);
377
378 assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&(static_cast <bool> ((getNumBuckets() & (getNumBuckets
()-1)) == 0 && "# initial buckets must be a power of two!"
) ? void (0) : __assert_fail ("(getNumBuckets() & (getNumBuckets()-1)) == 0 && \"# initial buckets must be a power of two!\""
, "llvm/include/llvm/ADT/DenseMap.h", 379, __extension__ __PRETTY_FUNCTION__
))
379 "# initial buckets must be a power of two!")(static_cast <bool> ((getNumBuckets() & (getNumBuckets
()-1)) == 0 && "# initial buckets must be a power of two!"
) ? void (0) : __assert_fail ("(getNumBuckets() & (getNumBuckets()-1)) == 0 && \"# initial buckets must be a power of two!\""
, "llvm/include/llvm/ADT/DenseMap.h", 379, __extension__ __PRETTY_FUNCTION__
))
;
380 const KeyT EmptyKey = getEmptyKey();
381 for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
382 ::new (&B->getFirst()) KeyT(EmptyKey);
383 }
384
385 /// Returns the number of buckets to allocate to ensure that the DenseMap can
386 /// accommodate \p NumEntries without need to grow().
387 unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
388 // Ensure that "NumEntries * 4 < NumBuckets * 3"
389 if (NumEntries == 0)
390 return 0;
391 // +1 is required because of the strict equality.
392 // For example if NumEntries is 48, we need to return 401.
393 return NextPowerOf2(NumEntries * 4 / 3 + 1);
394 }
395
396 void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
397 initEmpty();
398
399 // Insert all the old elements.
400 const KeyT EmptyKey = getEmptyKey();
401 const KeyT TombstoneKey = getTombstoneKey();
402 for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
403 if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
404 !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
405 // Insert the key/value into the new table.
406 BucketT *DestBucket;
407 bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
408 (void)FoundVal; // silence warning.
409 assert(!FoundVal && "Key already in new map?")(static_cast <bool> (!FoundVal && "Key already in new map?"
) ? void (0) : __assert_fail ("!FoundVal && \"Key already in new map?\""
, "llvm/include/llvm/ADT/DenseMap.h", 409, __extension__ __PRETTY_FUNCTION__
))
;
410 DestBucket->getFirst() = std::move(B->getFirst());
411 ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
412 incrementNumEntries();
413
414 // Free the value.
415 B->getSecond().~ValueT();
416 }
417 B->getFirst().~KeyT();
418 }
419 }
420
421 template <typename OtherBaseT>
422 void copyFrom(
423 const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
424 assert(&other != this)(static_cast <bool> (&other != this) ? void (0) : __assert_fail
("&other != this", "llvm/include/llvm/ADT/DenseMap.h", 424
, __extension__ __PRETTY_FUNCTION__))
;
425 assert(getNumBuckets() == other.getNumBuckets())(static_cast <bool> (getNumBuckets() == other.getNumBuckets
()) ? void (0) : __assert_fail ("getNumBuckets() == other.getNumBuckets()"
, "llvm/include/llvm/ADT/DenseMap.h", 425, __extension__ __PRETTY_FUNCTION__
))
;
426
427 setNumEntries(other.getNumEntries());
428 setNumTombstones(other.getNumTombstones());
429
430 if (std::is_trivially_copyable<KeyT>::value &&
431 std::is_trivially_copyable<ValueT>::value)
432 memcpy(reinterpret_cast<void *>(getBuckets()), other.getBuckets(),
433 getNumBuckets() * sizeof(BucketT));
434 else
435 for (size_t i = 0; i < getNumBuckets(); ++i) {
436 ::new (&getBuckets()[i].getFirst())
437 KeyT(other.getBuckets()[i].getFirst());
438 if (!KeyInfoT::isEqual(getBuckets()[i].getFirst(), getEmptyKey()) &&
439 !KeyInfoT::isEqual(getBuckets()[i].getFirst(), getTombstoneKey()))
440 ::new (&getBuckets()[i].getSecond())
441 ValueT(other.getBuckets()[i].getSecond());
442 }
443 }
444
445 static unsigned getHashValue(const KeyT &Val) {
446 return KeyInfoT::getHashValue(Val);
447 }
448
449 template<typename LookupKeyT>
450 static unsigned getHashValue(const LookupKeyT &Val) {
451 return KeyInfoT::getHashValue(Val);
452 }
453
454 static const KeyT getEmptyKey() {
455 static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
456 "Must pass the derived type to this template!");
457 return KeyInfoT::getEmptyKey();
458 }
459
460 static const KeyT getTombstoneKey() {
461 return KeyInfoT::getTombstoneKey();
462 }
463
464private:
465 iterator makeIterator(BucketT *P, BucketT *E,
466 DebugEpochBase &Epoch,
467 bool NoAdvance=false) {
468 if (shouldReverseIterate<KeyT>()) {
469 BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
470 return iterator(B, E, Epoch, NoAdvance);
471 }
472 return iterator(P, E, Epoch, NoAdvance);
473 }
474
475 const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
476 const DebugEpochBase &Epoch,
477 const bool NoAdvance=false) const {
478 if (shouldReverseIterate<KeyT>()) {
479 const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
480 return const_iterator(B, E, Epoch, NoAdvance);
481 }
482 return const_iterator(P, E, Epoch, NoAdvance);
483 }
484
485 unsigned getNumEntries() const {
486 return static_cast<const DerivedT *>(this)->getNumEntries();
487 }
488
489 void setNumEntries(unsigned Num) {
490 static_cast<DerivedT *>(this)->setNumEntries(Num);
491 }
492
493 void incrementNumEntries() {
494 setNumEntries(getNumEntries() + 1);
495 }
496
497 void decrementNumEntries() {
498 setNumEntries(getNumEntries() - 1);
499 }
500
501 unsigned getNumTombstones() const {
502 return static_cast<const DerivedT *>(this)->getNumTombstones();
503 }
504
505 void setNumTombstones(unsigned Num) {
506 static_cast<DerivedT *>(this)->setNumTombstones(Num);
507 }
508
509 void incrementNumTombstones() {
510 setNumTombstones(getNumTombstones() + 1);
511 }
512
513 void decrementNumTombstones() {
514 setNumTombstones(getNumTombstones() - 1);
515 }
516
517 const BucketT *getBuckets() const {
518 return static_cast<const DerivedT *>(this)->getBuckets();
519 }
520
521 BucketT *getBuckets() {
522 return static_cast<DerivedT *>(this)->getBuckets();
523 }
524
525 unsigned getNumBuckets() const {
526 return static_cast<const DerivedT *>(this)->getNumBuckets();
527 }
528
529 BucketT *getBucketsEnd() {
530 return getBuckets() + getNumBuckets();
531 }
532
533 const BucketT *getBucketsEnd() const {
534 return getBuckets() + getNumBuckets();
535 }
536
537 void grow(unsigned AtLeast) {
538 static_cast<DerivedT *>(this)->grow(AtLeast);
539 }
540
541 void shrink_and_clear() {
542 static_cast<DerivedT *>(this)->shrink_and_clear();
543 }
544
545 template <typename KeyArg, typename... ValueArgs>
546 BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
547 ValueArgs &&... Values) {
548 TheBucket = InsertIntoBucketImpl(Key, Key, TheBucket);
549
550 TheBucket->getFirst() = std::forward<KeyArg>(Key);
551 ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
552 return TheBucket;
553 }
554
555 template <typename LookupKeyT>
556 BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
557 ValueT &&Value, LookupKeyT &Lookup) {
558 TheBucket = InsertIntoBucketImpl(Key, Lookup, TheBucket);
559
560 TheBucket->getFirst() = std::move(Key);
561 ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
562 return TheBucket;
563 }
564
565 template <typename LookupKeyT>
566 BucketT *InsertIntoBucketImpl(const KeyT &Key, const LookupKeyT &Lookup,
567 BucketT *TheBucket) {
568 incrementEpoch();
569
570 // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
571 // the buckets are empty (meaning that many are filled with tombstones),
572 // grow the table.
573 //
574 // The later case is tricky. For example, if we had one empty bucket with
575 // tons of tombstones, failing lookups (e.g. for insertion) would have to
576 // probe almost the entire table until it found the empty bucket. If the
577 // table completely filled with tombstones, no lookup would ever succeed,
578 // causing infinite loops in lookup.
579 unsigned NewNumEntries = getNumEntries() + 1;
580 unsigned NumBuckets = getNumBuckets();
581 if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)__builtin_expect((bool)(NewNumEntries * 4 >= NumBuckets * 3
), false)
) {
582 this->grow(NumBuckets * 2);
583 LookupBucketFor(Lookup, TheBucket);
584 NumBuckets = getNumBuckets();
585 } else if (LLVM_UNLIKELY(NumBuckets-(NewNumEntries+getNumTombstones()) <=__builtin_expect((bool)(NumBuckets-(NewNumEntries+getNumTombstones
()) <= NumBuckets/8), false)
586 NumBuckets/8)__builtin_expect((bool)(NumBuckets-(NewNumEntries+getNumTombstones
()) <= NumBuckets/8), false)
) {
587 this->grow(NumBuckets);
588 LookupBucketFor(Lookup, TheBucket);
589 }
590 assert(TheBucket)(static_cast <bool> (TheBucket) ? void (0) : __assert_fail
("TheBucket", "llvm/include/llvm/ADT/DenseMap.h", 590, __extension__
__PRETTY_FUNCTION__))
;
591
592 // Only update the state after we've grown our bucket space appropriately
593 // so that when growing buckets we have self-consistent entry count.
594 incrementNumEntries();
595
596 // If we are writing over a tombstone, remember this.
597 const KeyT EmptyKey = getEmptyKey();
598 if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
599 decrementNumTombstones();
600
601 return TheBucket;
602 }
603
604 /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
605 /// FoundBucket. If the bucket contains the key and a value, this returns
606 /// true, otherwise it returns a bucket with an empty marker or tombstone and
607 /// returns false.
608 template<typename LookupKeyT>
609 bool LookupBucketFor(const LookupKeyT &Val,
610 const BucketT *&FoundBucket) const {
611 const BucketT *BucketsPtr = getBuckets();
612 const unsigned NumBuckets = getNumBuckets();
613
614 if (NumBuckets == 0) {
615 FoundBucket = nullptr;
616 return false;
617 }
618
619 // FoundTombstone - Keep track of whether we find a tombstone while probing.
620 const BucketT *FoundTombstone = nullptr;
621 const KeyT EmptyKey = getEmptyKey();
622 const KeyT TombstoneKey = getTombstoneKey();
623 assert(!KeyInfoT::isEqual(Val, EmptyKey) &&(static_cast <bool> (!KeyInfoT::isEqual(Val, EmptyKey) &&
!KeyInfoT::isEqual(Val, TombstoneKey) && "Empty/Tombstone value shouldn't be inserted into map!"
) ? void (0) : __assert_fail ("!KeyInfoT::isEqual(Val, EmptyKey) && !KeyInfoT::isEqual(Val, TombstoneKey) && \"Empty/Tombstone value shouldn't be inserted into map!\""
, "llvm/include/llvm/ADT/DenseMap.h", 625, __extension__ __PRETTY_FUNCTION__
))
624 !KeyInfoT::isEqual(Val, TombstoneKey) &&(static_cast <bool> (!KeyInfoT::isEqual(Val, EmptyKey) &&
!KeyInfoT::isEqual(Val, TombstoneKey) && "Empty/Tombstone value shouldn't be inserted into map!"
) ? void (0) : __assert_fail ("!KeyInfoT::isEqual(Val, EmptyKey) && !KeyInfoT::isEqual(Val, TombstoneKey) && \"Empty/Tombstone value shouldn't be inserted into map!\""
, "llvm/include/llvm/ADT/DenseMap.h", 625, __extension__ __PRETTY_FUNCTION__
))
625 "Empty/Tombstone value shouldn't be inserted into map!")(static_cast <bool> (!KeyInfoT::isEqual(Val, EmptyKey) &&
!KeyInfoT::isEqual(Val, TombstoneKey) && "Empty/Tombstone value shouldn't be inserted into map!"
) ? void (0) : __assert_fail ("!KeyInfoT::isEqual(Val, EmptyKey) && !KeyInfoT::isEqual(Val, TombstoneKey) && \"Empty/Tombstone value shouldn't be inserted into map!\""
, "llvm/include/llvm/ADT/DenseMap.h", 625, __extension__ __PRETTY_FUNCTION__
))
;
626
627 unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
628 unsigned ProbeAmt = 1;
629 while (true) {
630 const BucketT *ThisBucket = BucketsPtr + BucketNo;
631 // Found Val's bucket? If so, return it.
632 if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))__builtin_expect((bool)(KeyInfoT::isEqual(Val, ThisBucket->
getFirst())), true)
) {
633 FoundBucket = ThisBucket;
634 return true;
635 }
636
637 // If we found an empty bucket, the key doesn't exist in the set.
638 // Insert it and return the default value.
639 if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))__builtin_expect((bool)(KeyInfoT::isEqual(ThisBucket->getFirst
(), EmptyKey)), true)
) {
640 // If we've already seen a tombstone while probing, fill it in instead
641 // of the empty bucket we eventually probed to.
642 FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
643 return false;
644 }
645
646 // If this is a tombstone, remember it. If Val ends up not in the map, we
647 // prefer to return it than something that would require more probing.
648 if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
649 !FoundTombstone)
650 FoundTombstone = ThisBucket; // Remember the first tombstone found.
651
652 // Otherwise, it's a hash collision or a tombstone, continue quadratic
653 // probing.
654 BucketNo += ProbeAmt++;
655 BucketNo &= (NumBuckets-1);
656 }
657 }
658
659 template <typename LookupKeyT>
660 bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
661 const BucketT *ConstFoundBucket;
662 bool Result = const_cast<const DenseMapBase *>(this)
663 ->LookupBucketFor(Val, ConstFoundBucket);
664 FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
665 return Result;
666 }
667
668public:
669 /// Return the approximate size (in bytes) of the actual map.
670 /// This is just the raw memory used by DenseMap.
671 /// If entries are pointers to objects, the size of the referenced objects
672 /// are not included.
673 size_t getMemorySize() const {
674 return getNumBuckets() * sizeof(BucketT);
675 }
676};
677
678/// Equality comparison for DenseMap.
679///
680/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
681/// is also in RHS, and that no additional pairs are in RHS.
682/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
683/// complexity is linear, worst case is O(N^2) (if every hash collides).
684template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
685 typename BucketT>
686bool operator==(
687 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
688 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
689 if (LHS.size() != RHS.size())
690 return false;
691
692 for (auto &KV : LHS) {
693 auto I = RHS.find(KV.first);
694 if (I == RHS.end() || I->second != KV.second)
695 return false;
696 }
697
698 return true;
699}
700
701/// Inequality comparison for DenseMap.
702///
703/// Equivalent to !(LHS == RHS). See operator== for performance notes.
704template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
705 typename BucketT>
706bool operator!=(
707 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
708 const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
709 return !(LHS == RHS);
710}
711
712template <typename KeyT, typename ValueT,
713 typename KeyInfoT = DenseMapInfo<KeyT>,
714 typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
715class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
716 KeyT, ValueT, KeyInfoT, BucketT> {
717 friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
718
719 // Lift some types from the dependent base class into this class for
720 // simplicity of referring to them.
721 using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
722
723 BucketT *Buckets;
724 unsigned NumEntries;
725 unsigned NumTombstones;
726 unsigned NumBuckets;
727
728public:
729 /// Create a DenseMap with an optional \p InitialReserve that guarantee that
730 /// this number of elements can be inserted in the map without grow()
731 explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }
732
733 DenseMap(const DenseMap &other) : BaseT() {
734 init(0);
735 copyFrom(other);
736 }
737
738 DenseMap(DenseMap &&other) : BaseT() {
739 init(0);
740 swap(other);
741 }
742
743 template<typename InputIt>
744 DenseMap(const InputIt &I, const InputIt &E) {
745 init(std::distance(I, E));
746 this->insert(I, E);
747 }
748
749 DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
750 init(Vals.size());
751 this->insert(Vals.begin(), Vals.end());
752 }
753
754 ~DenseMap() {
755 this->destroyAll();
756 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
757 }
758
759 void swap(DenseMap& RHS) {
760 this->incrementEpoch();
761 RHS.incrementEpoch();
762 std::swap(Buckets, RHS.Buckets);
763 std::swap(NumEntries, RHS.NumEntries);
764 std::swap(NumTombstones, RHS.NumTombstones);
765 std::swap(NumBuckets, RHS.NumBuckets);
766 }
767
768 DenseMap& operator=(const DenseMap& other) {
769 if (&other != this)
770 copyFrom(other);
771 return *this;
772 }
773
774 DenseMap& operator=(DenseMap &&other) {
775 this->destroyAll();
776 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
777 init(0);
778 swap(other);
779 return *this;
780 }
781
782 void copyFrom(const DenseMap& other) {
783 this->destroyAll();
784 deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
785 if (allocateBuckets(other.NumBuckets)) {
786 this->BaseT::copyFrom(other);
787 } else {
788 NumEntries = 0;
789 NumTombstones = 0;
790 }
791 }
792
793 void init(unsigned InitNumEntries) {
794 auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
795 if (allocateBuckets(InitBuckets)) {
796 this->BaseT::initEmpty();
797 } else {
798 NumEntries = 0;
799 NumTombstones = 0;
800 }
801 }
802
803 void grow(unsigned AtLeast) {
804 unsigned OldNumBuckets = NumBuckets;
805 BucketT *OldBuckets = Buckets;
806
807 allocateBuckets(std::max<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast-1))));
808 assert(Buckets)(static_cast <bool> (Buckets) ? void (0) : __assert_fail
("Buckets", "llvm/include/llvm/ADT/DenseMap.h", 808, __extension__
__PRETTY_FUNCTION__))
;
809 if (!OldBuckets) {
810 this->BaseT::initEmpty();
811 return;
812 }
813
814 this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
815
816 // Free the old table.
817 deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
818 alignof(BucketT));
819 }
820
821 void shrink_and_clear() {
822 unsigned OldNumBuckets = NumBuckets;
823 unsigned OldNumEntries = NumEntries;
824 this->destroyAll();
825
826 // Reduce the number of buckets.
827 unsigned NewNumBuckets = 0;
828 if (OldNumEntries)
829 NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
830 if (NewNumBuckets == NumBuckets) {
831 this->BaseT::initEmpty();
832 return;
833 }
834
835 deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
836 alignof(BucketT));
837 init(NewNumBuckets);
838 }
839
840private:
841 unsigned getNumEntries() const {
842 return NumEntries;
843 }
844
845 void setNumEntries(unsigned Num) {
846 NumEntries = Num;
847 }
848
849 unsigned getNumTombstones() const {
850 return NumTombstones;
851 }
852
853 void setNumTombstones(unsigned Num) {
854 NumTombstones = Num;
855 }
856
857 BucketT *getBuckets() const {
858 return Buckets;
859 }
860
861 unsigned getNumBuckets() const {
862 return NumBuckets;
863 }
864
865 bool allocateBuckets(unsigned Num) {
866 NumBuckets = Num;
867 if (NumBuckets == 0) {
868 Buckets = nullptr;
869 return false;
870 }
871
872 Buckets = static_cast<BucketT *>(
873 allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
874 return true;
875 }
876};
877
878template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
879 typename KeyInfoT = DenseMapInfo<KeyT>,
880 typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
881class SmallDenseMap
882 : public DenseMapBase<
883 SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
884 ValueT, KeyInfoT, BucketT> {
885 friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
886
887 // Lift some types from the dependent base class into this class for
888 // simplicity of referring to them.
889 using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
890
891 static_assert(isPowerOf2_64(InlineBuckets),
892 "InlineBuckets must be a power of 2.");
893
894 unsigned Small : 1;
895 unsigned NumEntries : 31;
896 unsigned NumTombstones;
897
898 struct LargeRep {
899 BucketT *Buckets;
900 unsigned NumBuckets;
901 };
902
903 /// A "union" of an inline bucket array and the struct representing
904 /// a large bucket. This union will be discriminated by the 'Small' bit.
905 AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
906
907public:
908 explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
909 init(NumInitBuckets);
910 }
911
912 SmallDenseMap(const SmallDenseMap &other) : BaseT() {
913 init(0);
914 copyFrom(other);
915 }
916
917 SmallDenseMap(SmallDenseMap &&other) : BaseT() {
918 init(0);
919 swap(other);
920 }
921
922 template<typename InputIt>
923 SmallDenseMap(const InputIt &I, const InputIt &E) {
924 init(NextPowerOf2(std::distance(I, E)));
925 this->insert(I, E);
926 }
927
928 SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
929 : SmallDenseMap(Vals.begin(), Vals.end()) {}
930
931 ~SmallDenseMap() {
932 this->destroyAll();
933 deallocateBuckets();
934 }
935
936 void swap(SmallDenseMap& RHS) {
937 unsigned TmpNumEntries = RHS.NumEntries;
938 RHS.NumEntries = NumEntries;
939 NumEntries = TmpNumEntries;
940 std::swap(NumTombstones, RHS.NumTombstones);
941
942 const KeyT EmptyKey = this->getEmptyKey();
943 const KeyT TombstoneKey = this->getTombstoneKey();
944 if (Small && RHS.Small) {
945 // If we're swapping inline bucket arrays, we have to cope with some of
946 // the tricky bits of DenseMap's storage system: the buckets are not
947 // fully initialized. Thus we swap every key, but we may have
948 // a one-directional move of the value.
949 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
950 BucketT *LHSB = &getInlineBuckets()[i],
951 *RHSB = &RHS.getInlineBuckets()[i];
952 bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
953 !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
954 bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
955 !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
956 if (hasLHSValue && hasRHSValue) {
957 // Swap together if we can...
958 std::swap(*LHSB, *RHSB);
959 continue;
960 }
961 // Swap separately and handle any asymmetry.
962 std::swap(LHSB->getFirst(), RHSB->getFirst());
963 if (hasLHSValue) {
964 ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
965 LHSB->getSecond().~ValueT();
966 } else if (hasRHSValue) {
967 ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
968 RHSB->getSecond().~ValueT();
969 }
970 }
971 return;
972 }
973 if (!Small && !RHS.Small) {
974 std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
975 std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
976 return;
977 }
978
979 SmallDenseMap &SmallSide = Small ? *this : RHS;
980 SmallDenseMap &LargeSide = Small ? RHS : *this;
981
982 // First stash the large side's rep and move the small side across.
983 LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
984 LargeSide.getLargeRep()->~LargeRep();
985 LargeSide.Small = true;
986 // This is similar to the standard move-from-old-buckets, but the bucket
987 // count hasn't actually rotated in this case. So we have to carefully
988 // move construct the keys and values into their new locations, but there
989 // is no need to re-hash things.
990 for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
991 BucketT *NewB = &LargeSide.getInlineBuckets()[i],
992 *OldB = &SmallSide.getInlineBuckets()[i];
993 ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
994 OldB->getFirst().~KeyT();
995 if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
996 !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
997 ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
998 OldB->getSecond().~ValueT();
999 }
1000 }
1001
1002 // The hard part of moving the small buckets across is done, just move
1003 // the TmpRep into its new home.
1004 SmallSide.Small = false;
1005 new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
1006 }
1007
1008 SmallDenseMap& operator=(const SmallDenseMap& other) {
1009 if (&other != this)
1010 copyFrom(other);
1011 return *this;
1012 }
1013
1014 SmallDenseMap& operator=(SmallDenseMap &&other) {
1015 this->destroyAll();
1016 deallocateBuckets();
1017 init(0);
1018 swap(other);
1019 return *this;
1020 }
1021
1022 void copyFrom(const SmallDenseMap& other) {
1023 this->destroyAll();
1024 deallocateBuckets();
1025 Small = true;
1026 if (other.getNumBuckets() > InlineBuckets) {
1027 Small = false;
1028 new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
1029 }
1030 this->BaseT::copyFrom(other);
1031 }
1032
1033 void init(unsigned InitBuckets) {
1034 Small = true;
1035 if (InitBuckets > InlineBuckets) {
1036 Small = false;
1037 new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
1038 }
1039 this->BaseT::initEmpty();
1040 }
1041
1042 void grow(unsigned AtLeast) {
1043 if (AtLeast > InlineBuckets)
1044 AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
1045
1046 if (Small) {
1047 // First move the inline buckets into a temporary storage.
1048 AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
1049 BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
1050 BucketT *TmpEnd = TmpBegin;
1051
1052 // Loop over the buckets, moving non-empty, non-tombstones into the
1053 // temporary storage. Have the loop move the TmpEnd forward as it goes.
1054 const KeyT EmptyKey = this->getEmptyKey();
1055 const KeyT TombstoneKey = this->getTombstoneKey();
1056 for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
1057 if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
1058 !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
1059 assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&(static_cast <bool> (size_t(TmpEnd - TmpBegin) < InlineBuckets
&& "Too many inline buckets!") ? void (0) : __assert_fail
("size_t(TmpEnd - TmpBegin) < InlineBuckets && \"Too many inline buckets!\""
, "llvm/include/llvm/ADT/DenseMap.h", 1060, __extension__ __PRETTY_FUNCTION__
))
1060 "Too many inline buckets!")(static_cast <bool> (size_t(TmpEnd - TmpBegin) < InlineBuckets
&& "Too many inline buckets!") ? void (0) : __assert_fail
("size_t(TmpEnd - TmpBegin) < InlineBuckets && \"Too many inline buckets!\""
, "llvm/include/llvm/ADT/DenseMap.h", 1060, __extension__ __PRETTY_FUNCTION__
))
;
1061 ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
1062 ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
1063 ++TmpEnd;
1064 P->getSecond().~ValueT();
1065 }
1066 P->getFirst().~KeyT();
1067 }
1068
1069 // AtLeast == InlineBuckets can happen if there are many tombstones,
1070 // and grow() is used to remove them. Usually we always switch to the
1071 // large rep here.
1072 if (AtLeast > InlineBuckets) {
1073 Small = false;
1074 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1075 }
1076 this->moveFromOldBuckets(TmpBegin, TmpEnd);
1077 return;
1078 }
1079
1080 LargeRep OldRep = std::move(*getLargeRep());
1081 getLargeRep()->~LargeRep();
1082 if (AtLeast <= InlineBuckets) {
1083 Small = true;
1084 } else {
1085 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
1086 }
1087
1088 this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
1089
1090 // Free the old table.
1091 deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
1092 alignof(BucketT));
1093 }
1094
1095 void shrink_and_clear() {
1096 unsigned OldSize = this->size();
1097 this->destroyAll();
1098
1099 // Reduce the number of buckets.
1100 unsigned NewNumBuckets = 0;
1101 if (OldSize) {
1102 NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
1103 if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
1104 NewNumBuckets = 64;
1105 }
1106 if ((Small && NewNumBuckets <= InlineBuckets) ||
1107 (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
1108 this->BaseT::initEmpty();
1109 return;
1110 }
1111
1112 deallocateBuckets();
1113 init(NewNumBuckets);
1114 }
1115
1116private:
1117 unsigned getNumEntries() const {
1118 return NumEntries;
1119 }
1120
1121 void setNumEntries(unsigned Num) {
1122 // NumEntries is hardcoded to be 31 bits wide.
1123 assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries")(static_cast <bool> (Num < (1U << 31) &&
"Cannot support more than 1<<31 entries") ? void (0) :
__assert_fail ("Num < (1U << 31) && \"Cannot support more than 1<<31 entries\""
, "llvm/include/llvm/ADT/DenseMap.h", 1123, __extension__ __PRETTY_FUNCTION__
))
;
1124 NumEntries = Num;
1125 }
1126
1127 unsigned getNumTombstones() const {
1128 return NumTombstones;
1129 }
1130
1131 void setNumTombstones(unsigned Num) {
1132 NumTombstones = Num;
1133 }
1134
1135 const BucketT *getInlineBuckets() const {
1136 assert(Small)(static_cast <bool> (Small) ? void (0) : __assert_fail (
"Small", "llvm/include/llvm/ADT/DenseMap.h", 1136, __extension__
__PRETTY_FUNCTION__))
;
1137 // Note that this cast does not violate aliasing rules as we assert that
1138 // the memory's dynamic type is the small, inline bucket buffer, and the
1139 // 'storage' is a POD containing a char buffer.
1140 return reinterpret_cast<const BucketT *>(&storage);
1141 }
1142
1143 BucketT *getInlineBuckets() {
1144 return const_cast<BucketT *>(
1145 const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
1146 }
1147
1148 const LargeRep *getLargeRep() const {
1149 assert(!Small)(static_cast <bool> (!Small) ? void (0) : __assert_fail
("!Small", "llvm/include/llvm/ADT/DenseMap.h", 1149, __extension__
__PRETTY_FUNCTION__))
;
1150 // Note, same rule about aliasing as with getInlineBuckets.
1151 return reinterpret_cast<const LargeRep *>(&storage);
1152 }
1153
1154 LargeRep *getLargeRep() {
1155 return const_cast<LargeRep *>(
1156 const_cast<const SmallDenseMap *>(this)->getLargeRep());
1157 }
1158
1159 const BucketT *getBuckets() const {
1160 return Small ? getInlineBuckets() : getLargeRep()->Buckets;
1161 }
1162
1163 BucketT *getBuckets() {
1164 return const_cast<BucketT *>(
1165 const_cast<const SmallDenseMap *>(this)->getBuckets());
1166 }
1167
1168 unsigned getNumBuckets() const {
1169 return Small ? InlineBuckets : getLargeRep()->NumBuckets;
1170 }
1171
1172 void deallocateBuckets() {
1173 if (Small)
1174 return;
1175
1176 deallocate_buffer(getLargeRep()->Buckets,
1177 sizeof(BucketT) * getLargeRep()->NumBuckets,
1178 alignof(BucketT));
1179 getLargeRep()->~LargeRep();
1180 }
1181
1182 LargeRep allocateBuckets(unsigned Num) {
1183 assert(Num > InlineBuckets && "Must allocate more buckets than are inline")(static_cast <bool> (Num > InlineBuckets && "Must allocate more buckets than are inline"
) ? void (0) : __assert_fail ("Num > InlineBuckets && \"Must allocate more buckets than are inline\""
, "llvm/include/llvm/ADT/DenseMap.h", 1183, __extension__ __PRETTY_FUNCTION__
))
;
1184 LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
1185 sizeof(BucketT) * Num, alignof(BucketT))),
1186 Num};
1187 return Rep;
1188 }
1189};
1190
1191template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
1192 bool IsConst>
1193class DenseMapIterator : DebugEpochBase::HandleBase {
1194 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
1195 friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
1196
1197public:
1198 using difference_type = ptrdiff_t;
1199 using value_type =
1200 typename std::conditional<IsConst, const Bucket, Bucket>::type;
1201 using pointer = value_type *;
1202 using reference = value_type &;
1203 using iterator_category = std::forward_iterator_tag;
1204
1205private:
1206 pointer Ptr = nullptr;
1207 pointer End = nullptr;
1208
1209public:
1210 DenseMapIterator() = default;
1211
1212 DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
1213 bool NoAdvance = false)
1214 : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
1215 assert(isHandleInSync() && "invalid construction!")(static_cast <bool> (isHandleInSync() && "invalid construction!"
) ? void (0) : __assert_fail ("isHandleInSync() && \"invalid construction!\""
, "llvm/include/llvm/ADT/DenseMap.h", 1215, __extension__ __PRETTY_FUNCTION__
))
;
1216
1217 if (NoAdvance) return;
1218 if (shouldReverseIterate<KeyT>()) {
1219 RetreatPastEmptyBuckets();
1220 return;
1221 }
1222 AdvancePastEmptyBuckets();
1223 }
1224
1225 // Converting ctor from non-const iterators to const iterators. SFINAE'd out
1226 // for const iterator destinations so it doesn't end up as a user defined copy
1227 // constructor.
1228 template <bool IsConstSrc,
1229 typename = std::enable_if_t<!IsConstSrc && IsConst>>
1230 DenseMapIterator(
1231 const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
1232 : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
1233
1234 reference operator*() const {
1235 assert(isHandleInSync() && "invalid iterator access!")(static_cast <bool> (isHandleInSync() && "invalid iterator access!"
) ? void (0) : __assert_fail ("isHandleInSync() && \"invalid iterator access!\""
, "llvm/include/llvm/ADT/DenseMap.h", 1235, __extension__ __PRETTY_FUNCTION__
))
;
1236 assert(Ptr != End && "dereferencing end() iterator")(static_cast <bool> (Ptr != End && "dereferencing end() iterator"
) ? void (0) : __assert_fail ("Ptr != End && \"dereferencing end() iterator\""
, "llvm/include/llvm/ADT/DenseMap.h", 1236, __extension__ __PRETTY_FUNCTION__
))
;
1237 if (shouldReverseIterate<KeyT>())
1238 return Ptr[-1];
1239 return *Ptr;
1240 }
1241 pointer operator->() const {
1242 assert(isHandleInSync() && "invalid iterator access!")(static_cast <bool> (isHandleInSync() && "invalid iterator access!"
) ? void (0) : __assert_fail ("isHandleInSync() && \"invalid iterator access!\""
, "llvm/include/llvm/ADT/DenseMap.h", 1242, __extension__ __PRETTY_FUNCTION__
))
;
1243 assert(Ptr != End && "dereferencing end() iterator")(static_cast <bool> (Ptr != End && "dereferencing end() iterator"
) ? void (0) : __assert_fail ("Ptr != End && \"dereferencing end() iterator\""
, "llvm/include/llvm/ADT/DenseMap.h", 1243, __extension__ __PRETTY_FUNCTION__
))
;
1244 if (shouldReverseIterate<KeyT>())
1245 return &(Ptr[-1]);
1246 return Ptr;
1247 }
1248
1249 friend bool operator==(const DenseMapIterator &LHS,
1250 const DenseMapIterator &RHS) {
1251 assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!")(static_cast <bool> ((!LHS.Ptr || LHS.isHandleInSync())
&& "handle not in sync!") ? void (0) : __assert_fail
("(!LHS.Ptr || LHS.isHandleInSync()) && \"handle not in sync!\""
, "llvm/include/llvm/ADT/DenseMap.h", 1251, __extension__ __PRETTY_FUNCTION__
))
;
26
Assuming field 'Ptr' is null
27
'?' condition is true
1252 assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!")(static_cast <bool> ((!RHS.Ptr || RHS.isHandleInSync())
&& "handle not in sync!") ? void (0) : __assert_fail
("(!RHS.Ptr || RHS.isHandleInSync()) && \"handle not in sync!\""
, "llvm/include/llvm/ADT/DenseMap.h", 1252, __extension__ __PRETTY_FUNCTION__
))
;
28
Assuming field 'Ptr' is null
29
'?' condition is true
1253 assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&(static_cast <bool> (LHS.getEpochAddress() == RHS.getEpochAddress
() && "comparing incomparable iterators!") ? void (0)
: __assert_fail ("LHS.getEpochAddress() == RHS.getEpochAddress() && \"comparing incomparable iterators!\""
, "llvm/include/llvm/ADT/DenseMap.h", 1254, __extension__ __PRETTY_FUNCTION__
))
30
Assuming the condition is true
31
'?' condition is true
1254 "comparing incomparable iterators!")(static_cast <bool> (LHS.getEpochAddress() == RHS.getEpochAddress
() && "comparing incomparable iterators!") ? void (0)
: __assert_fail ("LHS.getEpochAddress() == RHS.getEpochAddress() && \"comparing incomparable iterators!\""
, "llvm/include/llvm/ADT/DenseMap.h", 1254, __extension__ __PRETTY_FUNCTION__
))
;
1255 return LHS.Ptr == RHS.Ptr;
32
Returning the value 1, which participates in a condition later
1256 }
1257
1258 friend bool operator!=(const DenseMapIterator &LHS,
1259 const DenseMapIterator &RHS) {
1260 return !(LHS == RHS);
25
Calling 'operator=='
33
Returning from 'operator=='
34
Returning zero, which participates in a condition later
1261 }
1262
1263 inline DenseMapIterator& operator++() { // Preincrement
1264 assert(isHandleInSync() && "invalid iterator access!")(static_cast <bool> (isHandleInSync() && "invalid iterator access!"
) ? void (0) : __assert_fail ("isHandleInSync() && \"invalid iterator access!\""
, "llvm/include/llvm/ADT/DenseMap.h", 1264, __extension__ __PRETTY_FUNCTION__
))
;
1265 assert(Ptr != End && "incrementing end() iterator")(static_cast <bool> (Ptr != End && "incrementing end() iterator"
) ? void (0) : __assert_fail ("Ptr != End && \"incrementing end() iterator\""
, "llvm/include/llvm/ADT/DenseMap.h", 1265, __extension__ __PRETTY_FUNCTION__
))
;
1266 if (shouldReverseIterate<KeyT>()) {
1267 --Ptr;
1268 RetreatPastEmptyBuckets();
1269 return *this;
1270 }
1271 ++Ptr;
1272 AdvancePastEmptyBuckets();
1273 return *this;
1274 }
1275 DenseMapIterator operator++(int) { // Postincrement
1276 assert(isHandleInSync() && "invalid iterator access!")(static_cast <bool> (isHandleInSync() && "invalid iterator access!"
) ? void (0) : __assert_fail ("isHandleInSync() && \"invalid iterator access!\""
, "llvm/include/llvm/ADT/DenseMap.h", 1276, __extension__ __PRETTY_FUNCTION__
))
;
1277 DenseMapIterator tmp = *this; ++*this; return tmp;
1278 }
1279
1280private:
1281 void AdvancePastEmptyBuckets() {
1282 assert(Ptr <= End)(static_cast <bool> (Ptr <= End) ? void (0) : __assert_fail
("Ptr <= End", "llvm/include/llvm/ADT/DenseMap.h", 1282, __extension__
__PRETTY_FUNCTION__))
;
1283 const KeyT Empty = KeyInfoT::getEmptyKey();
1284 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1285
1286 while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
1287 KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
1288 ++Ptr;
1289 }
1290
1291 void RetreatPastEmptyBuckets() {
1292 assert(Ptr >= End)(static_cast <bool> (Ptr >= End) ? void (0) : __assert_fail
("Ptr >= End", "llvm/include/llvm/ADT/DenseMap.h", 1292, __extension__
__PRETTY_FUNCTION__))
;
1293 const KeyT Empty = KeyInfoT::getEmptyKey();
1294 const KeyT Tombstone = KeyInfoT::getTombstoneKey();
1295
1296 while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
1297 KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
1298 --Ptr;
1299 }
1300};
1301
1302template <typename KeyT, typename ValueT, typename KeyInfoT>
1303inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
1304 return X.getMemorySize();
1305}
1306
1307} // end namespace llvm
1308
1309#endif // LLVM_ADT_DENSEMAP_H

/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/clang/include/clang/AST/GlobalDecl.h

1//===- GlobalDecl.h - Global declaration holder -----------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// A GlobalDecl can hold either a regular variable/function or a C++ ctor/dtor
10// together with its type.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_CLANG_AST_GLOBALDECL_H
15#define LLVM_CLANG_AST_GLOBALDECL_H
16
17#include "clang/AST/Attr.h"
18#include "clang/AST/DeclCXX.h"
19#include "clang/AST/DeclObjC.h"
20#include "clang/AST/DeclOpenMP.h"
21#include "clang/AST/DeclTemplate.h"
22#include "clang/Basic/ABI.h"
23#include "clang/Basic/LLVM.h"
24#include "llvm/ADT/DenseMapInfo.h"
25#include "llvm/ADT/PointerIntPair.h"
26#include "llvm/Support/Casting.h"
27#include "llvm/Support/type_traits.h"
28#include <cassert>
29
30namespace clang {
31
32enum class DynamicInitKind : unsigned {
33 NoStub = 0,
34 Initializer,
35 AtExit,
36 GlobalArrayDestructor
37};
38
39enum class KernelReferenceKind : unsigned {
40 Kernel = 0,
41 Stub = 1,
42};
43
44/// GlobalDecl - represents a global declaration. This can either be a
45/// CXXConstructorDecl and the constructor type (Base, Complete).
46/// a CXXDestructorDecl and the destructor type (Base, Complete),
47/// a FunctionDecl and the kernel reference type (Kernel, Stub), or
48/// a VarDecl, a FunctionDecl or a BlockDecl.
49///
50/// When a new type of GlobalDecl is added, the following places should
51/// be updated to convert a Decl* to a GlobalDecl:
52/// PredefinedExpr::ComputeName() in lib/AST/Expr.cpp.
53/// getParentOfLocalEntity() in lib/AST/ItaniumMangle.cpp
54/// ASTNameGenerator::Implementation::writeFuncOrVarName in lib/AST/Mangle.cpp
55///
56class GlobalDecl {
57 llvm::PointerIntPair<const Decl *, 3> Value;
58 unsigned MultiVersionIndex = 0;
59
60 void Init(const Decl *D) {
61 assert(!isa<CXXConstructorDecl>(D) && "Use other ctor with ctor decls!")(static_cast <bool> (!isa<CXXConstructorDecl>(D) &&
"Use other ctor with ctor decls!") ? void (0) : __assert_fail
("!isa<CXXConstructorDecl>(D) && \"Use other ctor with ctor decls!\""
, "clang/include/clang/AST/GlobalDecl.h", 61, __extension__ __PRETTY_FUNCTION__
))
;
62 assert(!isa<CXXDestructorDecl>(D) && "Use other ctor with dtor decls!")(static_cast <bool> (!isa<CXXDestructorDecl>(D) &&
"Use other ctor with dtor decls!") ? void (0) : __assert_fail
("!isa<CXXDestructorDecl>(D) && \"Use other ctor with dtor decls!\""
, "clang/include/clang/AST/GlobalDecl.h", 62, __extension__ __PRETTY_FUNCTION__
))
;
63 assert(!D->hasAttr<CUDAGlobalAttr>() && "Use other ctor with GPU kernels!")(static_cast <bool> (!D->hasAttr<CUDAGlobalAttr>
() && "Use other ctor with GPU kernels!") ? void (0) :
__assert_fail ("!D->hasAttr<CUDAGlobalAttr>() && \"Use other ctor with GPU kernels!\""
, "clang/include/clang/AST/GlobalDecl.h", 63, __extension__ __PRETTY_FUNCTION__
))
;
64
65 Value.setPointer(D);
66 }
67
68public:
69 GlobalDecl() = default;
70 GlobalDecl(const VarDecl *D) { Init(D);}
71 GlobalDecl(const FunctionDecl *D, unsigned MVIndex = 0)
72 : MultiVersionIndex(MVIndex) {
73 if (!D->hasAttr<CUDAGlobalAttr>()) {
74 Init(D);
75 return;
76 }
77 Value.setPointerAndInt(D, unsigned(getDefaultKernelReference(D)));
78 }
79 GlobalDecl(const FunctionDecl *D, KernelReferenceKind Kind)
80 : Value(D, unsigned(Kind)) {
81 assert(D->hasAttr<CUDAGlobalAttr>() && "Decl is not a GPU kernel!")(static_cast <bool> (D->hasAttr<CUDAGlobalAttr>
() && "Decl is not a GPU kernel!") ? void (0) : __assert_fail
("D->hasAttr<CUDAGlobalAttr>() && \"Decl is not a GPU kernel!\""
, "clang/include/clang/AST/GlobalDecl.h", 81, __extension__ __PRETTY_FUNCTION__
))
;
82 }
83 GlobalDecl(const NamedDecl *D) { Init(D); }
84 GlobalDecl(const BlockDecl *D) { Init(D); }
85 GlobalDecl(const CapturedDecl *D) { Init(D); }
86 GlobalDecl(const ObjCMethodDecl *D) { Init(D); }
87 GlobalDecl(const OMPDeclareReductionDecl *D) { Init(D); }
88 GlobalDecl(const OMPDeclareMapperDecl *D) { Init(D); }
89 GlobalDecl(const CXXConstructorDecl *D, CXXCtorType Type) : Value(D, Type) {}
90 GlobalDecl(const CXXDestructorDecl *D, CXXDtorType Type) : Value(D, Type) {}
91 GlobalDecl(const VarDecl *D, DynamicInitKind StubKind)
92 : Value(D, unsigned(StubKind)) {}
93
94 GlobalDecl getCanonicalDecl() const {
95 GlobalDecl CanonGD;
96 CanonGD.Value.setPointer(Value.getPointer()->getCanonicalDecl());
97 CanonGD.Value.setInt(Value.getInt());
98 CanonGD.MultiVersionIndex = MultiVersionIndex;
99
100 return CanonGD;
101 }
102
103 const Decl *getDecl() const { return Value.getPointer(); }
39
Calling 'PointerIntPair::getPointer'
48
Returning from 'PointerIntPair::getPointer'
49
Returning pointer
104
105 CXXCtorType getCtorType() const {
106 assert(isa<CXXConstructorDecl>(getDecl()) && "Decl is not a ctor!")(static_cast <bool> (isa<CXXConstructorDecl>(getDecl
()) && "Decl is not a ctor!") ? void (0) : __assert_fail
("isa<CXXConstructorDecl>(getDecl()) && \"Decl is not a ctor!\""
, "clang/include/clang/AST/GlobalDecl.h", 106, __extension__ __PRETTY_FUNCTION__
))
;
107 return static_cast<CXXCtorType>(Value.getInt());
108 }
109
110 CXXDtorType getDtorType() const {
111 assert(isa<CXXDestructorDecl>(getDecl()) && "Decl is not a dtor!")(static_cast <bool> (isa<CXXDestructorDecl>(getDecl
()) && "Decl is not a dtor!") ? void (0) : __assert_fail
("isa<CXXDestructorDecl>(getDecl()) && \"Decl is not a dtor!\""
, "clang/include/clang/AST/GlobalDecl.h", 111, __extension__ __PRETTY_FUNCTION__
))
;
112 return static_cast<CXXDtorType>(Value.getInt());
113 }
114
115 DynamicInitKind getDynamicInitKind() const {
116 assert(isa<VarDecl>(getDecl()) &&(static_cast <bool> (isa<VarDecl>(getDecl()) &&
cast<VarDecl>(getDecl())->hasGlobalStorage() &&
"Decl is not a global variable!") ? void (0) : __assert_fail
("isa<VarDecl>(getDecl()) && cast<VarDecl>(getDecl())->hasGlobalStorage() && \"Decl is not a global variable!\""
, "clang/include/clang/AST/GlobalDecl.h", 118, __extension__ __PRETTY_FUNCTION__
))
117 cast<VarDecl>(getDecl())->hasGlobalStorage() &&(static_cast <bool> (isa<VarDecl>(getDecl()) &&
cast<VarDecl>(getDecl())->hasGlobalStorage() &&
"Decl is not a global variable!") ? void (0) : __assert_fail
("isa<VarDecl>(getDecl()) && cast<VarDecl>(getDecl())->hasGlobalStorage() && \"Decl is not a global variable!\""
, "clang/include/clang/AST/GlobalDecl.h", 118, __extension__ __PRETTY_FUNCTION__
))
118 "Decl is not a global variable!")(static_cast <bool> (isa<VarDecl>(getDecl()) &&
cast<VarDecl>(getDecl())->hasGlobalStorage() &&
"Decl is not a global variable!") ? void (0) : __assert_fail
("isa<VarDecl>(getDecl()) && cast<VarDecl>(getDecl())->hasGlobalStorage() && \"Decl is not a global variable!\""
, "clang/include/clang/AST/GlobalDecl.h", 118, __extension__ __PRETTY_FUNCTION__
))
;
119 return static_cast<DynamicInitKind>(Value.getInt());
120 }
121
122 unsigned getMultiVersionIndex() const {
123 assert(isa<FunctionDecl>((static_cast <bool> (isa<FunctionDecl>( getDecl()
) && !cast<FunctionDecl>(getDecl())->hasAttr
<CUDAGlobalAttr>() && !isa<CXXConstructorDecl
>(getDecl()) && !isa<CXXDestructorDecl>(getDecl
()) && "Decl is not a plain FunctionDecl!") ? void (0
) : __assert_fail ("isa<FunctionDecl>( getDecl()) && !cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() && !isa<CXXConstructorDecl>(getDecl()) && !isa<CXXDestructorDecl>(getDecl()) && \"Decl is not a plain FunctionDecl!\""
, "clang/include/clang/AST/GlobalDecl.h", 128, __extension__ __PRETTY_FUNCTION__
))
124 getDecl()) &&(static_cast <bool> (isa<FunctionDecl>( getDecl()
) && !cast<FunctionDecl>(getDecl())->hasAttr
<CUDAGlobalAttr>() && !isa<CXXConstructorDecl
>(getDecl()) && !isa<CXXDestructorDecl>(getDecl
()) && "Decl is not a plain FunctionDecl!") ? void (0
) : __assert_fail ("isa<FunctionDecl>( getDecl()) && !cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() && !isa<CXXConstructorDecl>(getDecl()) && !isa<CXXDestructorDecl>(getDecl()) && \"Decl is not a plain FunctionDecl!\""
, "clang/include/clang/AST/GlobalDecl.h", 128, __extension__ __PRETTY_FUNCTION__
))
125 !cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() &&(static_cast <bool> (isa<FunctionDecl>( getDecl()
) && !cast<FunctionDecl>(getDecl())->hasAttr
<CUDAGlobalAttr>() && !isa<CXXConstructorDecl
>(getDecl()) && !isa<CXXDestructorDecl>(getDecl
()) && "Decl is not a plain FunctionDecl!") ? void (0
) : __assert_fail ("isa<FunctionDecl>( getDecl()) && !cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() && !isa<CXXConstructorDecl>(getDecl()) && !isa<CXXDestructorDecl>(getDecl()) && \"Decl is not a plain FunctionDecl!\""
, "clang/include/clang/AST/GlobalDecl.h", 128, __extension__ __PRETTY_FUNCTION__
))
126 !isa<CXXConstructorDecl>(getDecl()) &&(static_cast <bool> (isa<FunctionDecl>( getDecl()
) && !cast<FunctionDecl>(getDecl())->hasAttr
<CUDAGlobalAttr>() && !isa<CXXConstructorDecl
>(getDecl()) && !isa<CXXDestructorDecl>(getDecl
()) && "Decl is not a plain FunctionDecl!") ? void (0
) : __assert_fail ("isa<FunctionDecl>( getDecl()) && !cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() && !isa<CXXConstructorDecl>(getDecl()) && !isa<CXXDestructorDecl>(getDecl()) && \"Decl is not a plain FunctionDecl!\""
, "clang/include/clang/AST/GlobalDecl.h", 128, __extension__ __PRETTY_FUNCTION__
))
127 !isa<CXXDestructorDecl>(getDecl()) &&(static_cast <bool> (isa<FunctionDecl>( getDecl()
) && !cast<FunctionDecl>(getDecl())->hasAttr
<CUDAGlobalAttr>() && !isa<CXXConstructorDecl
>(getDecl()) && !isa<CXXDestructorDecl>(getDecl
()) && "Decl is not a plain FunctionDecl!") ? void (0
) : __assert_fail ("isa<FunctionDecl>( getDecl()) && !cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() && !isa<CXXConstructorDecl>(getDecl()) && !isa<CXXDestructorDecl>(getDecl()) && \"Decl is not a plain FunctionDecl!\""
, "clang/include/clang/AST/GlobalDecl.h", 128, __extension__ __PRETTY_FUNCTION__
))
128 "Decl is not a plain FunctionDecl!")(static_cast <bool> (isa<FunctionDecl>( getDecl()
) && !cast<FunctionDecl>(getDecl())->hasAttr
<CUDAGlobalAttr>() && !isa<CXXConstructorDecl
>(getDecl()) && !isa<CXXDestructorDecl>(getDecl
()) && "Decl is not a plain FunctionDecl!") ? void (0
) : __assert_fail ("isa<FunctionDecl>( getDecl()) && !cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() && !isa<CXXConstructorDecl>(getDecl()) && !isa<CXXDestructorDecl>(getDecl()) && \"Decl is not a plain FunctionDecl!\""
, "clang/include/clang/AST/GlobalDecl.h", 128, __extension__ __PRETTY_FUNCTION__
))
;
129 return MultiVersionIndex;
130 }
131
132 KernelReferenceKind getKernelReferenceKind() const {
133 assert(((isa<FunctionDecl>(getDecl()) &&(static_cast <bool> (((isa<FunctionDecl>(getDecl(
)) && cast<FunctionDecl>(getDecl())->hasAttr
<CUDAGlobalAttr>()) || (isa<FunctionTemplateDecl>
(getDecl()) && cast<FunctionTemplateDecl>(getDecl
()) ->getTemplatedDecl() ->hasAttr<CUDAGlobalAttr>
())) && "Decl is not a GPU kernel!") ? void (0) : __assert_fail
("((isa<FunctionDecl>(getDecl()) && cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>()) || (isa<FunctionTemplateDecl>(getDecl()) && cast<FunctionTemplateDecl>(getDecl()) ->getTemplatedDecl() ->hasAttr<CUDAGlobalAttr>())) && \"Decl is not a GPU kernel!\""
, "clang/include/clang/AST/GlobalDecl.h", 139, __extension__ __PRETTY_FUNCTION__
))
134 cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>()) ||(static_cast <bool> (((isa<FunctionDecl>(getDecl(
)) && cast<FunctionDecl>(getDecl())->hasAttr
<CUDAGlobalAttr>()) || (isa<FunctionTemplateDecl>
(getDecl()) && cast<FunctionTemplateDecl>(getDecl
()) ->getTemplatedDecl() ->hasAttr<CUDAGlobalAttr>
())) && "Decl is not a GPU kernel!") ? void (0) : __assert_fail
("((isa<FunctionDecl>(getDecl()) && cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>()) || (isa<FunctionTemplateDecl>(getDecl()) && cast<FunctionTemplateDecl>(getDecl()) ->getTemplatedDecl() ->hasAttr<CUDAGlobalAttr>())) && \"Decl is not a GPU kernel!\""
, "clang/include/clang/AST/GlobalDecl.h", 139, __extension__ __PRETTY_FUNCTION__
))
135 (isa<FunctionTemplateDecl>(getDecl()) &&(static_cast <bool> (((isa<FunctionDecl>(getDecl(
)) && cast<FunctionDecl>(getDecl())->hasAttr
<CUDAGlobalAttr>()) || (isa<FunctionTemplateDecl>
(getDecl()) && cast<FunctionTemplateDecl>(getDecl
()) ->getTemplatedDecl() ->hasAttr<CUDAGlobalAttr>
())) && "Decl is not a GPU kernel!") ? void (0) : __assert_fail
("((isa<FunctionDecl>(getDecl()) && cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>()) || (isa<FunctionTemplateDecl>(getDecl()) && cast<FunctionTemplateDecl>(getDecl()) ->getTemplatedDecl() ->hasAttr<CUDAGlobalAttr>())) && \"Decl is not a GPU kernel!\""
, "clang/include/clang/AST/GlobalDecl.h", 139, __extension__ __PRETTY_FUNCTION__
))
136 cast<FunctionTemplateDecl>(getDecl())(static_cast <bool> (((isa<FunctionDecl>(getDecl(
)) && cast<FunctionDecl>(getDecl())->hasAttr
<CUDAGlobalAttr>()) || (isa<FunctionTemplateDecl>
(getDecl()) && cast<FunctionTemplateDecl>(getDecl
()) ->getTemplatedDecl() ->hasAttr<CUDAGlobalAttr>
())) && "Decl is not a GPU kernel!") ? void (0) : __assert_fail
("((isa<FunctionDecl>(getDecl()) && cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>()) || (isa<FunctionTemplateDecl>(getDecl()) && cast<FunctionTemplateDecl>(getDecl()) ->getTemplatedDecl() ->hasAttr<CUDAGlobalAttr>())) && \"Decl is not a GPU kernel!\""
, "clang/include/clang/AST/GlobalDecl.h", 139, __extension__ __PRETTY_FUNCTION__
))
137 ->getTemplatedDecl()(static_cast <bool> (((isa<FunctionDecl>(getDecl(
)) && cast<FunctionDecl>(getDecl())->hasAttr
<CUDAGlobalAttr>()) || (isa<FunctionTemplateDecl>
(getDecl()) && cast<FunctionTemplateDecl>(getDecl
()) ->getTemplatedDecl() ->hasAttr<CUDAGlobalAttr>
())) && "Decl is not a GPU kernel!") ? void (0) : __assert_fail
("((isa<FunctionDecl>(getDecl()) && cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>()) || (isa<FunctionTemplateDecl>(getDecl()) && cast<FunctionTemplateDecl>(getDecl()) ->getTemplatedDecl() ->hasAttr<CUDAGlobalAttr>())) && \"Decl is not a GPU kernel!\""
, "clang/include/clang/AST/GlobalDecl.h", 139, __extension__ __PRETTY_FUNCTION__
))
138 ->hasAttr<CUDAGlobalAttr>())) &&(static_cast <bool> (((isa<FunctionDecl>(getDecl(
)) && cast<FunctionDecl>(getDecl())->hasAttr
<CUDAGlobalAttr>()) || (isa<FunctionTemplateDecl>
(getDecl()) && cast<FunctionTemplateDecl>(getDecl
()) ->getTemplatedDecl() ->hasAttr<CUDAGlobalAttr>
())) && "Decl is not a GPU kernel!") ? void (0) : __assert_fail
("((isa<FunctionDecl>(getDecl()) && cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>()) || (isa<FunctionTemplateDecl>(getDecl()) && cast<FunctionTemplateDecl>(getDecl()) ->getTemplatedDecl() ->hasAttr<CUDAGlobalAttr>())) && \"Decl is not a GPU kernel!\""
, "clang/include/clang/AST/GlobalDecl.h", 139, __extension__ __PRETTY_FUNCTION__
))
139 "Decl is not a GPU kernel!")(static_cast <bool> (((isa<FunctionDecl>(getDecl(
)) && cast<FunctionDecl>(getDecl())->hasAttr
<CUDAGlobalAttr>()) || (isa<FunctionTemplateDecl>
(getDecl()) && cast<FunctionTemplateDecl>(getDecl
()) ->getTemplatedDecl() ->hasAttr<CUDAGlobalAttr>
())) && "Decl is not a GPU kernel!") ? void (0) : __assert_fail
("((isa<FunctionDecl>(getDecl()) && cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>()) || (isa<FunctionTemplateDecl>(getDecl()) && cast<FunctionTemplateDecl>(getDecl()) ->getTemplatedDecl() ->hasAttr<CUDAGlobalAttr>())) && \"Decl is not a GPU kernel!\""
, "clang/include/clang/AST/GlobalDecl.h", 139, __extension__ __PRETTY_FUNCTION__
))
;
140 return static_cast<KernelReferenceKind>(Value.getInt());
141 }
142
143 friend bool operator==(const GlobalDecl &LHS, const GlobalDecl &RHS) {
144 return LHS.Value == RHS.Value &&
145 LHS.MultiVersionIndex == RHS.MultiVersionIndex;
146 }
147
148 void *getAsOpaquePtr() const { return Value.getOpaqueValue(); }
149
150 explicit operator bool() const { return getAsOpaquePtr(); }
151
152 static GlobalDecl getFromOpaquePtr(void *P) {
153 GlobalDecl GD;
154 GD.Value.setFromOpaqueValue(P);
155 return GD;
156 }
157
158 static KernelReferenceKind getDefaultKernelReference(const FunctionDecl *D) {
159 return D->getLangOpts().CUDAIsDevice ? KernelReferenceKind::Kernel
160 : KernelReferenceKind::Stub;
161 }
162
163 GlobalDecl getWithDecl(const Decl *D) {
164 GlobalDecl Result(*this);
165 Result.Value.setPointer(D);
166 return Result;
167 }
168
169 GlobalDecl getWithCtorType(CXXCtorType Type) {
170 assert(isa<CXXConstructorDecl>(getDecl()))(static_cast <bool> (isa<CXXConstructorDecl>(getDecl
())) ? void (0) : __assert_fail ("isa<CXXConstructorDecl>(getDecl())"
, "clang/include/clang/AST/GlobalDecl.h", 170, __extension__ __PRETTY_FUNCTION__
))
;
171 GlobalDecl Result(*this);
172 Result.Value.setInt(Type);
173 return Result;
174 }
175
176 GlobalDecl getWithDtorType(CXXDtorType Type) {
177 assert(isa<CXXDestructorDecl>(getDecl()))(static_cast <bool> (isa<CXXDestructorDecl>(getDecl
())) ? void (0) : __assert_fail ("isa<CXXDestructorDecl>(getDecl())"
, "clang/include/clang/AST/GlobalDecl.h", 177, __extension__ __PRETTY_FUNCTION__
))
;
178 GlobalDecl Result(*this);
179 Result.Value.setInt(Type);
180 return Result;
181 }
182
183 GlobalDecl getWithMultiVersionIndex(unsigned Index) {
184 assert(isa<FunctionDecl>(getDecl()) &&(static_cast <bool> (isa<FunctionDecl>(getDecl())
&& !cast<FunctionDecl>(getDecl())->hasAttr<
CUDAGlobalAttr>() && !isa<CXXConstructorDecl>
(getDecl()) && !isa<CXXDestructorDecl>(getDecl(
)) && "Decl is not a plain FunctionDecl!") ? void (0)
: __assert_fail ("isa<FunctionDecl>(getDecl()) && !cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() && !isa<CXXConstructorDecl>(getDecl()) && !isa<CXXDestructorDecl>(getDecl()) && \"Decl is not a plain FunctionDecl!\""
, "clang/include/clang/AST/GlobalDecl.h", 188, __extension__ __PRETTY_FUNCTION__
))
185 !cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() &&(static_cast <bool> (isa<FunctionDecl>(getDecl())
&& !cast<FunctionDecl>(getDecl())->hasAttr<
CUDAGlobalAttr>() && !isa<CXXConstructorDecl>
(getDecl()) && !isa<CXXDestructorDecl>(getDecl(
)) && "Decl is not a plain FunctionDecl!") ? void (0)
: __assert_fail ("isa<FunctionDecl>(getDecl()) && !cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() && !isa<CXXConstructorDecl>(getDecl()) && !isa<CXXDestructorDecl>(getDecl()) && \"Decl is not a plain FunctionDecl!\""
, "clang/include/clang/AST/GlobalDecl.h", 188, __extension__ __PRETTY_FUNCTION__
))
186 !isa<CXXConstructorDecl>(getDecl()) &&(static_cast <bool> (isa<FunctionDecl>(getDecl())
&& !cast<FunctionDecl>(getDecl())->hasAttr<
CUDAGlobalAttr>() && !isa<CXXConstructorDecl>
(getDecl()) && !isa<CXXDestructorDecl>(getDecl(
)) && "Decl is not a plain FunctionDecl!") ? void (0)
: __assert_fail ("isa<FunctionDecl>(getDecl()) && !cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() && !isa<CXXConstructorDecl>(getDecl()) && !isa<CXXDestructorDecl>(getDecl()) && \"Decl is not a plain FunctionDecl!\""
, "clang/include/clang/AST/GlobalDecl.h", 188, __extension__ __PRETTY_FUNCTION__
))
187 !isa<CXXDestructorDecl>(getDecl()) &&(static_cast <bool> (isa<FunctionDecl>(getDecl())
&& !cast<FunctionDecl>(getDecl())->hasAttr<
CUDAGlobalAttr>() && !isa<CXXConstructorDecl>
(getDecl()) && !isa<CXXDestructorDecl>(getDecl(
)) && "Decl is not a plain FunctionDecl!") ? void (0)
: __assert_fail ("isa<FunctionDecl>(getDecl()) && !cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() && !isa<CXXConstructorDecl>(getDecl()) && !isa<CXXDestructorDecl>(getDecl()) && \"Decl is not a plain FunctionDecl!\""
, "clang/include/clang/AST/GlobalDecl.h", 188, __extension__ __PRETTY_FUNCTION__
))
188 "Decl is not a plain FunctionDecl!")(static_cast <bool> (isa<FunctionDecl>(getDecl())
&& !cast<FunctionDecl>(getDecl())->hasAttr<
CUDAGlobalAttr>() && !isa<CXXConstructorDecl>
(getDecl()) && !isa<CXXDestructorDecl>(getDecl(
)) && "Decl is not a plain FunctionDecl!") ? void (0)
: __assert_fail ("isa<FunctionDecl>(getDecl()) && !cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() && !isa<CXXConstructorDecl>(getDecl()) && !isa<CXXDestructorDecl>(getDecl()) && \"Decl is not a plain FunctionDecl!\""
, "clang/include/clang/AST/GlobalDecl.h", 188, __extension__ __PRETTY_FUNCTION__
))
;
189 GlobalDecl Result(*this);
190 Result.MultiVersionIndex = Index;
191 return Result;
192 }
193
194 GlobalDecl getWithKernelReferenceKind(KernelReferenceKind Kind) {
195 assert(isa<FunctionDecl>(getDecl()) &&(static_cast <bool> (isa<FunctionDecl>(getDecl())
&& cast<FunctionDecl>(getDecl())->hasAttr<
CUDAGlobalAttr>() && "Decl is not a GPU kernel!") ?
void (0) : __assert_fail ("isa<FunctionDecl>(getDecl()) && cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() && \"Decl is not a GPU kernel!\""
, "clang/include/clang/AST/GlobalDecl.h", 197, __extension__ __PRETTY_FUNCTION__
))
196 cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() &&(static_cast <bool> (isa<FunctionDecl>(getDecl())
&& cast<FunctionDecl>(getDecl())->hasAttr<
CUDAGlobalAttr>() && "Decl is not a GPU kernel!") ?
void (0) : __assert_fail ("isa<FunctionDecl>(getDecl()) && cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() && \"Decl is not a GPU kernel!\""
, "clang/include/clang/AST/GlobalDecl.h", 197, __extension__ __PRETTY_FUNCTION__
))
197 "Decl is not a GPU kernel!")(static_cast <bool> (isa<FunctionDecl>(getDecl())
&& cast<FunctionDecl>(getDecl())->hasAttr<
CUDAGlobalAttr>() && "Decl is not a GPU kernel!") ?
void (0) : __assert_fail ("isa<FunctionDecl>(getDecl()) && cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() && \"Decl is not a GPU kernel!\""
, "clang/include/clang/AST/GlobalDecl.h", 197, __extension__ __PRETTY_FUNCTION__
))
;
198 GlobalDecl Result(*this);
199 Result.Value.setInt(unsigned(Kind));
200 return Result;
201 }
202};
203
204} // namespace clang
205
206namespace llvm {
207
208 template<> struct DenseMapInfo<clang::GlobalDecl> {
209 static inline clang::GlobalDecl getEmptyKey() {
210 return clang::GlobalDecl();
211 }
212
213 static inline clang::GlobalDecl getTombstoneKey() {
214 return clang::GlobalDecl::
215 getFromOpaquePtr(reinterpret_cast<void*>(-1));
216 }
217
218 static unsigned getHashValue(clang::GlobalDecl GD) {
219 return DenseMapInfo<void*>::getHashValue(GD.getAsOpaquePtr());
220 }
221
222 static bool isEqual(clang::GlobalDecl LHS,
223 clang::GlobalDecl RHS) {
224 return LHS == RHS;
225 }
226 };
227
228} // namespace llvm
229
230#endif // LLVM_CLANG_AST_GLOBALDECL_H

/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/llvm/include/llvm/ADT/PointerIntPair.h

1//===- llvm/ADT/PointerIntPair.h - Pair for pointer and int -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file defines the PointerIntPair class.
11///
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_ADT_POINTERINTPAIR_H
15#define LLVM_ADT_POINTERINTPAIR_H
16
17#include "llvm/Support/Compiler.h"
18#include "llvm/Support/PointerLikeTypeTraits.h"
19#include "llvm/Support/type_traits.h"
20#include <cassert>
21#include <cstdint>
22#include <limits>
23
24namespace llvm {
25
26template <typename T, typename Enable> struct DenseMapInfo;
27template <typename PointerT, unsigned IntBits, typename PtrTraits>
28struct PointerIntPairInfo;
29
30/// PointerIntPair - This class implements a pair of a pointer and small
31/// integer. It is designed to represent this in the space required by one
32/// pointer by bitmangling the integer into the low part of the pointer. This
33/// can only be done for small integers: typically up to 3 bits, but it depends
34/// on the number of bits available according to PointerLikeTypeTraits for the
35/// type.
36///
37/// Note that PointerIntPair always puts the IntVal part in the highest bits
38/// possible. For example, PointerIntPair<void*, 1, bool> will put the bit for
39/// the bool into bit #2, not bit #0, which allows the low two bits to be used
40/// for something else. For example, this allows:
41/// PointerIntPair<PointerIntPair<void*, 1, bool>, 1, bool>
42/// ... and the two bools will land in different bits.
43template <typename PointerTy, unsigned IntBits, typename IntType = unsigned,
44 typename PtrTraits = PointerLikeTypeTraits<PointerTy>,
45 typename Info = PointerIntPairInfo<PointerTy, IntBits, PtrTraits>>
46class PointerIntPair {
47 // Used by MSVC visualizer and generally helpful for debugging/visualizing.
48 using InfoTy = Info;
49 intptr_t Value = 0;
50
51public:
52 constexpr PointerIntPair() = default;
53
54 PointerIntPair(PointerTy PtrVal, IntType IntVal) {
55 setPointerAndInt(PtrVal, IntVal);
56 }
57
58 explicit PointerIntPair(PointerTy PtrVal) { initWithPointer(PtrVal); }
59
60 PointerTy getPointer() const { return Info::getPointer(Value); }
40
Calling 'PointerIntPairInfo::getPointer'
46
Returning from 'PointerIntPairInfo::getPointer'
47
Returning pointer
61
62 IntType getInt() const { return (IntType)Info::getInt(Value); }
63
64 void setPointer(PointerTy PtrVal) & {
65 Value = Info::updatePointer(Value, PtrVal);
66 }
67
68 void setInt(IntType IntVal) & {
69 Value = Info::updateInt(Value, static_cast<intptr_t>(IntVal));
70 }
71
72 void initWithPointer(PointerTy PtrVal) & {
73 Value = Info::updatePointer(0, PtrVal);
74 }
75
76 void setPointerAndInt(PointerTy PtrVal, IntType IntVal) & {
77 Value = Info::updateInt(Info::updatePointer(0, PtrVal),
78 static_cast<intptr_t>(IntVal));
79 }
80
81 PointerTy const *getAddrOfPointer() const {
82 return const_cast<PointerIntPair *>(this)->getAddrOfPointer();
83 }
84
85 PointerTy *getAddrOfPointer() {
86 assert(Value == reinterpret_cast<intptr_t>(getPointer()) &&(static_cast <bool> (Value == reinterpret_cast<intptr_t
>(getPointer()) && "Can only return the address if IntBits is cleared and "
"PtrTraits doesn't change the pointer") ? void (0) : __assert_fail
("Value == reinterpret_cast<intptr_t>(getPointer()) && \"Can only return the address if IntBits is cleared and \" \"PtrTraits doesn't change the pointer\""
, "llvm/include/llvm/ADT/PointerIntPair.h", 88, __extension__
__PRETTY_FUNCTION__))
87 "Can only return the address if IntBits is cleared and "(static_cast <bool> (Value == reinterpret_cast<intptr_t
>(getPointer()) && "Can only return the address if IntBits is cleared and "
"PtrTraits doesn't change the pointer") ? void (0) : __assert_fail
("Value == reinterpret_cast<intptr_t>(getPointer()) && \"Can only return the address if IntBits is cleared and \" \"PtrTraits doesn't change the pointer\""
, "llvm/include/llvm/ADT/PointerIntPair.h", 88, __extension__
__PRETTY_FUNCTION__))
88 "PtrTraits doesn't change the pointer")(static_cast <bool> (Value == reinterpret_cast<intptr_t
>(getPointer()) && "Can only return the address if IntBits is cleared and "
"PtrTraits doesn't change the pointer") ? void (0) : __assert_fail
("Value == reinterpret_cast<intptr_t>(getPointer()) && \"Can only return the address if IntBits is cleared and \" \"PtrTraits doesn't change the pointer\""
, "llvm/include/llvm/ADT/PointerIntPair.h", 88, __extension__
__PRETTY_FUNCTION__))
;
89 return reinterpret_cast<PointerTy *>(&Value);
90 }
91
92 void *getOpaqueValue() const { return reinterpret_cast<void *>(Value); }
93
94 void setFromOpaqueValue(void *Val) & {
95 Value = reinterpret_cast<intptr_t>(Val);
96 }
97
98 static PointerIntPair getFromOpaqueValue(void *V) {
99 PointerIntPair P;
100 P.setFromOpaqueValue(V);
101 return P;
102 }
103
104 // Allow PointerIntPairs to be created from const void * if and only if the
105 // pointer type could be created from a const void *.
106 static PointerIntPair getFromOpaqueValue(const void *V) {
107 (void)PtrTraits::getFromVoidPointer(V);
108 return getFromOpaqueValue(const_cast<void *>(V));
109 }
110
111 bool operator==(const PointerIntPair &RHS) const {
112 return Value == RHS.Value;
113 }
114
115 bool operator!=(const PointerIntPair &RHS) const {
116 return Value != RHS.Value;
117 }
118
119 bool operator<(const PointerIntPair &RHS) const { return Value < RHS.Value; }
120 bool operator>(const PointerIntPair &RHS) const { return Value > RHS.Value; }
121
122 bool operator<=(const PointerIntPair &RHS) const {
123 return Value <= RHS.Value;
124 }
125
126 bool operator>=(const PointerIntPair &RHS) const {
127 return Value >= RHS.Value;
128 }
129};
130
131// Specialize is_trivially_copyable to avoid limitation of llvm::is_trivially_copyable
132// when compiled with gcc 4.9.
133template <typename PointerTy, unsigned IntBits, typename IntType,
134 typename PtrTraits,
135 typename Info>
136struct is_trivially_copyable<PointerIntPair<PointerTy, IntBits, IntType, PtrTraits, Info>> : std::true_type {
137#ifdef HAVE_STD_IS_TRIVIALLY_COPYABLE
138 static_assert(std::is_trivially_copyable<PointerIntPair<PointerTy, IntBits, IntType, PtrTraits, Info>>::value,
139 "inconsistent behavior between llvm:: and std:: implementation of is_trivially_copyable");
140#endif
141};
142
143
144template <typename PointerT, unsigned IntBits, typename PtrTraits>
145struct PointerIntPairInfo {
146 static_assert(PtrTraits::NumLowBitsAvailable <
147 std::numeric_limits<uintptr_t>::digits,
148 "cannot use a pointer type that has all bits free");
149 static_assert(IntBits <= PtrTraits::NumLowBitsAvailable,
150 "PointerIntPair with integer size too large for pointer");
151 enum MaskAndShiftConstants : uintptr_t {
152 /// PointerBitMask - The bits that come from the pointer.
153 PointerBitMask =
154 ~(uintptr_t)(((intptr_t)1 << PtrTraits::NumLowBitsAvailable) - 1),
155
156 /// IntShift - The number of low bits that we reserve for other uses, and
157 /// keep zero.
158 IntShift = (uintptr_t)PtrTraits::NumLowBitsAvailable - IntBits,
159
160 /// IntMask - This is the unshifted mask for valid bits of the int type.
161 IntMask = (uintptr_t)(((intptr_t)1 << IntBits) - 1),
162
163 // ShiftedIntMask - This is the bits for the integer shifted in place.
164 ShiftedIntMask = (uintptr_t)(IntMask << IntShift)
165 };
166
167 static PointerT getPointer(intptr_t Value) {
168 return PtrTraits::getFromVoidPointer(
41
Calling 'PointerLikeTypeTraits::getFromVoidPointer'
44
Returning from 'PointerLikeTypeTraits::getFromVoidPointer'
45
Returning pointer
169 reinterpret_cast<void *>(Value & PointerBitMask));
170 }
171
172 static intptr_t getInt(intptr_t Value) {
173 return (Value >> IntShift) & IntMask;
174 }
175
176 static intptr_t updatePointer(intptr_t OrigValue, PointerT Ptr) {
177 intptr_t PtrWord =
178 reinterpret_cast<intptr_t>(PtrTraits::getAsVoidPointer(Ptr));
179 assert((PtrWord & ~PointerBitMask) == 0 &&(static_cast <bool> ((PtrWord & ~PointerBitMask) ==
0 && "Pointer is not sufficiently aligned") ? void (
0) : __assert_fail ("(PtrWord & ~PointerBitMask) == 0 && \"Pointer is not sufficiently aligned\""
, "llvm/include/llvm/ADT/PointerIntPair.h", 180, __extension__
__PRETTY_FUNCTION__))
180 "Pointer is not sufficiently aligned")(static_cast <bool> ((PtrWord & ~PointerBitMask) ==
0 && "Pointer is not sufficiently aligned") ? void (
0) : __assert_fail ("(PtrWord & ~PointerBitMask) == 0 && \"Pointer is not sufficiently aligned\""
, "llvm/include/llvm/ADT/PointerIntPair.h", 180, __extension__
__PRETTY_FUNCTION__))
;
181 // Preserve all low bits, just update the pointer.
182 return PtrWord | (OrigValue & ~PointerBitMask);
183 }
184
185 static intptr_t updateInt(intptr_t OrigValue, intptr_t Int) {
186 intptr_t IntWord = static_cast<intptr_t>(Int);
187 assert((IntWord & ~IntMask) == 0 && "Integer too large for field")(static_cast <bool> ((IntWord & ~IntMask) == 0 &&
"Integer too large for field") ? void (0) : __assert_fail ("(IntWord & ~IntMask) == 0 && \"Integer too large for field\""
, "llvm/include/llvm/ADT/PointerIntPair.h", 187, __extension__
__PRETTY_FUNCTION__))
;
188
189 // Preserve all bits other than the ones we are updating.
190 return (OrigValue & ~ShiftedIntMask) | IntWord << IntShift;
191 }
192};
193
194// Provide specialization of DenseMapInfo for PointerIntPair.
195template <typename PointerTy, unsigned IntBits, typename IntType>
196struct DenseMapInfo<PointerIntPair<PointerTy, IntBits, IntType>, void> {
197 using Ty = PointerIntPair<PointerTy, IntBits, IntType>;
198
199 static Ty getEmptyKey() {
200 uintptr_t Val = static_cast<uintptr_t>(-1);
201 Val <<= PointerLikeTypeTraits<Ty>::NumLowBitsAvailable;
202 return Ty::getFromOpaqueValue(reinterpret_cast<void *>(Val));
203 }
204
205 static Ty getTombstoneKey() {
206 uintptr_t Val = static_cast<uintptr_t>(-2);
207 Val <<= PointerLikeTypeTraits<PointerTy>::NumLowBitsAvailable;
208 return Ty::getFromOpaqueValue(reinterpret_cast<void *>(Val));
209 }
210
211 static unsigned getHashValue(Ty V) {
212 uintptr_t IV = reinterpret_cast<uintptr_t>(V.getOpaqueValue());
213 return unsigned(IV) ^ unsigned(IV >> 9);
214 }
215
216 static bool isEqual(const Ty &LHS, const Ty &RHS) { return LHS == RHS; }
217};
218
219// Teach SmallPtrSet that PointerIntPair is "basically a pointer".
220template <typename PointerTy, unsigned IntBits, typename IntType,
221 typename PtrTraits>
222struct PointerLikeTypeTraits<
223 PointerIntPair<PointerTy, IntBits, IntType, PtrTraits>> {
224 static inline void *
225 getAsVoidPointer(const PointerIntPair<PointerTy, IntBits, IntType> &P) {
226 return P.getOpaqueValue();
227 }
228
229 static inline PointerIntPair<PointerTy, IntBits, IntType>
230 getFromVoidPointer(void *P) {
231 return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
232 }
233
234 static inline PointerIntPair<PointerTy, IntBits, IntType>
235 getFromVoidPointer(const void *P) {
236 return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
237 }
238
239 static constexpr int NumLowBitsAvailable =
240 PtrTraits::NumLowBitsAvailable - IntBits;
241};
242
243} // end namespace llvm
244
245#endif // LLVM_ADT_POINTERINTPAIR_H

/build/llvm-toolchain-snapshot-15~++20220320100729+487629cc61b5/llvm/include/llvm/Support/PointerLikeTypeTraits.h

1//===- llvm/Support/PointerLikeTypeTraits.h - Pointer Traits ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the PointerLikeTypeTraits class. This allows data
10// structures to reason about pointers and other things that are pointer sized.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_SUPPORT_POINTERLIKETYPETRAITS_H
15#define LLVM_SUPPORT_POINTERLIKETYPETRAITS_H
16
17#include "llvm/Support/DataTypes.h"
18#include <cassert>
19#include <type_traits>
20
21namespace llvm {
22
23/// A traits type that is used to handle pointer types and things that are just
24/// wrappers for pointers as a uniform entity.
25template <typename T> struct PointerLikeTypeTraits;
26
27namespace detail {
28/// A tiny meta function to compute the log2 of a compile time constant.
29template <size_t N>
30struct ConstantLog2
31 : std::integral_constant<size_t, ConstantLog2<N / 2>::value + 1> {};
32template <> struct ConstantLog2<1> : std::integral_constant<size_t, 0> {};
33
34// Provide a trait to check if T is pointer-like.
35template <typename T, typename U = void> struct HasPointerLikeTypeTraits {
36 static const bool value = false;
37};
38
39// sizeof(T) is valid only for a complete T.
40template <typename T>
41struct HasPointerLikeTypeTraits<
42 T, decltype((sizeof(PointerLikeTypeTraits<T>) + sizeof(T)), void())> {
43 static const bool value = true;
44};
45
46template <typename T> struct IsPointerLike {
47 static const bool value = HasPointerLikeTypeTraits<T>::value;
48};
49
50template <typename T> struct IsPointerLike<T *> {
51 static const bool value = true;
52};
53} // namespace detail
54
55// Provide PointerLikeTypeTraits for non-cvr pointers.
56template <typename T> struct PointerLikeTypeTraits<T *> {
57 static inline void *getAsVoidPointer(T *P) { return P; }
58 static inline T *getFromVoidPointer(void *P) { return static_cast<T *>(P); }
59
60 static constexpr int NumLowBitsAvailable =
61 detail::ConstantLog2<alignof(T)>::value;
62};
63
64template <> struct PointerLikeTypeTraits<void *> {
65 static inline void *getAsVoidPointer(void *P) { return P; }
66 static inline void *getFromVoidPointer(void *P) { return P; }
67
68 /// Note, we assume here that void* is related to raw malloc'ed memory and
69 /// that malloc returns objects at least 4-byte aligned. However, this may be
70 /// wrong, or pointers may be from something other than malloc. In this case,
71 /// you should specify a real typed pointer or avoid this template.
72 ///
73 /// All clients should use assertions to do a run-time check to ensure that
74 /// this is actually true.
75 static constexpr int NumLowBitsAvailable = 2;
76};
77
78// Provide PointerLikeTypeTraits for const things.
79template <typename T> struct PointerLikeTypeTraits<const T> {
80 typedef PointerLikeTypeTraits<T> NonConst;
81
82 static inline const void *getAsVoidPointer(const T P) {
83 return NonConst::getAsVoidPointer(P);
84 }
85 static inline const T getFromVoidPointer(const void *P) {
86 return NonConst::getFromVoidPointer(const_cast<void *>(P));
87 }
88 static constexpr int NumLowBitsAvailable = NonConst::NumLowBitsAvailable;
89};
90
91// Provide PointerLikeTypeTraits for const pointers.
92template <typename T> struct PointerLikeTypeTraits<const T *> {
93 typedef PointerLikeTypeTraits<T *> NonConst;
94
95 static inline const void *getAsVoidPointer(const T *P) {
96 return NonConst::getAsVoidPointer(const_cast<T *>(P));
97 }
98 static inline const T *getFromVoidPointer(const void *P) {
99 return NonConst::getFromVoidPointer(const_cast<void *>(P));
42
Passing value via 1st parameter 'P'
43
Returning pointer
100 }
101 static constexpr int NumLowBitsAvailable = NonConst::NumLowBitsAvailable;
102};
103
104// Provide PointerLikeTypeTraits for uintptr_t.
105template <> struct PointerLikeTypeTraits<uintptr_t> {
106 static inline void *getAsVoidPointer(uintptr_t P) {
107 return reinterpret_cast<void *>(P);
108 }
109 static inline uintptr_t getFromVoidPointer(void *P) {
110 return reinterpret_cast<uintptr_t>(P);
111 }
112 // No bits are available!
113 static constexpr int NumLowBitsAvailable = 0;
114};
115
116/// Provide suitable custom traits struct for function pointers.
117///
118/// Function pointers can't be directly given these traits as functions can't
119/// have their alignment computed with `alignof` and we need different casting.
120///
121/// To rely on higher alignment for a specialized use, you can provide a
122/// customized form of this template explicitly with higher alignment, and
123/// potentially use alignment attributes on functions to satisfy that.
124template <int Alignment, typename FunctionPointerT>
125struct FunctionPointerLikeTypeTraits {
126 static constexpr int NumLowBitsAvailable =
127 detail::ConstantLog2<Alignment>::value;
128 static inline void *getAsVoidPointer(FunctionPointerT P) {
129 assert((reinterpret_cast<uintptr_t>(P) &(static_cast <bool> ((reinterpret_cast<uintptr_t>
(P) & ~((uintptr_t)-1 << NumLowBitsAvailable)) == 0
&& "Alignment not satisfied for an actual function pointer!"
) ? void (0) : __assert_fail ("(reinterpret_cast<uintptr_t>(P) & ~((uintptr_t)-1 << NumLowBitsAvailable)) == 0 && \"Alignment not satisfied for an actual function pointer!\""
, "llvm/include/llvm/Support/PointerLikeTypeTraits.h", 131, __extension__
__PRETTY_FUNCTION__))
130 ~((uintptr_t)-1 << NumLowBitsAvailable)) == 0 &&(static_cast <bool> ((reinterpret_cast<uintptr_t>
(P) & ~((uintptr_t)-1 << NumLowBitsAvailable)) == 0
&& "Alignment not satisfied for an actual function pointer!"
) ? void (0) : __assert_fail ("(reinterpret_cast<uintptr_t>(P) & ~((uintptr_t)-1 << NumLowBitsAvailable)) == 0 && \"Alignment not satisfied for an actual function pointer!\""
, "llvm/include/llvm/Support/PointerLikeTypeTraits.h", 131, __extension__
__PRETTY_FUNCTION__))
131 "Alignment not satisfied for an actual function pointer!")(static_cast <bool> ((reinterpret_cast<uintptr_t>
(P) & ~((uintptr_t)-1 << NumLowBitsAvailable)) == 0
&& "Alignment not satisfied for an actual function pointer!"
) ? void (0) : __assert_fail ("(reinterpret_cast<uintptr_t>(P) & ~((uintptr_t)-1 << NumLowBitsAvailable)) == 0 && \"Alignment not satisfied for an actual function pointer!\""
, "llvm/include/llvm/Support/PointerLikeTypeTraits.h", 131, __extension__
__PRETTY_FUNCTION__))
;
132 return reinterpret_cast<void *>(P);
133 }
134 static inline FunctionPointerT getFromVoidPointer(void *P) {
135 return reinterpret_cast<FunctionPointerT>(P);
136 }
137};
138
139/// Provide a default specialization for function pointers that assumes 4-byte
140/// alignment.
141///
142/// We assume here that functions used with this are always at least 4-byte
143/// aligned. This means that, for example, thumb functions won't work or systems
144/// with weird unaligned function pointers won't work. But all practical systems
145/// we support satisfy this requirement.
146template <typename ReturnT, typename... ParamTs>
147struct PointerLikeTypeTraits<ReturnT (*)(ParamTs...)>
148 : FunctionPointerLikeTypeTraits<4, ReturnT (*)(ParamTs...)> {};
149
150} // end namespace llvm
151
152#endif