Bug Summary

File: /build/source/lld/ELF/SyntheticSections.cpp
Warning: line 2025, column 26
1st function call argument is an uninitialized value
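
For context, "1st function call argument is an uninitialized value" is the core.CallAndMessage checker firing on a value that may be left uninitialized on some path before being passed to a call. A minimal, self-contained sketch that reproduces the same class of warning under clang --analyze (illustrative only; this is not the lld code path flagged at line 2025):

// repro.cpp -- illustrative only; analyze with: clang --analyze repro.cpp
static int consume(int v) { return v + 1; }

int trigger(bool cond) {
  int x;             // 'x' stays uninitialized when 'cond' is false
  if (cond)
    x = 42;
  return consume(x); // 1st function call argument is an uninitialized value
}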

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name SyntheticSections.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/source/build-llvm -resource-dir /usr/lib/llvm-17/lib/clang/17 -I tools/lld/ELF -I /build/source/lld/ELF -I /build/source/lld/include -I tools/lld/include -I include -I /build/source/llvm/include -D LLD_VENDOR="Debian" -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-17/lib/clang/17/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/source/build-llvm=build-llvm -fmacro-prefix-map=/build/source/= -fcoverage-prefix-map=/build/source/build-llvm=build-llvm -fcoverage-prefix-map=/build/source/= -source-date-epoch 1675721604 -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/source/build-llvm -fdebug-prefix-map=/build/source/build-llvm=build-llvm -fdebug-prefix-map=/build/source/= -fdebug-prefix-map=/build/source/build-llvm=build-llvm -fdebug-prefix-map=/build/source/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2023-02-07-030702-17298-1 -x c++ /build/source/lld/ELF/SyntheticSections.cpp

/build/source/lld/ELF/SyntheticSections.cpp

1//===- SyntheticSections.cpp ----------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains linker-synthesized sections. Currently,
10// synthetic sections are created either as output sections or as input sections,
11// but we are rewriting code so that all synthetic sections are created as
12// input sections.
13//
14//===----------------------------------------------------------------------===//
15
16#include "SyntheticSections.h"
17#include "Config.h"
18#include "DWARF.h"
19#include "EhFrame.h"
20#include "InputFiles.h"
21#include "LinkerScript.h"
22#include "OutputSections.h"
23#include "SymbolTable.h"
24#include "Symbols.h"
25#include "Target.h"
26#include "Thunks.h"
27#include "Writer.h"
28#include "lld/Common/CommonLinkerContext.h"
29#include "lld/Common/DWARF.h"
30#include "lld/Common/Strings.h"
31#include "lld/Common/Version.h"
32#include "llvm/ADT/STLExtras.h"
33#include "llvm/ADT/SetOperations.h"
34#include "llvm/ADT/StringExtras.h"
35#include "llvm/BinaryFormat/Dwarf.h"
36#include "llvm/BinaryFormat/ELF.h"
37#include "llvm/DebugInfo/DWARF/DWARFDebugPubTable.h"
38#include "llvm/Support/Endian.h"
39#include "llvm/Support/LEB128.h"
40#include "llvm/Support/Parallel.h"
41#include "llvm/Support/TimeProfiler.h"
42#include <cstdlib>
43
44using namespace llvm;
45using namespace llvm::dwarf;
46using namespace llvm::ELF;
47using namespace llvm::object;
48using namespace llvm::support;
49using namespace lld;
50using namespace lld::elf;
51
52using llvm::support::endian::read32le;
53using llvm::support::endian::write32le;
54using llvm::support::endian::write64le;
55
56constexpr size_t MergeNoTailSection::numShards;
57
58static uint64_t readUint(uint8_t *buf) {
59 return config->is64 ? read64(buf) : read32(buf);
60}
61
62static void writeUint(uint8_t *buf, uint64_t val) {
63 if (config->is64)
64 write64(buf, val);
65 else
66 write32(buf, val);
67}
68
69// Returns an LLD version string.
70static ArrayRef<uint8_t> getVersion() {
71 // Check the LLD_VERSION environment variable first for ease of testing:
72 // setting it gives consistent output across runs.
73 // This is intended for testing only.
74 StringRef s = getenv("LLD_VERSION");
75 if (s.empty())
76 s = saver().save(Twine("Linker: ") + getLLDVersion());
77
78 // +1 to include the terminating '\0'.
79 return {(const uint8_t *)s.data(), s.size() + 1};
80}
81
82// Creates a .comment section containing LLD version info.
83// With this feature, you can identify LLD-generated binaries easily
84// by "readelf --string-dump .comment <file>".
85// The returned object is a mergeable string section.
86MergeInputSection *elf::createCommentSection() {
87 auto *sec = make<MergeInputSection>(SHF_MERGE | SHF_STRINGS, SHT_PROGBITS, 1,
88 getVersion(), ".comment");
89 sec->splitIntoPieces();
90 return sec;
91}
92
93// .MIPS.abiflags section.
94template <class ELFT>
95MipsAbiFlagsSection<ELFT>::MipsAbiFlagsSection(Elf_Mips_ABIFlags flags)
96 : SyntheticSection(SHF_ALLOC, SHT_MIPS_ABIFLAGS, 8, ".MIPS.abiflags"),
97 flags(flags) {
98 this->entsize = sizeof(Elf_Mips_ABIFlags);
99}
100
101template <class ELFT> void MipsAbiFlagsSection<ELFT>::writeTo(uint8_t *buf) {
102 memcpy(buf, &flags, sizeof(flags));
103}
104
105template <class ELFT>
106std::unique_ptr<MipsAbiFlagsSection<ELFT>> MipsAbiFlagsSection<ELFT>::create() {
107 Elf_Mips_ABIFlags flags = {};
108 bool create = false;
109
110 for (InputSectionBase *sec : ctx.inputSections) {
111 if (sec->type != SHT_MIPS_ABIFLAGS)
112 continue;
113 sec->markDead();
114 create = true;
115
116 std::string filename = toString(sec->file);
117 const size_t size = sec->content().size();
118 // Older versions of BFD (such as the default FreeBSD linker) concatenate
119 // .MIPS.abiflags instead of merging. To allow for this case (or potential
120 // zero padding) we ignore everything after the first Elf_Mips_ABIFlags.
121 if (size < sizeof(Elf_Mips_ABIFlags)) {
122 error(filename + ": invalid size of .MIPS.abiflags section: got " +
123 Twine(size) + " instead of " + Twine(sizeof(Elf_Mips_ABIFlags)));
124 return nullptr;
125 }
126 auto *s =
127 reinterpret_cast<const Elf_Mips_ABIFlags *>(sec->content().data());
128 if (s->version != 0) {
129 error(filename + ": unexpected .MIPS.abiflags version " +
130 Twine(s->version));
131 return nullptr;
132 }
133
134 // LLD checks ISA compatibility in calcMipsEFlags(). Here we just
135 // select the highest number of ISA/Rev/Ext.
136 flags.isa_level = std::max(flags.isa_level, s->isa_level);
137 flags.isa_rev = std::max(flags.isa_rev, s->isa_rev);
138 flags.isa_ext = std::max(flags.isa_ext, s->isa_ext);
139 flags.gpr_size = std::max(flags.gpr_size, s->gpr_size);
140 flags.cpr1_size = std::max(flags.cpr1_size, s->cpr1_size);
141 flags.cpr2_size = std::max(flags.cpr2_size, s->cpr2_size);
142 flags.ases |= s->ases;
143 flags.flags1 |= s->flags1;
144 flags.flags2 |= s->flags2;
145 flags.fp_abi = elf::getMipsFpAbiFlag(flags.fp_abi, s->fp_abi, filename);
146 }
147
148 if (create)
149 return std::make_unique<MipsAbiFlagsSection<ELFT>>(flags);
150 return nullptr;
151}
152
153// .MIPS.options section.
154template <class ELFT>
155MipsOptionsSection<ELFT>::MipsOptionsSection(Elf_Mips_RegInfo reginfo)
156 : SyntheticSection(SHF_ALLOC, SHT_MIPS_OPTIONS, 8, ".MIPS.options"),
157 reginfo(reginfo) {
158 this->entsize = sizeof(Elf_Mips_Options) + sizeof(Elf_Mips_RegInfo);
159}
160
161template <class ELFT> void MipsOptionsSection<ELFT>::writeTo(uint8_t *buf) {
162 auto *options = reinterpret_cast<Elf_Mips_Options *>(buf);
163 options->kind = ODK_REGINFO;
164 options->size = getSize();
165
166 if (!config->relocatable)
167 reginfo.ri_gp_value = in.mipsGot->getGp();
168 memcpy(buf + sizeof(Elf_Mips_Options), &reginfo, sizeof(reginfo));
169}
170
171template <class ELFT>
172std::unique_ptr<MipsOptionsSection<ELFT>> MipsOptionsSection<ELFT>::create() {
173 // N64 ABI only.
174 if (!ELFT::Is64Bits)
175 return nullptr;
176
177 SmallVector<InputSectionBase *, 0> sections;
178 for (InputSectionBase *sec : ctx.inputSections)
179 if (sec->type == SHT_MIPS_OPTIONS)
180 sections.push_back(sec);
181
182 if (sections.empty())
183 return nullptr;
184
185 Elf_Mips_RegInfo reginfo = {};
186 for (InputSectionBase *sec : sections) {
187 sec->markDead();
188
189 std::string filename = toString(sec->file);
190 ArrayRef<uint8_t> d = sec->content();
191
192 while (!d.empty()) {
193 if (d.size() < sizeof(Elf_Mips_Options)) {
194 error(filename + ": invalid size of .MIPS.options section");
195 break;
196 }
197
198 auto *opt = reinterpret_cast<const Elf_Mips_Options *>(d.data());
199 if (opt->kind == ODK_REGINFO) {
200 reginfo.ri_gprmask |= opt->getRegInfo().ri_gprmask;
201 sec->getFile<ELFT>()->mipsGp0 = opt->getRegInfo().ri_gp_value;
202 break;
203 }
204
205 if (!opt->size)
206 fatal(filename + ": zero option descriptor size");
207 d = d.slice(opt->size);
208 }
209 }
210
211 return std::make_unique<MipsOptionsSection<ELFT>>(reginfo);
212}
213
214// MIPS .reginfo section.
215template <class ELFT>
216MipsReginfoSection<ELFT>::MipsReginfoSection(Elf_Mips_RegInfo reginfo)
217 : SyntheticSection(SHF_ALLOC, SHT_MIPS_REGINFO, 4, ".reginfo"),
218 reginfo(reginfo) {
219 this->entsize = sizeof(Elf_Mips_RegInfo);
220}
221
222template <class ELFT> void MipsReginfoSection<ELFT>::writeTo(uint8_t *buf) {
223 if (!config->relocatable)
224 reginfo.ri_gp_value = in.mipsGot->getGp();
225 memcpy(buf, &reginfo, sizeof(reginfo));
226}
227
228template <class ELFT>
229std::unique_ptr<MipsReginfoSection<ELFT>> MipsReginfoSection<ELFT>::create() {
230 // Section should be alive for O32 and N32 ABIs only.
231 if (ELFT::Is64Bits)
232 return nullptr;
233
234 SmallVector<InputSectionBase *, 0> sections;
235 for (InputSectionBase *sec : ctx.inputSections)
236 if (sec->type == SHT_MIPS_REGINFO)
237 sections.push_back(sec);
238
239 if (sections.empty())
240 return nullptr;
241
242 Elf_Mips_RegInfo reginfo = {};
243 for (InputSectionBase *sec : sections) {
244 sec->markDead();
245
246 if (sec->content().size() != sizeof(Elf_Mips_RegInfo)) {
247 error(toString(sec->file) + ": invalid size of .reginfo section");
248 return nullptr;
249 }
250
251 auto *r = reinterpret_cast<const Elf_Mips_RegInfo *>(sec->content().data());
252 reginfo.ri_gprmask |= r->ri_gprmask;
253 sec->getFile<ELFT>()->mipsGp0 = r->ri_gp_value;
254 }
255
256 return std::make_unique<MipsReginfoSection<ELFT>>(reginfo);
257}
258
259InputSection *elf::createInterpSection() {
260 // StringSaver guarantees that the returned string ends with '\0'.
261 StringRef s = saver().save(config->dynamicLinker);
262 ArrayRef<uint8_t> contents = {(const uint8_t *)s.data(), s.size() + 1};
263
264 return make<InputSection>(nullptr, SHF_ALLOC, SHT_PROGBITS, 1, contents,
265 ".interp");
266}
267
268Defined *elf::addSyntheticLocal(StringRef name, uint8_t type, uint64_t value,
269 uint64_t size, InputSectionBase &section) {
270 Defined *s = makeDefined(section.file, name, STB_LOCAL, STV_DEFAULT, type,
271 value, size, &section);
272 if (in.symTab)
273 in.symTab->addSymbol(s);
274 return s;
275}
276
277static size_t getHashSize() {
278 switch (config->buildId) {
279 case BuildIdKind::Fast:
280 return 8;
281 case BuildIdKind::Md5:
282 case BuildIdKind::Uuid:
283 return 16;
284 case BuildIdKind::Sha1:
285 return 20;
286 case BuildIdKind::Hexstring:
287 return config->buildIdVector.size();
288 default:
289 llvm_unreachable("unknown BuildIdKind");
290 }
291}
292
293// This class represents a linker-synthesized .note.gnu.property section.
294//
295// In x86 and AArch64, object files may contain feature flags indicating the
296// features that they have used. The flags are stored in a .note.gnu.property
297// section.
298//
299// lld reads the sections from input files and merges them by computing the AND
300// of the flags. The result is written as a new .note.gnu.property section.
301//
302// If the flag is zero (which indicates that the intersection of the feature
303// sets is empty, or some input files didn't have .note.gnu.property sections),
304// we don't create this section.
305GnuPropertySection::GnuPropertySection()
306 : SyntheticSection(llvm::ELF::SHF_ALLOC, llvm::ELF::SHT_NOTE,
307 config->wordsize, ".note.gnu.property") {}
308
309void GnuPropertySection::writeTo(uint8_t *buf) {
310 uint32_t featureAndType = config->emachine == EM_AARCH64
311 ? GNU_PROPERTY_AARCH64_FEATURE_1_AND
312 : GNU_PROPERTY_X86_FEATURE_1_AND;
313
314 write32(buf, 4); // Name size
315 write32(buf + 4, config->is64 ? 16 : 12); // Content size
316 write32(buf + 8, NT_GNU_PROPERTY_TYPE_0); // Type
317 memcpy(buf + 12, "GNU", 4); // Name string
318 write32(buf + 16, featureAndType); // Feature type
319 write32(buf + 20, 4); // Feature size
320 write32(buf + 24, config->andFeatures); // Feature flags
321 if (config->is64)
322 write32(buf + 28, 0); // Padding
323}
324
325size_t GnuPropertySection::getSize() const { return config->is64 ? 32 : 28; }
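
As a reading aid, here is a hedged sketch of the byte image writeTo() produces, matching the getSize() values above (28 bytes on 32-bit targets, 32 on 64-bit). The struct and field names are illustrative, not lld's:

#include <cstdint>

// Illustrative layout of the emitted .note.gnu.property contents.
struct GnuPropertyNoteImage {
  uint32_t namesz;   // 4: "GNU" plus NUL
  uint32_t descsz;   // 16 if 64-bit, 12 if 32-bit
  uint32_t type;     // NT_GNU_PROPERTY_TYPE_0
  char     name[4];  // "GNU\0"
  uint32_t prType;   // GNU_PROPERTY_{AARCH64,X86}_FEATURE_1_AND
  uint32_t prDatasz; // 4
  uint32_t prData;   // config->andFeatures bitmask
  uint32_t pad;      // 64-bit only: pads the descriptor to an 8-byte multiple
};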
326
327BuildIdSection::BuildIdSection()
328 : SyntheticSection(SHF_ALLOC, SHT_NOTE, 4, ".note.gnu.build-id"),
329 hashSize(getHashSize()) {}
330
331void BuildIdSection::writeTo(uint8_t *buf) {
332 write32(buf, 4); // Name size
333 write32(buf + 4, hashSize); // Content size
334 write32(buf + 8, NT_GNU_BUILD_ID); // Type
335 memcpy(buf + 12, "GNU", 4); // Name string
336 hashBuf = buf + 16;
337}
338
339void BuildIdSection::writeBuildId(ArrayRef<uint8_t> buf) {
340 assert(buf.size() == hashSize);
341 memcpy(hashBuf, buf.data(), hashSize);
342}
343
344BssSection::BssSection(StringRef name, uint64_t size, uint32_t alignment)
345 : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_NOBITS, alignment, name) {
346 this->bss = true;
347 this->size = size;
348}
349
350EhFrameSection::EhFrameSection()
351 : SyntheticSection(SHF_ALLOC, SHT_PROGBITS, 1, ".eh_frame") {}
352
353// Search for an existing CIE record or create a new one.
354// CIE records from input object files are uniquified by their contents
355// and where their relocations point to.
356template <class ELFT, class RelTy>
357CieRecord *EhFrameSection::addCie(EhSectionPiece &cie, ArrayRef<RelTy> rels) {
358 Symbol *personality = nullptr;
359 unsigned firstRelI = cie.firstRelocation;
360 if (firstRelI != (unsigned)-1)
361 personality =
362 &cie.sec->template getFile<ELFT>()->getRelocTargetSym(rels[firstRelI]);
363
364 // Search for an existing CIE by CIE contents/relocation target pair.
365 CieRecord *&rec = cieMap[{cie.data(), personality}];
366
367 // If not found, create a new one.
368 if (!rec) {
369 rec = make<CieRecord>();
370 rec->cie = &cie;
371 cieRecords.push_back(rec);
372 }
373 return rec;
374}
375
376// There is one FDE per function. Returns a non-null pointer to the function
377// symbol if the given FDE points to a live function.
378template <class ELFT, class RelTy>
379Defined *EhFrameSection::isFdeLive(EhSectionPiece &fde, ArrayRef<RelTy> rels) {
380 auto *sec = cast<EhInputSection>(fde.sec);
381 unsigned firstRelI = fde.firstRelocation;
382
383 // An FDE should point to some function because FDEs exist to describe
384 // functions. However, that's not always the case due to an issue in
385 // ld.gold with -r: ld.gold may discard the functions yet leave their
386 // corresponding FDEs, which results in creating bad .eh_frame sections.
387 // To deal with that, we ignore such FDEs.
388 if (firstRelI == (unsigned)-1)
389 return nullptr;
390
391 const RelTy &rel = rels[firstRelI];
392 Symbol &b = sec->template getFile<ELFT>()->getRelocTargetSym(rel);
393
394 // FDEs for garbage-collected or merged-by-ICF sections, or sections in
395 // another partition, are dead.
396 if (auto *d = dyn_cast<Defined>(&b))
397 if (!d->folded && d->section && d->section->partition == partition)
398 return d;
399 return nullptr;
400}
401
402// .eh_frame is a sequence of CIE or FDE records. In general, there
403// is one CIE record per input object file which is followed by
404 // a list of FDEs. This function searches for an existing CIE or creates a
405 // new one, and associates FDEs with the CIE.
406template <class ELFT, class RelTy>
407void EhFrameSection::addRecords(EhInputSection *sec, ArrayRef<RelTy> rels) {
408 offsetToCie.clear();
409 for (EhSectionPiece &cie : sec->cies)
410 offsetToCie[cie.inputOff] = addCie<ELFT>(cie, rels);
411 for (EhSectionPiece &fde : sec->fdes) {
412 uint32_t id = endian::read32<ELFT::TargetEndianness>(fde.data().data() + 4);
413 CieRecord *rec = offsetToCie[fde.inputOff + 4 - id];
414 if (!rec)
415 fatal(toString(sec) + ": invalid CIE reference");
416
417 if (!isFdeLive<ELFT>(fde, rels))
418 continue;
419 rec->fdes.push_back(&fde);
420 numFdes++;
421 }
422}
423
424template <class ELFT>
425void EhFrameSection::addSectionAux(EhInputSection *sec) {
426 if (!sec->isLive())
427 return;
428 const RelsOrRelas<ELFT> rels = sec->template relsOrRelas<ELFT>();
429 if (rels.areRelocsRel())
430 addRecords<ELFT>(sec, rels.rels);
431 else
432 addRecords<ELFT>(sec, rels.relas);
433}
434
435// Used by ICF<ELFT>::handleLSDA(). This function is very similar to
436// EhFrameSection::addRecords().
437template <class ELFT, class RelTy>
438void EhFrameSection::iterateFDEWithLSDAAux(
439 EhInputSection &sec, ArrayRef<RelTy> rels, DenseSet<size_t> &ciesWithLSDA,
440 llvm::function_ref<void(InputSection &)> fn) {
441 for (EhSectionPiece &cie : sec.cies)
442 if (hasLSDA(cie))
443 ciesWithLSDA.insert(cie.inputOff);
444 for (EhSectionPiece &fde : sec.fdes) {
445 uint32_t id = endian::read32<ELFT::TargetEndianness>(fde.data().data() + 4);
446 if (!ciesWithLSDA.contains(fde.inputOff + 4 - id))
447 continue;
448
449 // The CIE has an LSDA argument. Call fn with d's section.
450 if (Defined *d = isFdeLive<ELFT>(fde, rels))
451 if (auto *s = dyn_cast_or_null<InputSection>(d->section))
452 fn(*s);
453 }
454}
455
456template <class ELFT>
457void EhFrameSection::iterateFDEWithLSDA(
458 llvm::function_ref<void(InputSection &)> fn) {
459 DenseSet<size_t> ciesWithLSDA;
460 for (EhInputSection *sec : sections) {
461 ciesWithLSDA.clear();
462 const RelsOrRelas<ELFT> rels = sec->template relsOrRelas<ELFT>();
463 if (rels.areRelocsRel())
464 iterateFDEWithLSDAAux<ELFT>(*sec, rels.rels, ciesWithLSDA, fn);
465 else
466 iterateFDEWithLSDAAux<ELFT>(*sec, rels.relas, ciesWithLSDA, fn);
467 }
468}
469
470static void writeCieFde(uint8_t *buf, ArrayRef<uint8_t> d) {
471 memcpy(buf, d.data(), d.size());
472 // Fix the size field. -4 since size does not include the size field itself.
473 write32(buf, d.size() - 4);
474}
475
476void EhFrameSection::finalizeContents() {
477 assert(!this->size); // Not finalized.
478
479 switch (config->ekind) {
480 case ELFNoneKind:
481 llvm_unreachable("invalid ekind");
482 case ELF32LEKind:
483 for (EhInputSection *sec : sections)
484 addSectionAux<ELF32LE>(sec);
485 break;
486 case ELF32BEKind:
487 for (EhInputSection *sec : sections)
488 addSectionAux<ELF32BE>(sec);
489 break;
490 case ELF64LEKind:
491 for (EhInputSection *sec : sections)
492 addSectionAux<ELF64LE>(sec);
493 break;
494 case ELF64BEKind:
495 for (EhInputSection *sec : sections)
496 addSectionAux<ELF64BE>(sec);
497 break;
498 }
499
500 size_t off = 0;
501 for (CieRecord *rec : cieRecords) {
502 rec->cie->outputOff = off;
503 off += rec->cie->size;
504
505 for (EhSectionPiece *fde : rec->fdes) {
506 fde->outputOff = off;
507 off += fde->size;
508 }
509 }
510
511 // The LSB standard does not allow a .eh_frame section with zero
512 // Call Frame Information records. glibc unwind-dw2-fde.c
513 // classify_object_over_fdes expects a CIE record of length 0 as a
514 // terminator. Thus we add one unconditionally.
515 off += 4;
516
517 this->size = off;
518}
519
520 // Returns data for .eh_frame_hdr. .eh_frame_hdr is a binary search table
521 // used to find the FDE that applies to a given address. This function
522 // returns a list of such (address, FDE address) pairs.
523SmallVector<EhFrameSection::FdeData, 0> EhFrameSection::getFdeData() const {
524 uint8_t *buf = Out::bufferStart + getParent()->offset + outSecOff;
525 SmallVector<FdeData, 0> ret;
526
527 uint64_t va = getPartition().ehFrameHdr->getVA();
528 for (CieRecord *rec : cieRecords) {
529 uint8_t enc = getFdeEncoding(rec->cie);
530 for (EhSectionPiece *fde : rec->fdes) {
531 uint64_t pc = getFdePc(buf, fde->outputOff, enc);
532 uint64_t fdeVA = getParent()->addr + fde->outputOff;
533 if (!isInt<32>(pc - va))
534 fatal(toString(fde->sec) + ": PC offset is too large: 0x" +
535 Twine::utohexstr(pc - va));
536 ret.push_back({uint32_t(pc - va), uint32_t(fdeVA - va)});
537 }
538 }
539
540 // Sort the FDE list by PC and remove duplicates. Usually there is only
541 // one FDE per PC (i.e. per function), but if ICF merges two functions
542 // into one, more than one FDE can point to the same address.
543 auto less = [](const FdeData &a, const FdeData &b) {
544 return a.pcRel < b.pcRel;
545 };
546 llvm::stable_sort(ret, less);
547 auto eq = [](const FdeData &a, const FdeData &b) {
548 return a.pcRel == b.pcRel;
549 };
550 ret.erase(std::unique(ret.begin(), ret.end(), eq), ret.end());
551
552 return ret;
553}
554
555static uint64_t readFdeAddr(uint8_t *buf, int size) {
556 switch (size) {
557 case DW_EH_PE_udata2:
558 return read16(buf);
559 case DW_EH_PE_sdata2:
560 return (int16_t)read16(buf);
561 case DW_EH_PE_udata4:
562 return read32(buf);
563 case DW_EH_PE_sdata4:
564 return (int32_t)read32(buf);
565 case DW_EH_PE_udata8:
566 case DW_EH_PE_sdata8:
567 return read64(buf);
568 case DW_EH_PE_absptr:
569 return readUint(buf);
570 }
571 fatal("unknown FDE size encoding");
572}
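
A hedged side note on the signed cases above: the casts to int16_t/int32_t rely on ordinary integer conversions to sign-extend into the uint64_t return value. A tiny standalone check with illustrative values:

#include <cassert>
#include <cstdint>

int main() {
  uint16_t raw = 0xfff0;               // an sdata2 payload encoding -16
  uint64_t v = (uint64_t)(int16_t)raw; // the conversion readFdeAddr performs
  assert(v == 0xfffffffffffffff0ULL);  // sign-extended to 64 bits
}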
573
574 // Returns the VA to which a given FDE (in a mmap'ed buffer) applies.
575 // We need it to create the .eh_frame_hdr section.
576uint64_t EhFrameSection::getFdePc(uint8_t *buf, size_t fdeOff,
577 uint8_t enc) const {
578 // The starting address to which this FDE applies is
579 // stored at FDE + 8 bytes.
580 size_t off = fdeOff + 8;
581 uint64_t addr = readFdeAddr(buf + off, enc & 0xf);
582 if ((enc & 0x70) == DW_EH_PE_absptr)
583 return addr;
584 if ((enc & 0x70) == DW_EH_PE_pcrel)
585 return addr + getParent()->addr + off;
586 fatal("unknown FDE size relative encoding");
587}
588
589void EhFrameSection::writeTo(uint8_t *buf) {
590 // Write CIE and FDE records.
591 for (CieRecord *rec : cieRecords) {
592 size_t cieOffset = rec->cie->outputOff;
593 writeCieFde(buf + cieOffset, rec->cie->data());
594
595 for (EhSectionPiece *fde : rec->fdes) {
596 size_t off = fde->outputOff;
597 writeCieFde(buf + off, fde->data());
598
599 // FDE's second word should have the offset to an associated CIE.
600 // Write it.
601 write32(buf + off + 4, off + 4 - cieOffset);
602 }
603 }
604
605 // Apply relocations. .eh_frame section contents are not contiguous
606 // in the output buffer, but relocateAlloc() still works because
607 // getOffset() takes care of discontiguous section pieces.
608 for (EhInputSection *s : sections)
609 target->relocateAlloc(*s, buf);
610
611 if (getPartition().ehFrameHdr && getPartition().ehFrameHdr->getParent())
612 getPartition().ehFrameHdr->write();
613}
614
615GotSection::GotSection()
616 : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS,
617 target->gotEntrySize, ".got") {
618 numEntries = target->gotHeaderEntriesNum;
619}
620
621void GotSection::addConstant(const Relocation &r) { relocations.push_back(r); }
622void GotSection::addEntry(Symbol &sym) {
623 assert(sym.auxIdx == symAux.size() - 1);
624 symAux.back().gotIdx = numEntries++;
625}
626
627bool GotSection::addTlsDescEntry(Symbol &sym) {
628 assert(sym.auxIdx == symAux.size() - 1);
629 symAux.back().tlsDescIdx = numEntries;
630 numEntries += 2;
631 return true;
632}
633
634bool GotSection::addDynTlsEntry(Symbol &sym) {
635 assert(sym.auxIdx == symAux.size() - 1);
636 symAux.back().tlsGdIdx = numEntries;
637 // Global Dynamic TLS entries take two GOT slots.
638 numEntries += 2;
639 return true;
640}
641
642// Reserves TLS entries for a TLS module ID and a TLS block offset.
643// In total it takes two GOT slots.
644bool GotSection::addTlsIndex() {
645 if (tlsIndexOff != uint32_t(-1))
646 return false;
647 tlsIndexOff = numEntries * config->wordsize;
648 numEntries += 2;
649 return true;
650}
651
652uint32_t GotSection::getTlsDescOffset(const Symbol &sym) const {
653 return sym.getTlsDescIdx() * config->wordsize;
654}
655
656uint64_t GotSection::getTlsDescAddr(const Symbol &sym) const {
657 return getVA() + getTlsDescOffset(sym);
658}
659
660uint64_t GotSection::getGlobalDynAddr(const Symbol &b) const {
661 return this->getVA() + b.getTlsGdIdx() * config->wordsize;
662}
663
664uint64_t GotSection::getGlobalDynOffset(const Symbol &b) const {
665 return b.getTlsGdIdx() * config->wordsize;
666}
667
668void GotSection::finalizeContents() {
669 if (config->emachine == EM_PPC64 &&
670 numEntries <= target->gotHeaderEntriesNum && !ElfSym::globalOffsetTable)
671 size = 0;
672 else
673 size = numEntries * config->wordsize;
674}
675
676bool GotSection::isNeeded() const {
677 // Needed if the GOT symbol is used or the number of entries is more than just
678 // the header. A GOT with just the header may not be needed.
679 return hasGotOffRel || numEntries > target->gotHeaderEntriesNum;
680}
681
682void GotSection::writeTo(uint8_t *buf) {
683 // On PPC64 .got may be needed but empty. Skip the write.
684 if (size == 0)
685 return;
686 target->writeGotHeader(buf);
687 target->relocateAlloc(*this, buf);
688}
689
690static uint64_t getMipsPageAddr(uint64_t addr) {
691 return (addr + 0x8000) & ~0xffff;
692}
693
694static uint64_t getMipsPageCount(uint64_t size) {
695 return (size + 0xfffe) / 0xffff + 1;
696}
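
A quick worked example of the two helpers above, as a standalone sketch with illustrative values (not lld's code):

#include <cassert>
#include <cstdint>

static uint64_t mipsPageAddr(uint64_t addr) {
  return (addr + 0x8000) & ~0xffffULL;
}
static uint64_t mipsPageCount(uint64_t size) {
  return (size + 0xfffe) / 0xffff + 1;
}

int main() {
  // Round to the nearest 64KB page boundary:
  assert(mipsPageAddr(0x12348abc) == 0x12350000);
  // A 64KB section gets an upper bound of 3 page entries; the formula is
  // deliberately conservative about sections straddling page boundaries.
  assert(mipsPageCount(0x10000) == 3);
}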
697
698MipsGotSection::MipsGotSection()
699 : SyntheticSection(SHF_ALLOC | SHF_WRITE | SHF_MIPS_GPREL, SHT_PROGBITS, 16,
700 ".got") {}
701
702void MipsGotSection::addEntry(InputFile &file, Symbol &sym, int64_t addend,
703 RelExpr expr) {
704 FileGot &g = getGot(file);
705 if (expr == R_MIPS_GOT_LOCAL_PAGE) {
706 if (const OutputSection *os = sym.getOutputSection())
707 g.pagesMap.insert({os, {}});
708 else
709 g.local16.insert({{nullptr, getMipsPageAddr(sym.getVA(addend))}, 0});
710 } else if (sym.isTls())
711 g.tls.insert({&sym, 0});
712 else if (sym.isPreemptible && expr == R_ABS)
713 g.relocs.insert({&sym, 0});
714 else if (sym.isPreemptible)
715 g.global.insert({&sym, 0});
716 else if (expr == R_MIPS_GOT_OFF32)
717 g.local32.insert({{&sym, addend}, 0});
718 else
719 g.local16.insert({{&sym, addend}, 0});
720}
721
722void MipsGotSection::addDynTlsEntry(InputFile &file, Symbol &sym) {
723 getGot(file).dynTlsSymbols.insert({&sym, 0});
724}
725
726void MipsGotSection::addTlsIndex(InputFile &file) {
727 getGot(file).dynTlsSymbols.insert({nullptr, 0});
728}
729
730size_t MipsGotSection::FileGot::getEntriesNum() const {
731 return getPageEntriesNum() + local16.size() + global.size() + relocs.size() +
732 tls.size() + dynTlsSymbols.size() * 2;
733}
734
735size_t MipsGotSection::FileGot::getPageEntriesNum() const {
736 size_t num = 0;
737 for (const std::pair<const OutputSection *, FileGot::PageBlock> &p : pagesMap)
738 num += p.second.count;
739 return num;
740}
741
742size_t MipsGotSection::FileGot::getIndexedEntriesNum() const {
743 size_t count = getPageEntriesNum() + local16.size() + global.size();
744 // If there are relocation-only entries in the GOT, TLS entries
745 // are allocated after them. TLS entries should be addressable
746 // by a 16-bit index, so count both reloc-only and TLS entries.
747 if (!tls.empty() || !dynTlsSymbols.empty())
748 count += relocs.size() + tls.size() + dynTlsSymbols.size() * 2;
749 return count;
750}
751
752MipsGotSection::FileGot &MipsGotSection::getGot(InputFile &f) {
753 if (f.mipsGotIndex == uint32_t(-1)) {
754 gots.emplace_back();
755 gots.back().file = &f;
756 f.mipsGotIndex = gots.size() - 1;
757 }
758 return gots[f.mipsGotIndex];
759}
760
761uint64_t MipsGotSection::getPageEntryOffset(const InputFile *f,
762 const Symbol &sym,
763 int64_t addend) const {
764 const FileGot &g = gots[f->mipsGotIndex];
765 uint64_t index = 0;
766 if (const OutputSection *outSec = sym.getOutputSection()) {
767 uint64_t secAddr = getMipsPageAddr(outSec->addr);
768 uint64_t symAddr = getMipsPageAddr(sym.getVA(addend));
769 index = g.pagesMap.lookup(outSec).firstIndex + (symAddr - secAddr) / 0xffff;
770 } else {
771 index = g.local16.lookup({nullptr, getMipsPageAddr(sym.getVA(addend))});
772 }
773 return index * config->wordsize;
774}
775
776uint64_t MipsGotSection::getSymEntryOffset(const InputFile *f, const Symbol &s,
777 int64_t addend) const {
778 const FileGot &g = gots[f->mipsGotIndex];
779 Symbol *sym = const_cast<Symbol *>(&s);
780 if (sym->isTls())
781 return g.tls.lookup(sym) * config->wordsize;
782 if (sym->isPreemptible)
783 return g.global.lookup(sym) * config->wordsize;
784 return g.local16.lookup({sym, addend}) * config->wordsize;
785}
786
787uint64_t MipsGotSection::getTlsIndexOffset(const InputFile *f) const {
788 const FileGot &g = gots[f->mipsGotIndex];
789 return g.dynTlsSymbols.lookup(nullptr) * config->wordsize;
790}
791
792uint64_t MipsGotSection::getGlobalDynOffset(const InputFile *f,
793 const Symbol &s) const {
794 const FileGot &g = gots[f->mipsGotIndex];
795 Symbol *sym = const_cast<Symbol *>(&s);
796 return g.dynTlsSymbols.lookup(sym) * config->wordsize;
797}
798
799const Symbol *MipsGotSection::getFirstGlobalEntry() const {
800 if (gots.empty())
801 return nullptr;
802 const FileGot &primGot = gots.front();
803 if (!primGot.global.empty())
804 return primGot.global.front().first;
805 if (!primGot.relocs.empty())
806 return primGot.relocs.front().first;
807 return nullptr;
808}
809
810unsigned MipsGotSection::getLocalEntriesNum() const {
811 if (gots.empty())
812 return headerEntriesNum;
813 return headerEntriesNum + gots.front().getPageEntriesNum() +
814 gots.front().local16.size();
815}
816
817bool MipsGotSection::tryMergeGots(FileGot &dst, FileGot &src, bool isPrimary) {
818 FileGot tmp = dst;
819 set_union(tmp.pagesMap, src.pagesMap);
820 set_union(tmp.local16, src.local16);
821 set_union(tmp.global, src.global);
822 set_union(tmp.relocs, src.relocs);
823 set_union(tmp.tls, src.tls);
824 set_union(tmp.dynTlsSymbols, src.dynTlsSymbols);
825
826 size_t count = isPrimary ? headerEntriesNum : 0;
827 count += tmp.getIndexedEntriesNum();
828
829 if (count * config->wordsize > config->mipsGotSize)
830 return false;
831
832 std::swap(tmp, dst);
833 return true;
834}
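
tryMergeGots() above uses a commit-or-rollback idiom: union everything into a temporary copy and swap it into dst only if the merged entry count still fits under the GOT size limit. A generic hedged sketch of the same pattern, with illustrative types rather than lld's:

#include <cstddef>
#include <set>
#include <utility>

// Mutate a copy; publish it with swap only when the size limit holds.
bool tryMerge(std::set<int> &dst, const std::set<int> &src, size_t maxSize) {
  std::set<int> tmp = dst;
  tmp.insert(src.begin(), src.end());
  if (tmp.size() > maxSize)
    return false; // dst is left untouched on failure
  std::swap(tmp, dst);
  return true;
}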
835
836void MipsGotSection::finalizeContents() { updateAllocSize(); }
837
838bool MipsGotSection::updateAllocSize() {
839 size = headerEntriesNum * config->wordsize;
840 for (const FileGot &g : gots)
841 size += g.getEntriesNum() * config->wordsize;
842 return false;
843}
844
845void MipsGotSection::build() {
846 if (gots.empty())
847 return;
848
849 std::vector<FileGot> mergedGots(1);
850
851 // For each GOT, move non-preemptible symbols from the `Global` list
852 // to the `Local16` list. A preemptible symbol might become non-preemptible
853 // if, for example, it gets a related copy relocation.
854 for (FileGot &got : gots) {
855 for (auto &p: got.global)
856 if (!p.first->isPreemptible)
857 got.local16.insert({{p.first, 0}, 0});
858 got.global.remove_if([&](const std::pair<Symbol *, size_t> &p) {
859 return !p.first->isPreemptible;
860 });
861 }
862
863 // For each GOT, remove a "reloc-only" entry if there is a "global"
864 // entry for the same symbol, and append the local entries indexed by
865 // 32-bit values after the 16-bit entries.
866 for (FileGot &got : gots) {
867 got.relocs.remove_if([&](const std::pair<Symbol *, size_t> &p) {
868 return got.global.count(p.first);
869 });
870 set_union(got.local16, got.local32);
871 got.local32.clear();
872 }
873
874 // Evaluate the number of "reloc-only" entries in the resulting GOT.
875 // To do that, put all unique "reloc-only" and "global" entries
876 // from all GOTs into the future primary GOT.
877 FileGot *primGot = &mergedGots.front();
878 for (FileGot &got : gots) {
879 set_union(primGot->relocs, got.global);
880 set_union(primGot->relocs, got.relocs);
881 got.relocs.clear();
882 }
883
884 // Evaluate the number of "page" entries in each GOT.
885 for (FileGot &got : gots) {
886 for (std::pair<const OutputSection *, FileGot::PageBlock> &p :
887 got.pagesMap) {
888 const OutputSection *os = p.first;
889 uint64_t secSize = 0;
890 for (SectionCommand *cmd : os->commands) {
891 if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
892 for (InputSection *isec : isd->sections) {
893 uint64_t off = alignToPowerOf2(secSize, isec->addralign);
894 secSize = off + isec->getSize();
895 }
896 }
897 p.second.count = getMipsPageCount(secSize);
898 }
899 }
900
901 // Merge GOTs. Try to join as many GOTs as possible but do not exceed the
902 // maximum GOT size. First, try to fill the primary GOT, because the
903 // primary GOT can be accessed in the most efficient way. If that
904 // is not possible, try to fill the last GOT in the list, and finally
905 // create a new GOT if both attempts fail.
906 for (FileGot &srcGot : gots) {
907 InputFile *file = srcGot.file;
908 if (tryMergeGots(mergedGots.front(), srcGot, true)) {
909 file->mipsGotIndex = 0;
910 } else {
911 // If this is the first time we failed to merge with the primary GOT,
912 // MergedGots.back() will also be the primary GOT. We must make sure not
913 // to try to merge again with isPrimary=false, as otherwise, if the
914 // inputs are just right, we could allow the primary GOT to become 1 or 2
915 // words bigger due to ignoring the header size.
916 if (mergedGots.size() == 1 ||
917 !tryMergeGots(mergedGots.back(), srcGot, false)) {
918 mergedGots.emplace_back();
919 std::swap(mergedGots.back(), srcGot);
920 }
921 file->mipsGotIndex = mergedGots.size() - 1;
922 }
923 }
924 std::swap(gots, mergedGots);
925
926 // Reduce number of "reloc-only" entries in the primary GOT
927 // by subtracting "global" entries in the primary GOT.
928 primGot = &gots.front();
929 primGot->relocs.remove_if([&](const std::pair<Symbol *, size_t> &p) {
930 return primGot->global.count(p.first);
931 });
932
933 // Calculate indexes for each GOT entry.
934 size_t index = headerEntriesNum;
935 for (FileGot &got : gots) {
936 got.startIndex = &got == primGot ? 0 : index;
937 for (std::pair<const OutputSection *, FileGot::PageBlock> &p :
938 got.pagesMap) {
939 // For each output section referenced by GOT page relocations, calculate
940 // and save into pagesMap an upper bound on the MIPS GOT entries required
941 // to store page addresses of local symbols. We assume the worst case:
942 // each 64kb page of the output section has at least one GOT relocation
943 // against it. We also take into account the case when the section
944 // crosses page boundaries.
945 p.second.firstIndex = index;
946 index += p.second.count;
947 }
948 for (auto &p: got.local16)
949 p.second = index++;
950 for (auto &p: got.global)
951 p.second = index++;
952 for (auto &p: got.relocs)
953 p.second = index++;
954 for (auto &p: got.tls)
955 p.second = index++;
956 for (auto &p: got.dynTlsSymbols) {
957 p.second = index;
958 index += 2;
959 }
960 }
961
962 // Update the SymbolAux::gotIdx field so the
963 // value can be used later in the `sortMipsSymbols` function.
964 for (auto &p : primGot->global) {
965 if (p.first->auxIdx == 0)
966 p.first->allocateAux();
967 symAux.back().gotIdx = p.second;
968 }
969 for (auto &p : primGot->relocs) {
970 if (p.first->auxIdx == 0)
971 p.first->allocateAux();
972 symAux.back().gotIdx = p.second;
973 }
974
975 // Create dynamic relocations.
976 for (FileGot &got : gots) {
977 // Create dynamic relocations for TLS entries.
978 for (std::pair<Symbol *, size_t> &p : got.tls) {
979 Symbol *s = p.first;
980 uint64_t offset = p.second * config->wordsize;
981 // When building a shared library we still need a dynamic relocation
982 // for the TP-relative offset as we don't know how much other data will
983 // be allocated before us in the static TLS block.
984 if (s->isPreemptible || config->shared)
985 mainPart->relaDyn->addReloc({target->tlsGotRel, this, offset,
986 DynamicReloc::AgainstSymbolWithTargetVA,
987 *s, 0, R_ABS});
988 }
989 for (std::pair<Symbol *, size_t> &p : got.dynTlsSymbols) {
990 Symbol *s = p.first;
991 uint64_t offset = p.second * config->wordsize;
992 if (s == nullptr) {
993 if (!config->shared)
994 continue;
995 mainPart->relaDyn->addReloc({target->tlsModuleIndexRel, this, offset});
996 } else {
997 // When building a shared library we still need a dynamic relocation
998 // for the module index. Therefore only checking for
999 // S->isPreemptible is not sufficient (this happens e.g. for
1000 // thread-locals that have been marked as local through a linker script).
1001 if (!s->isPreemptible && !config->shared)
1002 continue;
1003 mainPart->relaDyn->addSymbolReloc(target->tlsModuleIndexRel, *this,
1004 offset, *s);
1005 // However, we can skip writing the TLS offset reloc for non-preemptible
1006 // symbols since it is known even in shared libraries.
1007 if (!s->isPreemptible)
1008 continue;
1009 offset += config->wordsize;
1010 mainPart->relaDyn->addSymbolReloc(target->tlsOffsetRel, *this, offset,
1011 *s);
1012 }
1013 }
1014
1015 // Do not create dynamic relocations for non-TLS
1016 // entries in the primary GOT.
1017 if (&got == primGot)
1018 continue;
1019
1020 // Dynamic relocations for "global" entries.
1021 for (const std::pair<Symbol *, size_t> &p : got.global) {
1022 uint64_t offset = p.second * config->wordsize;
1023 mainPart->relaDyn->addSymbolReloc(target->relativeRel, *this, offset,
1024 *p.first);
1025 }
1026 if (!config->isPic)
1027 continue;
1028 // Dynamic relocations for "local" entries in case of PIC.
1029 for (const std::pair<const OutputSection *, FileGot::PageBlock> &l :
1030 got.pagesMap) {
1031 size_t pageCount = l.second.count;
1032 for (size_t pi = 0; pi < pageCount; ++pi) {
1033 uint64_t offset = (l.second.firstIndex + pi) * config->wordsize;
1034 mainPart->relaDyn->addReloc({target->relativeRel, this, offset, l.first,
1035 int64_t(pi * 0x10000)});
1036 }
1037 }
1038 for (const std::pair<GotEntry, size_t> &p : got.local16) {
1039 uint64_t offset = p.second * config->wordsize;
1040 mainPart->relaDyn->addReloc({target->relativeRel, this, offset,
1041 DynamicReloc::AddendOnlyWithTargetVA,
1042 *p.first.first, p.first.second, R_ABS});
1043 }
1044 }
1045}
1046
1047bool MipsGotSection::isNeeded() const {
1048 // We add the .got section to the result for dynamic MIPS targets because
1049 // its address and properties are mentioned in the .dynamic section.
1050 return !config->relocatable;
1051}
1052
1053uint64_t MipsGotSection::getGp(const InputFile *f) const {
1054 // For files without a related GOT, or files that refer to the primary
1055 // GOT, return the "common" _gp value. For secondary GOTs, calculate
1056 // individual _gp values.
1057 if (!f || f->mipsGotIndex == uint32_t(-1) || f->mipsGotIndex == 0)
1058 return ElfSym::mipsGp->getVA(0);
1059 return getVA() + gots[f->mipsGotIndex].startIndex * config->wordsize + 0x7ff0;
1060}
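
The 0x7ff0 bias above follows the usual MIPS convention: placing _gp 0x7ff0 past the GOT base lets signed 16-bit displacements reach nearly 64KB of GOT entries around it. A small sanity sketch with illustrative values:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t gotVA = 0x10000000;  // illustrative GOT base address
  uint64_t gp = gotVA + 0x7ff0; // the bias applied in getGp() above
  // 16-bit signed displacements d reach [gp - 0x8000, gp + 0x7fff]:
  assert(gp - 0x8000 == gotVA - 0x10);
  assert(gp + 0x7fff == gotVA + 0xffef);
}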
1061
1062void MipsGotSection::writeTo(uint8_t *buf) {
1063 // Set the MSB of the second GOT slot. This is not required by any
1064 // MIPS ABI documentation, though.
1065 //
1066 // There is a comment in glibc saying that "The MSB of got[1] of a
1067 // gnu object is set to identify gnu objects," and in GNU gold it
1068 // says "the second entry will be used by some runtime loaders".
1069 // But how this field is being used is unclear.
1070 //
1071 // We are not really willing to mimic other linkers' behaviors
1072 // without understanding why they do that, but because all files
1073 // generated by GNU tools have this special GOT value, and because
1074 // we've been doing this for years, it is probably a safe bet to
1075 // keep doing this for now. We should revisit this to see whether
1076 // we really have to do it.
1077 writeUint(buf + config->wordsize, (uint64_t)1 << (config->wordsize * 8 - 1));
1078 for (const FileGot &g : gots) {
1079 auto write = [&](size_t i, const Symbol *s, int64_t a) {
1080 uint64_t va = a;
1081 if (s)
1082 va = s->getVA(a);
1083 writeUint(buf + i * config->wordsize, va);
1084 };
1085 // Write 'page address' entries to the local part of the GOT.
1086 for (const std::pair<const OutputSection *, FileGot::PageBlock> &l :
1087 g.pagesMap) {
1088 size_t pageCount = l.second.count;
1089 uint64_t firstPageAddr = getMipsPageAddr(l.first->addr);
1090 for (size_t pi = 0; pi < pageCount; ++pi)
1091 write(l.second.firstIndex + pi, nullptr, firstPageAddr + pi * 0x10000);
1092 }
1093 // Local, global, TLS, reloc-only entries.
1094 // If a TLS entry has a corresponding dynamic relocation, leave it
1095 // initialized to zero. Otherwise, write the adjusted TLS symbol value.
1096 // To calculate the adjustments, use the thread-local storage offsets.
1097 // http://web.archive.org/web/20190324223224/https://www.linux-mips.org/wiki/NPTL
1098 for (const std::pair<GotEntry, size_t> &p : g.local16)
1099 write(p.second, p.first.first, p.first.second);
1100 // Write VA to the primary GOT only. For secondary GOTs that
1101 // will be done by REL32 dynamic relocations.
1102 if (&g == &gots.front())
1103 for (const std::pair<Symbol *, size_t> &p : g.global)
1104 write(p.second, p.first, 0);
1105 for (const std::pair<Symbol *, size_t> &p : g.relocs)
1106 write(p.second, p.first, 0);
1107 for (const std::pair<Symbol *, size_t> &p : g.tls)
1108 write(p.second, p.first,
1109 p.first->isPreemptible || config->shared ? 0 : -0x7000);
1110 for (const std::pair<Symbol *, size_t> &p : g.dynTlsSymbols) {
1111 if (p.first == nullptr && !config->shared)
1112 write(p.second, nullptr, 1);
1113 else if (p.first && !p.first->isPreemptible) {
1114 // If we are emitting a shared library with relocations we mustn't write
1115 // anything to the GOT here. When using Elf_Rel relocations the value
1116 // one will be treated as an addend and will cause crashes at runtime.
1117 if (!config->shared)
1118 write(p.second, nullptr, 1);
1119 write(p.second + 1, p.first, -0x8000);
1120 }
1121 }
1122 }
1123}
1124
1125// On PowerPC the .plt section is used to hold the table of function addresses
1126// instead of the .got.plt, and the type is SHT_NOBITS similar to a .bss
1127// section. I don't know why we have a BSS style type for the section but it is
1128// consistent across both 64-bit PowerPC ABIs as well as the 32-bit PowerPC ABI.
1129GotPltSection::GotPltSection()
1130 : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS, config->wordsize,
1131 ".got.plt") {
1132 if (config->emachine == EM_PPC) {
1133 name = ".plt";
1134 } else if (config->emachine == EM_PPC64) {
1135 type = SHT_NOBITS;
1136 name = ".plt";
1137 }
1138}
1139
1140void GotPltSection::addEntry(Symbol &sym) {
1141 assert(sym.auxIdx == symAux.size() - 1 &&
1142 symAux.back().pltIdx == entries.size());
1143 entries.push_back(&sym);
1144}
1145
1146size_t GotPltSection::getSize() const {
1147 return (target->gotPltHeaderEntriesNum + entries.size()) *
1148 target->gotEntrySize;
1149}
1150
1151void GotPltSection::writeTo(uint8_t *buf) {
1152 target->writeGotPltHeader(buf);
1153 buf += target->gotPltHeaderEntriesNum * target->gotEntrySize;
1154 for (const Symbol *b : entries) {
1155 target->writeGotPlt(buf, *b);
1156 buf += target->gotEntrySize;
1157 }
1158}
1159
1160bool GotPltSection::isNeeded() const {
1161 // We need to emit the GOTPLT even if it's empty when there's a relocation
1162 // relative to it.
1163 return !entries.empty() || hasGotPltOffRel;
1164}
1165
1166static StringRef getIgotPltName() {
1167 // On ARM the IgotPltSection is part of the GotSection.
1168 if (config->emachine == EM_ARM)
1169 return ".got";
1170
1171 // On PowerPC64 the GotPltSection is renamed to '.plt' so the IgotPltSection
1172 // needs to be named the same.
1173 if (config->emachine == EM_PPC64)
1174 return ".plt";
1175
1176 return ".got.plt";
1177}
1178
1179// On PowerPC64 the GotPltSection type is SHT_NOBITS so we have to follow suit
1180// with the IgotPltSection.
1181IgotPltSection::IgotPltSection()
1182 : SyntheticSection(SHF_ALLOC | SHF_WRITE,
1183 config->emachine == EM_PPC64 ? SHT_NOBITS : SHT_PROGBITS,
1184 target->gotEntrySize, getIgotPltName()) {}
1185
1186void IgotPltSection::addEntry(Symbol &sym) {
1187 assert(symAux.back().pltIdx == entries.size());
1188 entries.push_back(&sym);
1189}
1190
1191size_t IgotPltSection::getSize() const {
1192 return entries.size() * target->gotEntrySize;
1193}
1194
1195void IgotPltSection::writeTo(uint8_t *buf) {
1196 for (const Symbol *b : entries) {
1197 target->writeIgotPlt(buf, *b);
1198 buf += target->gotEntrySize;
1199 }
1200}
1201
1202StringTableSection::StringTableSection(StringRef name, bool dynamic)
1203 : SyntheticSection(dynamic ? (uint64_t)SHF_ALLOC : 0, SHT_STRTAB, 1, name),
1204 dynamic(dynamic) {
1205 // ELF string tables start with a NUL byte.
1206 strings.push_back("");
1207 stringMap.try_emplace(CachedHashStringRef(""), 0);
1208 size = 1;
1209}
1210
1211// Adds a string to the string table. If `hashIt` is true we hash and check for
1212 // duplicates. It is optional because the names of global symbols are already
1213// uniqued and hashing them again has a big cost for a small value: uniquing
1214// them with some other string that happens to be the same.
1215unsigned StringTableSection::addString(StringRef s, bool hashIt) {
1216 if (hashIt) {
1217 auto r = stringMap.try_emplace(CachedHashStringRef(s), size);
1218 if (!r.second)
1219 return r.first->second;
1220 }
1221 if (s.empty())
1222 return 0;
1223 unsigned ret = this->size;
1224 this->size = this->size + s.size() + 1;
1225 strings.push_back(s);
1226 return ret;
1227}
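
A hedged standalone analogue of addString()'s behavior, showing when two identical strings share one offset; the types here are illustrative, not lld's:

#include <cassert>
#include <string>
#include <unordered_map>
#include <vector>

struct StrTab {
  std::vector<std::string> strings{""}; // leading NUL entry
  std::unordered_map<std::string, unsigned> map{{"", 0}};
  unsigned size = 1;

  unsigned add(const std::string &s, bool hashIt) {
    if (hashIt) {
      auto [it, inserted] = map.try_emplace(s, size);
      if (!inserted)
        return it->second; // duplicate: reuse the existing offset
    }
    if (s.empty())
      return 0;
    unsigned off = size;
    size += s.size() + 1;
    strings.push_back(s);
    return off;
  }
};

int main() {
  StrTab t;
  unsigned a = t.add("foo", /*hashIt=*/true);
  assert(t.add("foo", true) == a);  // deduplicated via the hash map
  assert(t.add("foo", false) != a); // appended again without hashing
}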
1228
1229void StringTableSection::writeTo(uint8_t *buf) {
1230 for (StringRef s : strings) {
1231 memcpy(buf, s.data(), s.size());
1232 buf[s.size()] = '\0';
1233 buf += s.size() + 1;
1234 }
1235}
1236
1237// Returns the number of entries in .gnu.version_d: the number of
1238// definitions other than VER_NDX_LOCAL and VER_NDX_GLOBAL, plus 1.
1239// Note that we don't support vd_cnt > 1 yet.
1240static unsigned getVerDefNum() {
1241 return namedVersionDefs().size() + 1;
1242}
1243
1244template <class ELFT>
1245DynamicSection<ELFT>::DynamicSection()
1246 : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_DYNAMIC, config->wordsize,
1247 ".dynamic") {
1248 this->entsize = ELFT::Is64Bits ? 16 : 8;
1249
1250 // The .dynamic section is not writable on MIPS, nor on Fuchsia OS,
1251 // which passes -z rodynamic.
1252 // See "Special Section" in Chapter 4 in the following document:
1253 // ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
1254 if (config->emachine == EM_MIPS || config->zRodynamic)
1255 this->flags = SHF_ALLOC;
1256}
1257
1258// The output section .rela.dyn may include these synthetic sections:
1259//
1260// - part.relaDyn
1261// - in.relaIplt: this is included if in.relaIplt is named .rela.dyn
1262// - in.relaPlt: this is included if a linker script places .rela.plt inside
1263// .rela.dyn
1264//
1265// DT_RELASZ is the total size of the included sections.
1266static uint64_t addRelaSz(const RelocationBaseSection &relaDyn) {
1267 size_t size = relaDyn.getSize();
1268 if (in.relaIplt->getParent() == relaDyn.getParent())
1269 size += in.relaIplt->getSize();
1270 if (in.relaPlt->getParent() == relaDyn.getParent())
1271 size += in.relaPlt->getSize();
1272 return size;
1273}
1274
1275 // A linker script may assign the RELA relocation sections to the same
1276 // output section. When this occurs we cannot just use the OutputSection
1277 // size. Moreover, [DT_JMPREL, DT_JMPREL + DT_PLTRELSZ) is permitted to
1278 // overlap with [DT_RELA, DT_RELA + DT_RELASZ).
1279static uint64_t addPltRelSz() {
1280 size_t size = in.relaPlt->getSize();
1281 if (in.relaIplt->getParent() == in.relaPlt->getParent() &&
1282 in.relaIplt->name == in.relaPlt->name)
1283 size += in.relaIplt->getSize();
1284 return size;
1285}
1286
1287// Add remaining entries to complete .dynamic contents.
1288template <class ELFT>
1289std::vector<std::pair<int32_t, uint64_t>>
1290DynamicSection<ELFT>::computeContents() {
1291 elf::Partition &part = getPartition();
1292 bool isMain = part.name.empty();
1293 std::vector<std::pair<int32_t, uint64_t>> entries;
1294
1295 auto addInt = [&](int32_t tag, uint64_t val) {
1296 entries.emplace_back(tag, val);
1297 };
1298 auto addInSec = [&](int32_t tag, const InputSection &sec) {
1299 entries.emplace_back(tag, sec.getVA());
1300 };
1301
1302 for (StringRef s : config->filterList)
1303 addInt(DT_FILTER, part.dynStrTab->addString(s));
1304 for (StringRef s : config->auxiliaryList)
1305 addInt(DT_AUXILIARY, part.dynStrTab->addString(s));
1306
1307 if (!config->rpath.empty())
1308 addInt(config->enableNewDtags ? DT_RUNPATH : DT_RPATH,
1309 part.dynStrTab->addString(config->rpath));
1310
1311 for (SharedFile *file : ctx.sharedFiles)
1312 if (file->isNeeded)
1313 addInt(DT_NEEDED, part.dynStrTab->addString(file->soName));
1314
1315 if (isMain) {
1316 if (!config->soName.empty())
1317 addInt(DT_SONAME, part.dynStrTab->addString(config->soName));
1318 } else {
1319 if (!config->soName.empty())
1320 addInt(DT_NEEDED, part.dynStrTab->addString(config->soName));
1321 addInt(DT_SONAME, part.dynStrTab->addString(part.name));
1322 }
1323
1324 // Set DT_FLAGS and DT_FLAGS_1.
1325 uint32_t dtFlags = 0;
1326 uint32_t dtFlags1 = 0;
1327 if (config->bsymbolic == BsymbolicKind::All)
1328 dtFlags |= DF_SYMBOLIC;
1329 if (config->zGlobal)
1330 dtFlags1 |= DF_1_GLOBAL;
1331 if (config->zInitfirst)
1332 dtFlags1 |= DF_1_INITFIRST;
1333 if (config->zInterpose)
1334 dtFlags1 |= DF_1_INTERPOSE;
1335 if (config->zNodefaultlib)
1336 dtFlags1 |= DF_1_NODEFLIB;
1337 if (config->zNodelete)
1338 dtFlags1 |= DF_1_NODELETE;
1339 if (config->zNodlopen)
1340 dtFlags1 |= DF_1_NOOPEN;
1341 if (config->pie)
1342 dtFlags1 |= DF_1_PIE;
1343 if (config->zNow) {
1344 dtFlags |= DF_BIND_NOW;
1345 dtFlags1 |= DF_1_NOW;
1346 }
1347 if (config->zOrigin) {
1348 dtFlags |= DF_ORIGIN;
1349 dtFlags1 |= DF_1_ORIGIN;
1350 }
1351 if (!config->zText)
1352 dtFlags |= DF_TEXTREL;
1353 if (ctx.hasTlsIe && config->shared)
1354 dtFlags |= DF_STATIC_TLS;
1355
1356 if (dtFlags)
1357 addInt(DT_FLAGS, dtFlags);
1358 if (dtFlags1)
1359 addInt(DT_FLAGS_1, dtFlags1);
1360
1361 // DT_DEBUG is a pointer to debug information used by debuggers at runtime. We
1362 // need it for each process, so we don't write it for DSOs. The loader writes
1363 // the pointer into this entry.
1364 //
1365 // DT_DEBUG is the only .dynamic entry that needs to be written to. Some
1366 // systems (currently only Fuchsia OS) provide other means to give the
1367 // debugger this information. Such systems may choose to make .dynamic read-only.
1368 // If the target is such a system (-z rodynamic was used), don't write DT_DEBUG.
1369 if (!config->shared && !config->relocatable && !config->zRodynamic)
1370 addInt(DT_DEBUG, 0);
1371
1372 if (part.relaDyn->isNeeded() ||
1373 (in.relaIplt->isNeeded() &&
1374 part.relaDyn->getParent() == in.relaIplt->getParent())) {
1375 addInSec(part.relaDyn->dynamicTag, *part.relaDyn);
1376 entries.emplace_back(part.relaDyn->sizeDynamicTag,
1377 addRelaSz(*part.relaDyn));
1378
1379 bool isRela = config->isRela;
1380 addInt(isRela ? DT_RELAENT : DT_RELENT,
1381 isRela ? sizeof(Elf_Rela) : sizeof(Elf_Rel));
1382
1383 // The MIPS dynamic loader does not support the RELCOUNT tag.
1384 // The problem is the tight coupling between dynamic
1385 // relocations and the GOT, so do not emit this tag on MIPS.
1386 if (config->emachine != EM_MIPS) {
1387 size_t numRelativeRels = part.relaDyn->getRelativeRelocCount();
1388 if (config->zCombreloc && numRelativeRels)
1389 addInt(isRela ? DT_RELACOUNT : DT_RELCOUNT, numRelativeRels);
1390 }
1391 }
1392 if (part.relrDyn && part.relrDyn->getParent() &&
1393 !part.relrDyn->relocs.empty()) {
1394 addInSec(config->useAndroidRelrTags ? DT_ANDROID_RELR : DT_RELR,
1395 *part.relrDyn);
1396 addInt(config->useAndroidRelrTags ? DT_ANDROID_RELRSZ : DT_RELRSZ,
1397 part.relrDyn->getParent()->size);
1398 addInt(config->useAndroidRelrTags ? DT_ANDROID_RELRENT : DT_RELRENT,
1399 sizeof(Elf_Relr));
1400 }
1401 // The .rel[a].plt section usually consists of two parts, containing plt and
1402 // iplt relocations. It is possible to have only iplt relocations in the
1403 // output. In that case relaPlt is empty and has a zero offset, the same
1404 // offset as relaIplt. We still want to emit proper dynamic tags for that
1405 // case, so here we always use relaPlt as the marker for the beginning of the
1406 // .rel[a].plt section.
1407 if (isMain && (in.relaPlt->isNeeded() || in.relaIplt->isNeeded())) {
1408 addInSec(DT_JMPREL, *in.relaPlt);
1409 entries.emplace_back(DT_PLTRELSZ, addPltRelSz());
1410 switch (config->emachine) {
1411 case EM_MIPS:
1412 addInSec(DT_MIPS_PLTGOT, *in.gotPlt);
1413 break;
1414 case EM_SPARCV9:
1415 addInSec(DT_PLTGOT, *in.plt);
1416 break;
1417 case EM_AARCH64:
1418 if (llvm::find_if(in.relaPlt->relocs, [](const DynamicReloc &r) {
1419 return r.type == target->pltRel &&
1420 r.sym->stOther & STO_AARCH64_VARIANT_PCS;
1421 }) != in.relaPlt->relocs.end())
1422 addInt(DT_AARCH64_VARIANT_PCS, 0);
1423 addInSec(DT_PLTGOT, *in.gotPlt);
1424 break;
1425 case EM_RISCV:
1426 if (llvm::any_of(in.relaPlt->relocs, [](const DynamicReloc &r) {
1427 return r.type == target->pltRel &&
1428 (r.sym->stOther & STO_RISCV_VARIANT_CC);
1429 }))
1430 addInt(DT_RISCV_VARIANT_CC, 0);
1431 [[fallthrough]];
1432 default:
1433 addInSec(DT_PLTGOT, *in.gotPlt);
1434 break;
1435 }
1436 addInt(DT_PLTREL, config->isRela ? DT_RELA : DT_REL);
1437 }
1438
1439 if (config->emachine == EM_AARCH64) {
1440 if (config->andFeatures & GNU_PROPERTY_AARCH64_FEATURE_1_BTI)
1441 addInt(DT_AARCH64_BTI_PLT, 0);
1442 if (config->zPacPlt)
1443 addInt(DT_AARCH64_PAC_PLT, 0);
1444 }
1445
1446 addInSec(DT_SYMTAB, *part.dynSymTab);
1447 addInt(DT_SYMENT, sizeof(Elf_Sym));
1448 addInSec(DT_STRTAB, *part.dynStrTab);
1449 addInt(DT_STRSZ, part.dynStrTab->getSize());
1450 if (!config->zText)
1451 addInt(DT_TEXTREL, 0);
1452 if (part.gnuHashTab && part.gnuHashTab->getParent())
1453 addInSec(DT_GNU_HASH, *part.gnuHashTab);
1454 if (part.hashTab && part.hashTab->getParent())
1455 addInSec(DT_HASH, *part.hashTab);
1456
1457 if (isMain) {
1458 if (Out::preinitArray) {
1459 addInt(DT_PREINIT_ARRAY, Out::preinitArray->addr);
1460 addInt(DT_PREINIT_ARRAYSZ, Out::preinitArray->size);
1461 }
1462 if (Out::initArray) {
1463 addInt(DT_INIT_ARRAY, Out::initArray->addr);
1464 addInt(DT_INIT_ARRAYSZ, Out::initArray->size);
1465 }
1466 if (Out::finiArray) {
1467 addInt(DT_FINI_ARRAY, Out::finiArray->addr);
1468 addInt(DT_FINI_ARRAYSZ, Out::finiArray->size);
1469 }
1470
1471 if (Symbol *b = symtab.find(config->init))
1472 if (b->isDefined())
1473 addInt(DT_INIT, b->getVA());
1474 if (Symbol *b = symtab.find(config->fini))
1475 if (b->isDefined())
1476 addInt(DT_FINI, b->getVA());
1477 }
1478
1479 if (part.verSym && part.verSym->isNeeded())
1480 addInSec(DT_VERSYM, *part.verSym);
1481 if (part.verDef && part.verDef->isLive()) {
1482 addInSec(DT_VERDEF, *part.verDef);
1483 addInt(DT_VERDEFNUM, getVerDefNum());
1484 }
1485 if (part.verNeed && part.verNeed->isNeeded()) {
1486 addInSec(DT_VERNEED, *part.verNeed);
1487 unsigned needNum = 0;
1488 for (SharedFile *f : ctx.sharedFiles)
1489 if (!f->vernauxs.empty())
1490 ++needNum;
1491 addInt(DT_VERNEEDNUM, needNum);
1492 }
1493
1494 if (config->emachine == EM_MIPS) {
1495 addInt(DT_MIPS_RLD_VERSION, 1);
1496 addInt(DT_MIPS_FLAGS, RHF_NOTPOT);
1497 addInt(DT_MIPS_BASE_ADDRESS, target->getImageBase());
1498 addInt(DT_MIPS_SYMTABNO, part.dynSymTab->getNumSymbols());
1499 addInt(DT_MIPS_LOCAL_GOTNO, in.mipsGot->getLocalEntriesNum());
1500
1501 if (const Symbol *b = in.mipsGot->getFirstGlobalEntry())
1502 addInt(DT_MIPS_GOTSYM, b->dynsymIndex);
1503 else
1504 addInt(DT_MIPS_GOTSYM, part.dynSymTab->getNumSymbols());
1505 addInSec(DT_PLTGOT, *in.mipsGot);
1506 if (in.mipsRldMap) {
1507 if (!config->pie)
1508 addInSec(DT_MIPS_RLD_MAP, *in.mipsRldMap);
1509 // Store the offset to the .rld_map section
1510 // relative to the address of the tag.
1511 addInt(DT_MIPS_RLD_MAP_REL,
1512 in.mipsRldMap->getVA() - (getVA() + entries.size() * entsize));
1513 }
1514 }
1515
1516 // DT_PPC_GOT indicates to glibc that the Secure PLT is used. If DT_PPC_GOT is
1517 // absent, glibc assumes the old-style BSS PLT layout, which we don't support.
1518 if (config->emachine == EM_PPC)
1519 addInSec(DT_PPC_GOT, *in.got);
1520
1521 // The glink dynamic tag is required by the V2 ABI if the .plt section isn't empty.
1522 if (config->emachine == EM_PPC64 && in.plt->isNeeded()) {
1523 // The Glink tag points to 32 bytes before the first lazy symbol resolution
1524 // stub, which starts directly after the header.
1525 addInt(DT_PPC64_GLINK, in.plt->getVA() + target->pltHeaderSize - 32);
1526 }
1527
1528 addInt(DT_NULL, 0);
1529 return entries;
1530}
1531
1532template <class ELFT> void DynamicSection<ELFT>::finalizeContents() {
1533 if (OutputSection *sec = getPartition().dynStrTab->getParent())
1534 getParent()->link = sec->sectionIndex;
1535 this->size = computeContents().size() * this->entsize;
1536}
1537
1538template <class ELFT> void DynamicSection<ELFT>::writeTo(uint8_t *buf) {
1539 auto *p = reinterpret_cast<Elf_Dyn *>(buf);
1540
1541 for (std::pair<int32_t, uint64_t> kv : computeContents()) {
1542 p->d_tag = kv.first;
1543 p->d_un.d_val = kv.second;
1544 ++p;
1545 }
1546}
1547
1548uint64_t DynamicReloc::getOffset() const {
1549 return inputSec->getVA(offsetInSec);
1550}
1551
1552int64_t DynamicReloc::computeAddend() const {
1553 switch (kind) {
1554 case AddendOnly:
1555     assert(sym == nullptr);
1556 return addend;
1557 case AgainstSymbol:
1558     assert(sym != nullptr);
1559 return addend;
1560 case AddendOnlyWithTargetVA:
1561 case AgainstSymbolWithTargetVA:
1562 return InputSection::getRelocTargetVA(inputSec->file, type, addend,
1563 getOffset(), *sym, expr);
1564 case MipsMultiGotPage:
1565     assert(sym == nullptr);
1566 return getMipsPageAddr(outputSec->addr) + addend;
1567 }
1568   llvm_unreachable("Unknown DynamicReloc::Kind enum");
1569}
1570
1571uint32_t DynamicReloc::getSymIndex(SymbolTableBaseSection *symTab) const {
1572 if (!needsDynSymIndex())
1573 return 0;
1574
1575 size_t index = symTab->getSymbolIndex(sym);
1576   assert((index != 0 || (type != target->gotRel && type != target->pltRel) ||
1577           !mainPart->dynSymTab->getParent()) &&
1578          "GOT or PLT relocation must refer to symbol in dynamic symbol table");
1579 return index;
1580}
1581
1582RelocationBaseSection::RelocationBaseSection(StringRef name, uint32_t type,
1583 int32_t dynamicTag,
1584 int32_t sizeDynamicTag,
1585 bool combreloc,
1586 unsigned concurrency)
1587 : SyntheticSection(SHF_ALLOC, type, config->wordsize, name),
1588 dynamicTag(dynamicTag), sizeDynamicTag(sizeDynamicTag),
1589 relocsVec(concurrency), combreloc(combreloc) {}
1590
1591void RelocationBaseSection::addSymbolReloc(
1592 RelType dynType, InputSectionBase &isec, uint64_t offsetInSec, Symbol &sym,
1593 int64_t addend, std::optional<RelType> addendRelType) {
1594 addReloc(DynamicReloc::AgainstSymbol, dynType, isec, offsetInSec, sym, addend,
1595 R_ADDEND, addendRelType ? *addendRelType : target->noneRel);
1596}
1597
1598void RelocationBaseSection::addAddendOnlyRelocIfNonPreemptible(
1599 RelType dynType, GotSection &sec, uint64_t offsetInSec, Symbol &sym,
1600 RelType addendRelType) {
1601 // No need to write an addend to the section for preemptible symbols.
1602 if (sym.isPreemptible)
1603 addReloc({dynType, &sec, offsetInSec, DynamicReloc::AgainstSymbol, sym, 0,
1604 R_ABS});
1605 else
1606 addReloc(DynamicReloc::AddendOnlyWithTargetVA, dynType, sec, offsetInSec,
1607 sym, 0, R_ABS, addendRelType);
1608}
1609
1610void RelocationBaseSection::mergeRels() {
1611 size_t newSize = relocs.size();
1612 for (const auto &v : relocsVec)
1613 newSize += v.size();
1614 relocs.reserve(newSize);
1615 for (const auto &v : relocsVec)
1616 llvm::append_range(relocs, v);
1617 relocsVec.clear();
1618}
1619
1620void RelocationBaseSection::partitionRels() {
1621 if (!combreloc)
1622 return;
1623 const RelType relativeRel = target->relativeRel;
1624 numRelativeRelocs =
1625 llvm::partition(relocs, [=](auto &r) { return r.type == relativeRel; }) -
1626 relocs.begin();
1627}
1628
1629void RelocationBaseSection::finalizeContents() {
1630 SymbolTableBaseSection *symTab = getPartition().dynSymTab.get();
1631
1632 // When linking glibc statically, .rel{,a}.plt contains R_*_IRELATIVE
1633 // relocations due to IFUNC (e.g. strcpy). sh_link will be set to 0 in that
1634 // case.
1635 if (symTab && symTab->getParent())
1636 getParent()->link = symTab->getParent()->sectionIndex;
1637 else
1638 getParent()->link = 0;
1639
1640 if (in.relaPlt.get() == this && in.gotPlt->getParent()) {
1641 getParent()->flags |= ELF::SHF_INFO_LINK;
1642 getParent()->info = in.gotPlt->getParent()->sectionIndex;
1643 }
1644 if (in.relaIplt.get() == this && in.igotPlt->getParent()) {
1645 getParent()->flags |= ELF::SHF_INFO_LINK;
1646 getParent()->info = in.igotPlt->getParent()->sectionIndex;
1647 }
1648}
1649
1650void DynamicReloc::computeRaw(SymbolTableBaseSection *symtab) {
1651 r_offset = getOffset();
1652 r_sym = getSymIndex(symtab);
1653 addend = computeAddend();
1654 kind = AddendOnly; // Catch errors
1655}
1656
1657void RelocationBaseSection::computeRels() {
1658 SymbolTableBaseSection *symTab = getPartition().dynSymTab.get();
1659 parallelForEach(relocs,
1660 [symTab](DynamicReloc &rel) { rel.computeRaw(symTab); });
1661 // Sort by (!IsRelative,SymIndex,r_offset). DT_REL[A]COUNT requires us to
1662 // place R_*_RELATIVE first. SymIndex is to improve locality, while r_offset
1663 // is to make results easier to read.
1664 if (combreloc) {
1665 auto nonRelative = relocs.begin() + numRelativeRelocs;
1666 parallelSort(relocs.begin(), nonRelative,
1667 [&](auto &a, auto &b) { return a.r_offset < b.r_offset; });
1668 // Non-relative relocations are few, so don't bother with parallelSort.
1669 llvm::sort(nonRelative, relocs.end(), [&](auto &a, auto &b) {
1670 return std::tie(a.r_sym, a.r_offset) < std::tie(b.r_sym, b.r_offset);
1671 });
1672 }
1673}
1674
1675template <class ELFT>
1676RelocationSection<ELFT>::RelocationSection(StringRef name, bool combreloc,
1677 unsigned concurrency)
1678 : RelocationBaseSection(name, config->isRela ? SHT_RELA : SHT_REL,
1679 config->isRela ? DT_RELA : DT_REL,
1680 config->isRela ? DT_RELASZ : DT_RELSZ, combreloc,
1681 concurrency) {
1682 this->entsize = config->isRela ? sizeof(Elf_Rela) : sizeof(Elf_Rel);
1683}
1684
1685template <class ELFT> void RelocationSection<ELFT>::writeTo(uint8_t *buf) {
1686 computeRels();
1687 for (const DynamicReloc &rel : relocs) {
1688 auto *p = reinterpret_cast<Elf_Rela *>(buf);
1689 p->r_offset = rel.r_offset;
1690 p->setSymbolAndType(rel.r_sym, rel.type, config->isMips64EL);
1691 if (config->isRela)
1692 p->r_addend = rel.addend;
1693 buf += config->isRela ? sizeof(Elf_Rela) : sizeof(Elf_Rel);
1694 }
1695}
1696
1697RelrBaseSection::RelrBaseSection(unsigned concurrency)
1698 : SyntheticSection(SHF_ALLOC,
1699 config->useAndroidRelrTags ? SHT_ANDROID_RELR : SHT_RELR,
1700 config->wordsize, ".relr.dyn"),
1701 relocsVec(concurrency) {}
1702
1703void RelrBaseSection::mergeRels() {
1704 size_t newSize = relocs.size();
1705 for (const auto &v : relocsVec)
1706 newSize += v.size();
1707 relocs.reserve(newSize);
1708 for (const auto &v : relocsVec)
1709 llvm::append_range(relocs, v);
1710 relocsVec.clear();
1711}
1712
1713template <class ELFT>
1714AndroidPackedRelocationSection<ELFT>::AndroidPackedRelocationSection(
1715 StringRef name, unsigned concurrency)
1716 : RelocationBaseSection(
1717 name, config->isRela ? SHT_ANDROID_RELA : SHT_ANDROID_REL,
1718 config->isRela ? DT_ANDROID_RELA : DT_ANDROID_REL,
1719 config->isRela ? DT_ANDROID_RELASZ : DT_ANDROID_RELSZ,
1720 /*combreloc=*/false, concurrency) {
1721 this->entsize = 1;
1722}
1723
1724template <class ELFT>
1725bool AndroidPackedRelocationSection<ELFT>::updateAllocSize() {
1726 // This function computes the contents of an Android-format packed relocation
1727 // section.
1728 //
1729 // This format compresses relocations by using relocation groups to factor out
1730 // fields that are common between relocations and storing deltas from previous
1731 // relocations in SLEB128 format (which has a short representation for small
1732 // numbers). A good example of a relocation type with common fields is
1733 // R_*_RELATIVE, which is normally used to represent function pointers in
1734 // vtables. In the REL format, each relative relocation has the same r_info
1735 // field, and is only different from other relative relocations in terms of
1736 // the r_offset field. By sorting relocations by offset, grouping them by
1737 // r_info and representing each relocation with only the delta from the
1738 // previous offset, each 8-byte relocation can be compressed to as little as 1
1739 // byte (or less with run-length encoding). This relocation packer was able to
1740 // reduce the size of the relocation section in an Android Chromium DSO from
1741 // 2,911,184 bytes to 174,693 bytes, or 6% of the original size.
1742 //
1743 // A relocation section consists of a header containing the literal bytes
1744 // 'APS2' followed by a sequence of SLEB128-encoded integers. The first two
1745 // elements are the total number of relocations in the section and an initial
1746 // r_offset value. The remaining elements define a sequence of relocation
1747 // groups. Each relocation group starts with a header consisting of the
1748 // following elements:
1749 //
1750 // - the number of relocations in the relocation group
1751 // - flags for the relocation group
1752 // - (if RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG is set) the r_offset delta
1753 // for each relocation in the group.
1754 // - (if RELOCATION_GROUPED_BY_INFO_FLAG is set) the value of the r_info
1755 // field for each relocation in the group.
1756 // - (if RELOCATION_GROUP_HAS_ADDEND_FLAG and
1757 // RELOCATION_GROUPED_BY_ADDEND_FLAG are set) the r_addend delta for
1758 // each relocation in the group.
1759 //
1760 // Following the relocation group header are descriptions of each of the
1761 // relocations in the group. They consist of the following elements:
1762 //
1763 // - (if RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG is not set) the r_offset
1764 // delta for this relocation.
1765 // - (if RELOCATION_GROUPED_BY_INFO_FLAG is not set) the value of the r_info
1766 // field for this relocation.
1767 // - (if RELOCATION_GROUP_HAS_ADDEND_FLAG is set and
1768 // RELOCATION_GROUPED_BY_ADDEND_FLAG is not set) the r_addend delta for
1769 // this relocation.
1770
1771 size_t oldSize = relocData.size();
1772
1773 relocData = {'A', 'P', 'S', '2'};
1774 raw_svector_ostream os(relocData);
1775 auto add = [&](int64_t v) { encodeSLEB128(v, os); };
1776
1777 // The format header includes the number of relocations and the initial
1778 // offset (we set this to zero because the first relocation group will
1779 // perform the initial adjustment).
1780 add(relocs.size());
1781 add(0);
1782
1783 std::vector<Elf_Rela> relatives, nonRelatives;
1784
1785 for (const DynamicReloc &rel : relocs) {
1786 Elf_Rela r;
1787 r.r_offset = rel.getOffset();
1788 r.setSymbolAndType(rel.getSymIndex(getPartition().dynSymTab.get()),
1789 rel.type, false);
1790 r.r_addend = config->isRela ? rel.computeAddend() : 0;
1791
1792 if (r.getType(config->isMips64EL) == target->relativeRel)
1793 relatives.push_back(r);
1794 else
1795 nonRelatives.push_back(r);
1796 }
1797
1798 llvm::sort(relatives, [](const Elf_Rel &a, const Elf_Rel &b) {
1799 return a.r_offset < b.r_offset;
1800 });
1801
1802 // Try to find groups of relative relocations which are spaced one word
1803 // apart from one another. These generally correspond to vtable entries. The
1804 // format allows these groups to be encoded using a sort of run-length
1805 // encoding, but each group will cost 7 bytes in addition to the offset from
1806 // the previous group, so it is only profitable to do this for groups of
1807 // size 8 or larger.
1808 std::vector<Elf_Rela> ungroupedRelatives;
1809 std::vector<std::vector<Elf_Rela>> relativeGroups;
1810 for (auto i = relatives.begin(), e = relatives.end(); i != e;) {
1811 std::vector<Elf_Rela> group;
1812 do {
1813 group.push_back(*i++);
1814 } while (i != e && (i - 1)->r_offset + config->wordsize == i->r_offset);
1815
1816 if (group.size() < 8)
1817 ungroupedRelatives.insert(ungroupedRelatives.end(), group.begin(),
1818 group.end());
1819 else
1820 relativeGroups.emplace_back(std::move(group));
1821 }
1822
1823 // For non-relative relocations, we would like to:
1824 //   1. Have relocations with the same symbol offset be consecutive, so
1825 //      that the runtime linker can speed up symbol lookup by implementing a
1826 //      1-entry cache.
1827 // 2. Group relocations by r_info to reduce the size of the relocation
1828 // section.
1829 // Since the symbol offset is the high bits in r_info, sorting by r_info
1830 // allows us to do both.
1831 //
1832 // For Rela, we also want to sort by r_addend when r_info is the same. This
1833 // enables us to group by r_addend as well.
1834 llvm::sort(nonRelatives, [](const Elf_Rela &a, const Elf_Rela &b) {
1835 if (a.r_info != b.r_info)
1836 return a.r_info < b.r_info;
1837 if (a.r_addend != b.r_addend)
1838 return a.r_addend < b.r_addend;
1839 return a.r_offset < b.r_offset;
1840 });
1841
1842 // Group relocations with the same r_info. Note that each group emits a group
1843 // header, and that may make the relocation section larger. It is hard to
1844 // estimate the size of a group header because its encoded size varies
1845 // based on r_info.
1846 // of values encoded. Each group header contains 3 values, and each relocation
1847 // in a group encodes one less value, as compared to when it is not grouped.
1848 // Therefore, we only group relocations if there are 3 or more of them with
1849 // the same r_info.
1850 //
1851 // For Rela, the addend for most non-relative relocations is zero, and thus we
1852 // can usually get a smaller relocation section if we group relocations with 0
1853 // addend as well.
1854 std::vector<Elf_Rela> ungroupedNonRelatives;
1855 std::vector<std::vector<Elf_Rela>> nonRelativeGroups;
1856 for (auto i = nonRelatives.begin(), e = nonRelatives.end(); i != e;) {
1857 auto j = i + 1;
1858 while (j != e && i->r_info == j->r_info &&
1859 (!config->isRela || i->r_addend == j->r_addend))
1860 ++j;
1861 if (j - i < 3 || (config->isRela && i->r_addend != 0))
1862 ungroupedNonRelatives.insert(ungroupedNonRelatives.end(), i, j);
1863 else
1864 nonRelativeGroups.emplace_back(i, j);
1865 i = j;
1866 }
1867
1868 // Sort ungrouped relocations by offset to minimize the encoded length.
1869 llvm::sort(ungroupedNonRelatives, [](const Elf_Rela &a, const Elf_Rela &b) {
1870 return a.r_offset < b.r_offset;
1871 });
1872
1873 unsigned hasAddendIfRela =
1874 config->isRela ? RELOCATION_GROUP_HAS_ADDEND_FLAG : 0;
1875
1876 uint64_t offset = 0;
1877 uint64_t addend = 0;
1878
1879 // Emit the run-length encoding for the groups of adjacent relative
1880 // relocations. Each group is represented using two groups in the packed
1881 // format. The first is used to set the current offset to the start of the
1882 // group (and also encodes the first relocation), and the second encodes the
1883 // remaining relocations.
1884 for (std::vector<Elf_Rela> &g : relativeGroups) {
1885 // The first relocation in the group.
1886 add(1);
1887 add(RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG |
1888 RELOCATION_GROUPED_BY_INFO_FLAG | hasAddendIfRela);
1889 add(g[0].r_offset - offset);
1890 add(target->relativeRel);
1891 if (config->isRela) {
1892 add(g[0].r_addend - addend);
1893 addend = g[0].r_addend;
1894 }
1895
1896 // The remaining relocations.
1897 add(g.size() - 1);
1898 add(RELOCATION_GROUPED_BY_OFFSET_DELTA_FLAG |
1899 RELOCATION_GROUPED_BY_INFO_FLAG | hasAddendIfRela);
1900 add(config->wordsize);
1901 add(target->relativeRel);
1902 if (config->isRela) {
1903 for (const auto &i : llvm::drop_begin(g)) {
1904 add(i.r_addend - addend);
1905 addend = i.r_addend;
1906 }
1907 }
1908
1909 offset = g.back().r_offset;
1910 }
1911
1912 // Now the ungrouped relatives.
1913 if (!ungroupedRelatives.empty()) {
1914 add(ungroupedRelatives.size());
1915 add(RELOCATION_GROUPED_BY_INFO_FLAG | hasAddendIfRela);
1916 add(target->relativeRel);
1917 for (Elf_Rela &r : ungroupedRelatives) {
1918 add(r.r_offset - offset);
1919 offset = r.r_offset;
1920 if (config->isRela) {
1921 add(r.r_addend - addend);
1922 addend = r.r_addend;
1923 }
1924 }
1925 }
1926
1927 // Grouped non-relatives.
1928 for (ArrayRef<Elf_Rela> g : nonRelativeGroups) {
1929 add(g.size());
1930 add(RELOCATION_GROUPED_BY_INFO_FLAG);
1931 add(g[0].r_info);
1932 for (const Elf_Rela &r : g) {
1933 add(r.r_offset - offset);
1934 offset = r.r_offset;
1935 }
1936 addend = 0;
1937 }
1938
1939 // Finally the ungrouped non-relative relocations.
1940 if (!ungroupedNonRelatives.empty()) {
1941 add(ungroupedNonRelatives.size());
1942 add(hasAddendIfRela);
1943 for (Elf_Rela &r : ungroupedNonRelatives) {
1944 add(r.r_offset - offset);
1945 offset = r.r_offset;
1946 add(r.r_info);
1947 if (config->isRela) {
1948 add(r.r_addend - addend);
1949 addend = r.r_addend;
1950 }
1951 }
1952 }
1953
1954 // Don't allow the section to shrink; otherwise the size of the section can
1955 // oscillate infinitely.
1956 if (relocData.size() < oldSize)
1957 relocData.append(oldSize - relocData.size(), 0);
1958
1959 // Returns whether the section size changed. We need to keep recomputing both
1960 // section layout and the contents of this section until the size converges
1961 // because changing this section's size can affect section layout, which in
1962 // turn can affect the sizes of the LEB-encoded integers stored in this
1963 // section.
1964 return relocData.size() != oldSize;
1965}
1966
1967template <class ELFT>
1968RelrSection<ELFT>::RelrSection(unsigned concurrency)
1969 : RelrBaseSection(concurrency) {
1970 this->entsize = config->wordsize;
1971}
1972
1973template <class ELFT> bool RelrSection<ELFT>::updateAllocSize() {
1974 // This function computes the contents of an SHT_RELR packed relocation
1975 // section.
1976 //
1977 // Proposal for adding SHT_RELR sections to generic-abi is here:
1978 // https://groups.google.com/forum/#!topic/generic-abi/bX460iggiKg
1979 //
1980 // The encoded sequence of Elf64_Relr entries in a SHT_RELR section looks
1981 // like [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ]
1982 //
1983 // i.e. start with an address, followed by any number of bitmaps. The address
1984 // entry encodes 1 relocation. The subsequent bitmap entries encode up to 63
1985 // relocations each, at subsequent offsets following the last address entry.
1986 //
1987 // The bitmap entries must have 1 in the least significant bit. The assumption
1988 // here is that an address cannot have 1 in the LSB; odd addresses are not
1989 // supported.
1990 //
1991 // Excluding the least significant bit in the bitmap, each non-zero bit in
1992 // the bitmap represents a relocation to be applied to a corresponding machine
1993 // word that follows the base address word. The second least significant bit
1994 // represents the machine word immediately following the initial address, and
1995 // each bit that follows represents the next word, in linear order. As such,
1996 // a single bitmap can encode up to 31 relocations in a 32-bit object, and
1997 // 63 relocations in a 64-bit object.
1998 //
1999 // This encoding has a couple of interesting properties:
2000 // 1. Looking at any entry, it is clear whether it's an address or a bitmap:
2001 // even means address, odd means bitmap.
2002 // 2. Just a simple list of addresses is a valid encoding.
2003
2004 size_t oldSize = relrRelocs.size();
2005 relrRelocs.clear();
2006
2007 // Same as config->wordsize but faster because this is a compile-time
2008 // constant.
2009 const size_t wordsize = sizeof(typename ELFT::uint);
2010
2011 // Number of bits to use for the relocation offsets bitmap.
2012 // Must be either 63 or 31.
2013 const size_t nBits = wordsize * 8 - 1;
2014
2015 // Get offsets for all relative relocations and sort them.
2016 std::unique_ptr<uint64_t[]> offsets(new uint64_t[relocs.size()]);
    1: Storing uninitialized value
    2: Calling constructor for 'unique_ptr<unsigned long[], std::default_delete<unsigned long[]>>'
    7: Returning from constructor for 'unique_ptr<unsigned long[], std::default_delete<unsigned long[]>>'
2017 for (auto [i, r] : llvm::enumerate(relocs))
2018 offsets[i] = r.getOffset();
2019 llvm::sort(offsets.get(), offsets.get() + relocs.size());
    8: Calling 'sort<unsigned long *>'
    16: Returning from 'sort<unsigned long *>'
2020
2021 // For each leading relocation, find the following ones that can be folded
2022 // into a bitmap and fold them.
2023 for (size_t i = 0, e = relocs.size(); i != e;) {
    17: 'i' initialized to 0
    18: Assuming 'i' is not equal to 'e'
    19: Loop condition is true. Entering loop body
2024 // Add a leading relocation.
2025 relrRelocs.push_back(Elf_Relr(offsets[i]));
    20: Passing the value 0 via 1st parameter '__i'
    21: 1st function call argument is an uninitialized value
2026 uint64_t base = offsets[i] + wordsize;
2027 ++i;
2028
2029 // Find foldable relocations to construct bitmaps.
2030 for (;;) {
2031 uint64_t bitmap = 0;
2032 for (; i != e; ++i) {
2033 uint64_t d = offsets[i] - base;
2034 if (d >= nBits * wordsize || d % wordsize)
2035 break;
2036 bitmap |= uint64_t(1) << (d / wordsize);
2037 }
2038 if (!bitmap)
2039 break;
2040 relrRelocs.push_back(Elf_Relr((bitmap << 1) | 1));
2041 base += nBits * wordsize;
2042 }
2043 }
2044
2045 // Don't allow the section to shrink; otherwise the size of the section can
2046 // oscillate infinitely. Trailing 1s do not decode to more relocations.
2047 if (relrRelocs.size() < oldSize) {
2048 log(".relr.dyn needs " + Twine(oldSize - relrRelocs.size()) +
2049 " padding word(s)");
2050 relrRelocs.resize(oldSize, Elf_Relr(1));
2051 }
2052
2053 return relrRelocs.size() != oldSize;
2054}
2055
2056SymbolTableBaseSection::SymbolTableBaseSection(StringTableSection &strTabSec)
2057 : SyntheticSection(strTabSec.isDynamic() ? (uint64_t)SHF_ALLOC : 0,
2058 strTabSec.isDynamic() ? SHT_DYNSYM : SHT_SYMTAB,
2059 config->wordsize,
2060 strTabSec.isDynamic() ? ".dynsym" : ".symtab"),
2061 strTabSec(strTabSec) {}
2062
2063// Orders symbols according to their positions in the GOT,
2064// in compliance with MIPS ABI rules.
2065// See "Global Offset Table" in Chapter 5 in the following document
2066// for detailed description:
2067// ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
2068static bool sortMipsSymbols(const SymbolTableEntry &l,
2069 const SymbolTableEntry &r) {
2070 // Sort entries related to non-local preemptible symbols by GOT indexes.
2071 // All other entries go to the beginning of a dynsym in arbitrary order.
2072 if (l.sym->isInGot() && r.sym->isInGot())
2073 return l.sym->getGotIdx() < r.sym->getGotIdx();
2074 if (!l.sym->isInGot() && !r.sym->isInGot())
2075 return false;
2076 return !l.sym->isInGot();
2077}
2078
2079void SymbolTableBaseSection::finalizeContents() {
2080 if (OutputSection *sec = strTabSec.getParent())
2081 getParent()->link = sec->sectionIndex;
2082
2083 if (this->type != SHT_DYNSYM) {
2084 sortSymTabSymbols();
2085 return;
2086 }
2087
2088 // If it is a .dynsym, there should be no local symbols, but we need
2089 // to do a few things for the dynamic linker.
2090
2091 // Section's Info field has the index of the first non-local symbol.
2092 // Because the first symbol entry is a null entry, 1 is the first.
2093 getParent()->info = 1;
2094
2095 if (getPartition().gnuHashTab) {
2096 // NB: It also sorts Symbols to meet the GNU hash table requirements.
2097 getPartition().gnuHashTab->addSymbols(symbols);
2098 } else if (config->emachine == EM_MIPS) {
2099 llvm::stable_sort(symbols, sortMipsSymbols);
2100 }
2101
2102 // Only the main partition's dynsym indexes are stored in the symbols
2103 // themselves. All other partitions use a lookup table.
2104 if (this == mainPart->dynSymTab.get()) {
2105 size_t i = 0;
2106 for (const SymbolTableEntry &s : symbols)
2107 s.sym->dynsymIndex = ++i;
2108 }
2109}
2110
2111// The ELF spec requires that all local symbols precede global symbols, so we
2112// sort symbol entries in this function. (For .dynsym, we don't do that because
2113// symbols for dynamic linking are inherently all globals.)
2114//
2115 // Aside from the above, we put local symbols in groups starting with the STT_FILE
2116 // symbol. That is convenient for the purpose of identifying where local symbols
2117 // are coming from.
2118void SymbolTableBaseSection::sortSymTabSymbols() {
2119 // Move all local symbols before global symbols.
2120 auto e = std::stable_partition(
2121 symbols.begin(), symbols.end(),
2122 [](const SymbolTableEntry &s) { return s.sym->isLocal(); });
2123 size_t numLocals = e - symbols.begin();
2124 getParent()->info = numLocals + 1;
2125
2126 // We want to group the local symbols by file. For that we rebuild the local
2127 // part of the symbols vector. We do not need to care about the STT_FILE
2128 // symbols, they are already naturally placed first in each group. That
2129 // happens because STT_FILE is always the first symbol in the object and hence
2130 // precedes all other local symbols we add for a file.
2131 MapVector<InputFile *, SmallVector<SymbolTableEntry, 0>> arr;
2132 for (const SymbolTableEntry &s : llvm::make_range(symbols.begin(), e))
2133 arr[s.sym->file].push_back(s);
2134
2135 auto i = symbols.begin();
2136 for (auto &p : arr)
2137 for (SymbolTableEntry &entry : p.second)
2138 *i++ = entry;
2139}
2140
2141void SymbolTableBaseSection::addSymbol(Symbol *b) {
2142 // Adding a local symbol to a .dynsym is a bug.
2143   assert(this->type != SHT_DYNSYM || !b->isLocal());
2144 symbols.push_back({b, strTabSec.addString(b->getName(), false)});
2145}
2146
2147size_t SymbolTableBaseSection::getSymbolIndex(Symbol *sym) {
2148 if (this == mainPart->dynSymTab.get())
2149 return sym->dynsymIndex;
2150
2151 // Initializes symbol lookup tables lazily. This is used only for -r,
2152 // --emit-relocs and dynsyms in partitions other than the main one.
2153 llvm::call_once(onceFlag, [&] {
2154 symbolIndexMap.reserve(symbols.size());
2155 size_t i = 0;
2156 for (const SymbolTableEntry &e : symbols) {
2157 if (e.sym->type == STT_SECTION)
2158 sectionIndexMap[e.sym->getOutputSection()] = ++i;
2159 else
2160 symbolIndexMap[e.sym] = ++i;
2161 }
2162 });
2163
2164 // Section symbols are mapped based on their output sections
2165 // to maintain their semantics.
2166 if (sym->type == STT_SECTION)
2167 return sectionIndexMap.lookup(sym->getOutputSection());
2168 return symbolIndexMap.lookup(sym);
2169}
2170
2171template <class ELFT>
2172SymbolTableSection<ELFT>::SymbolTableSection(StringTableSection &strTabSec)
2173 : SymbolTableBaseSection(strTabSec) {
2174 this->entsize = sizeof(Elf_Sym);
2175}
2176
2177static BssSection *getCommonSec(Symbol *sym) {
2178 if (config->relocatable)
2179 if (auto *d = dyn_cast<Defined>(sym))
2180 return dyn_cast_or_null<BssSection>(d->section);
2181 return nullptr;
2182}
2183
2184static uint32_t getSymSectionIndex(Symbol *sym) {
2185   assert(!(sym->hasFlag(NEEDS_COPY) && sym->isObject()));
2186 if (!isa<Defined>(sym) || sym->hasFlag(NEEDS_COPY))
2187 return SHN_UNDEF;
2188 if (const OutputSection *os = sym->getOutputSection())
2189 return os->sectionIndex >= SHN_LORESERVE ? (uint32_t)SHN_XINDEX
2190 : os->sectionIndex;
2191 return SHN_ABS;
2192}
2193
2194// Write the internal symbol table contents to the output symbol table.
2195template <class ELFT> void SymbolTableSection<ELFT>::writeTo(uint8_t *buf) {
2196 // The first entry is a null entry as per the ELF spec.
2197 buf += sizeof(Elf_Sym);
2198
2199 auto *eSym = reinterpret_cast<Elf_Sym *>(buf);
2200
2201 for (SymbolTableEntry &ent : symbols) {
2202 Symbol *sym = ent.sym;
2203 bool isDefinedHere = type == SHT_SYMTAB || sym->partition == partition;
2204
2205 // Set st_name, st_info and st_other.
2206 eSym->st_name = ent.strTabOffset;
2207 eSym->setBindingAndType(sym->binding, sym->type);
2208 eSym->st_other = sym->stOther;
2209
2210 if (BssSection *commonSec = getCommonSec(sym)) {
2211 // When -r is specified, a COMMON symbol is not allocated. Its st_shndx
2212 // holds SHN_COMMON and st_value holds the alignment.
2213 eSym->st_shndx = SHN_COMMON;
2214 eSym->st_value = commonSec->addralign;
2215 eSym->st_size = cast<Defined>(sym)->size;
2216 } else {
2217 const uint32_t shndx = getSymSectionIndex(sym);
2218 if (isDefinedHere) {
2219 eSym->st_shndx = shndx;
2220 eSym->st_value = sym->getVA();
2221 // Copy symbol size if it is a defined symbol. st_size is not
2222 // significant for undefined symbols, so whether copying it or not is up
2223 // to us if that's the case. We'll leave it as zero because by not
2224 // setting a value, we can get the exact same outputs for two sets of
2225 // input files that differ only in undefined symbol size in DSOs.
2226 eSym->st_size = shndx != SHN_UNDEF ? cast<Defined>(sym)->size : 0;
2227 } else {
2228 eSym->st_shndx = 0;
2229 eSym->st_value = 0;
2230 eSym->st_size = 0;
2231 }
2232 }
2233
2234 ++eSym;
2235 }
2236
2237 // On MIPS we need to mark a symbol which has a PLT entry and requires
2238 // pointer equality with the STO_MIPS_PLT flag. That is necessary to help the
2239 // dynamic linker distinguish such symbols from MIPS lazy-binding stubs.
2240 // https://sourceware.org/ml/binutils/2008-07/txt00000.txt
2241 if (config->emachine == EM_MIPS) {
2242 auto *eSym = reinterpret_cast<Elf_Sym *>(buf);
2243
2244 for (SymbolTableEntry &ent : symbols) {
2245 Symbol *sym = ent.sym;
2246 if (sym->isInPlt() && sym->hasFlag(NEEDS_COPY))
2247 eSym->st_other |= STO_MIPS_PLT;
2248 if (isMicroMips()) {
2249 // We already set the least significant bit for symbols
2250 // marked by the `STO_MIPS_MICROMIPS` flag and for microMIPS PLT
2251 // records. That allows us to distinguish such symbols in
2252 // the `MIPS<ELFT>::relocate()` routine. Now we should
2253 // clear that bit for the non-dynamic symbol table, so tools
2254 // like `objdump` will be able to deal with a correct
2255 // symbol position.
2256 if (sym->isDefined() &&
2257 ((sym->stOther & STO_MIPS_MICROMIPS) || sym->hasFlag(NEEDS_COPY))) {
2258 if (!strTabSec.isDynamic())
2259 eSym->st_value &= ~1;
2260 eSym->st_other |= STO_MIPS_MICROMIPS;
2261 }
2262 }
2263 if (config->relocatable)
2264 if (auto *d = dyn_cast<Defined>(sym))
2265 if (isMipsPIC<ELFT>(d))
2266 eSym->st_other |= STO_MIPS_PIC;
2267 ++eSym;
2268 }
2269 }
2270}
2271
2272SymtabShndxSection::SymtabShndxSection()
2273 : SyntheticSection(0, SHT_SYMTAB_SHNDX, 4, ".symtab_shndx") {
2274 this->entsize = 4;
2275}
2276
2277void SymtabShndxSection::writeTo(uint8_t *buf) {
2278 // We write an array of 32-bit values, where each value has a 1:1 association
2279 // with an entry in .symtab. If the corresponding entry contains SHN_XINDEX,
2280 // we need to write the actual index; otherwise, we must write SHN_UNDEF (0).
2281 buf += 4; // Ignore .symtab[0] entry.
2282 for (const SymbolTableEntry &entry : in.symTab->getSymbols()) {
2283 if (!getCommonSec(entry.sym) && getSymSectionIndex(entry.sym) == SHN_XINDEX)
2284 write32(buf, entry.sym->getOutputSection()->sectionIndex);
2285 buf += 4;
2286 }
2287}
2288
2289bool SymtabShndxSection::isNeeded() const {
2290 // SHT_SYMTAB can hold symbols with section index values up to
2291 // SHN_LORESERVE. If we need more, we want to use the SHT_SYMTAB_SHNDX
2292 // extension section. The problem is that we reveal the final section indices a
2293 // bit too late, and we do not know them here. For simplicity, we just always
2294 // create a .symtab_shndx section when the number of output sections is huge.
2295 size_t size = 0;
2296 for (SectionCommand *cmd : script->sectionCommands)
2297 if (isa<OutputDesc>(cmd))
2298 ++size;
2299 return size >= SHN_LORESERVE;
2300}
2301
2302void SymtabShndxSection::finalizeContents() {
2303 getParent()->link = in.symTab->getParent()->sectionIndex;
2304}
2305
2306size_t SymtabShndxSection::getSize() const {
2307 return in.symTab->getNumSymbols() * 4;
2308}
2309
2310// .hash and .gnu.hash sections contain on-disk hash tables that map
2311// symbol names to their dynamic symbol table indices. Their purpose
2312// is to help the dynamic linker resolve symbols quickly. If ELF files
2313// don't have them, the dynamic linker has to do linear search on all
2314// dynamic symbols, which makes programs slower. Therefore, a .hash
2315// section is added to a DSO by default.
2316//
2317// The Unix semantics of resolving dynamic symbols is somewhat expensive.
2318// Each ELF file has a list of DSOs that the ELF file depends on and a
2319// list of dynamic symbols that need to be resolved from any of the
2320// DSOs. That means resolving all dynamic symbols takes O(m)*O(n)
2321// where m is the number of DSOs and n is the number of dynamic
2322// symbols. For modern large programs, both m and n are large. So
2323// making each step faster by using hash tables substantially
2324// improves time to load programs.
2325//
2326 // (Note that this is not the only way to design shared libraries.
2327 // For instance, Windows DLLs take a different approach. On
2328 // Windows, each dynamic symbol carries the name of the DLL from which the
2329 // symbol has to be resolved. That makes the cost of symbol resolution O(n).
2330 // This disables some hacky techniques you can use on Unix such as
2331 // LD_PRELOAD, but this is arguably better semantics than the Unix ones.)
2332//
2333// Due to historical reasons, we have two different hash tables, .hash
2334// and .gnu.hash. They are for the same purpose, and .gnu.hash is a new
2335// and better version of .hash. .hash is just an on-disk hash table, but
2336// .gnu.hash has a bloom filter in addition to a hash table to skip
2337// DSOs very quickly. If you are sure that your dynamic linker knows
2338// about .gnu.hash, you want to specify --hash-style=gnu. Otherwise, a
2339// safe bet is to specify --hash-style=both for backward compatibility.
2340GnuHashTableSection::GnuHashTableSection()
2341 : SyntheticSection(SHF_ALLOC, SHT_GNU_HASH, config->wordsize, ".gnu.hash") {
2342}
2343
2344void GnuHashTableSection::finalizeContents() {
2345 if (OutputSection *sec = getPartition().dynSymTab->getParent())
2346 getParent()->link = sec->sectionIndex;
2347
2348   // Compute the Bloom filter size in words. We want to allocate 12
2349   // bits for each symbol. The size must be a power of two.
2350 if (symbols.empty()) {
2351 maskWords = 1;
2352 } else {
2353 uint64_t numBits = symbols.size() * 12;
2354 maskWords = NextPowerOf2(numBits / (config->wordsize * 8));
2355 }
2356
2357 size = 16; // Header
2358 size += config->wordsize * maskWords; // Bloom filter
2359 size += nBuckets * 4; // Hash buckets
2360 size += symbols.size() * 4; // Hash values
2361}
2362
2363void GnuHashTableSection::writeTo(uint8_t *buf) {
2364 // Write a header.
2365 write32(buf, nBuckets);
2366 write32(buf + 4, getPartition().dynSymTab->getNumSymbols() - symbols.size());
2367 write32(buf + 8, maskWords);
2368 write32(buf + 12, Shift2);
2369 buf += 16;
2370
2371 // Write the 2-bit bloom filter.
2372 const unsigned c = config->is64 ? 64 : 32;
2373 for (const Entry &sym : symbols) {
2374 // When C = 64, we choose a word with bits [6:...] and set 1 to two bits in
2375 // the word using bits [0:5] and [26:31].
2376 size_t i = (sym.hash / c) & (maskWords - 1);
2377 uint64_t val = readUint(buf + i * config->wordsize);
2378 val |= uint64_t(1) << (sym.hash % c);
2379 val |= uint64_t(1) << ((sym.hash >> Shift2) % c);
2380 writeUint(buf + i * config->wordsize, val);
2381 }
2382 buf += config->wordsize * maskWords;
2383
2384 // Write the hash table.
2385 uint32_t *buckets = reinterpret_cast<uint32_t *>(buf);
2386 uint32_t oldBucket = -1;
2387 uint32_t *values = buckets + nBuckets;
2388 for (auto i = symbols.begin(), e = symbols.end(); i != e; ++i) {
2389 // Write a hash value. It represents a sequence of chains that share the
2390 // same hash modulo value. The last element of each chain is terminated by
2391 // LSB 1.
2392 uint32_t hash = i->hash;
2393 bool isLastInChain = (i + 1) == e || i->bucketIdx != (i + 1)->bucketIdx;
2394 hash = isLastInChain ? hash | 1 : hash & ~1;
2395 write32(values++, hash);
2396
2397 if (i->bucketIdx == oldBucket)
2398 continue;
2399 // Write a hash bucket. Hash buckets contain indices in the following hash
2400 // value table.
2401 write32(buckets + i->bucketIdx,
2402 getPartition().dynSymTab->getSymbolIndex(i->sym));
2403 oldBucket = i->bucketIdx;
2404 }
2405}
2406
2407 // Add symbols to this symbol hash table. Note that this function
2408 // destructively sorts a given vector -- which is needed because the
2409 // GNU-style hash table imposes some sorting requirements.
2410void GnuHashTableSection::addSymbols(SmallVectorImpl<SymbolTableEntry> &v) {
2411   // Historically we could not use 'auto' for 'mid' because GCC 6.1 could
2412   // not deduce its type correctly.
2413 auto mid =
2414 std::stable_partition(v.begin(), v.end(), [&](const SymbolTableEntry &s) {
2415 return !s.sym->isDefined() || s.sym->partition != partition;
2416 });
2417
2418 // We chose load factor 4 for the on-disk hash table. For each hash
2419 // collision, the dynamic linker will compare a uint32_t hash value.
2420 // Since the integer comparison is quite fast, we believe we can
2421 // make the load factor even larger. 4 is just a conservative choice.
2422 //
2423 // Note that we don't want to create a zero-sized hash table because
2424 // Android loader as of 2018 doesn't like a .gnu.hash containing such
2425 // table. If that's the case, we create a hash table with one unused
2426 // dummy slot.
2427 nBuckets = std::max<size_t>((v.end() - mid) / 4, 1);
2428
2429 if (mid == v.end())
2430 return;
2431
2432 for (SymbolTableEntry &ent : llvm::make_range(mid, v.end())) {
2433 Symbol *b = ent.sym;
2434 uint32_t hash = hashGnu(b->getName());
2435 uint32_t bucketIdx = hash % nBuckets;
2436 symbols.push_back({b, ent.strTabOffset, hash, bucketIdx});
2437 }
2438
2439 llvm::sort(symbols, [](const Entry &l, const Entry &r) {
2440 return std::tie(l.bucketIdx, l.strTabOffset) <
2441 std::tie(r.bucketIdx, r.strTabOffset);
2442 });
2443
2444 v.erase(mid, v.end());
2445 for (const Entry &ent : symbols)
2446 v.push_back({ent.sym, ent.strTabOffset});
2447}
2448
2449HashTableSection::HashTableSection()
2450 : SyntheticSection(SHF_ALLOC, SHT_HASH, 4, ".hash") {
2451 this->entsize = 4;
2452}
2453
2454void HashTableSection::finalizeContents() {
2455 SymbolTableBaseSection *symTab = getPartition().dynSymTab.get();
2456
2457 if (OutputSection *sec = symTab->getParent())
2458 getParent()->link = sec->sectionIndex;
2459
2460 unsigned numEntries = 2; // nbucket and nchain.
2461 numEntries += symTab->getNumSymbols(); // The chain entries.
2462
2463 // Create as many buckets as there are symbols.
2464 numEntries += symTab->getNumSymbols();
2465 this->size = numEntries * 4;
2466}
2467
2468void HashTableSection::writeTo(uint8_t *buf) {
2469 SymbolTableBaseSection *symTab = getPartition().dynSymTab.get();
2470 unsigned numSymbols = symTab->getNumSymbols();
2471
2472 uint32_t *p = reinterpret_cast<uint32_t *>(buf);
2473 write32(p++, numSymbols); // nbucket
2474 write32(p++, numSymbols); // nchain
2475
2476 uint32_t *buckets = p;
2477 uint32_t *chains = p + numSymbols;
2478
2479 for (const SymbolTableEntry &s : symTab->getSymbols()) {
2480 Symbol *sym = s.sym;
2481 StringRef name = sym->getName();
2482 unsigned i = sym->dynsymIndex;
2483 uint32_t hash = hashSysV(name) % numSymbols;
2484 chains[i] = buckets[hash];
2485 write32(buckets + hash, i);
2486 }
2487}
2488
2489PltSection::PltSection()
2490 : SyntheticSection(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS, 16, ".plt"),
2491 headerSize(target->pltHeaderSize) {
2492 // On PowerPC, this section contains lazy symbol resolvers.
2493 if (config->emachine == EM_PPC64) {
2494 name = ".glink";
2495 addralign = 4;
2496 }
2497
2498 // On x86 when IBT is enabled, this section contains the second PLT (lazy
2499 // symbol resolvers).
2500 if ((config->emachine == EM_386 || config->emachine == EM_X86_64) &&
2501 (config->andFeatures & GNU_PROPERTY_X86_FEATURE_1_IBT))
2502 name = ".plt.sec";
2503
2504 // The PLT needs to be writable on SPARC as the dynamic linker will
2505 // modify the instructions in the PLT entries.
2506 if (config->emachine == EM_SPARCV9)
2507 this->flags |= SHF_WRITE;
2508}
2509
2510void PltSection::writeTo(uint8_t *buf) {
2511   // At the beginning of the PLT, we have code to call the dynamic
2512   // linker to resolve dynsyms at runtime. Write such code.
2513 target->writePltHeader(buf);
2514 size_t off = headerSize;
2515
2516 for (const Symbol *sym : entries) {
2517 target->writePlt(buf + off, *sym, getVA() + off);
2518 off += target->pltEntrySize;
2519 }
2520}
2521
2522void PltSection::addEntry(Symbol &sym) {
2523   assert(sym.auxIdx == symAux.size() - 1);
2524 symAux.back().pltIdx = entries.size();
2525 entries.push_back(&sym);
2526}
2527
2528size_t PltSection::getSize() const {
2529 return headerSize + entries.size() * target->pltEntrySize;
2530}
2531
2532bool PltSection::isNeeded() const {
2533 // For -z retpolineplt, .iplt needs the .plt header.
2534 return !entries.empty() || (config->zRetpolineplt && in.iplt->isNeeded());
2535}
2536
2537// Used by ARM to add mapping symbols in the PLT section, which aid
2538// disassembly.
2539void PltSection::addSymbols() {
2540 target->addPltHeaderSymbols(*this);
2541
2542 size_t off = headerSize;
2543 for (size_t i = 0; i < entries.size(); ++i) {
2544 target->addPltSymbols(*this, off);
2545 off += target->pltEntrySize;
2546 }
2547}
2548
2549IpltSection::IpltSection()
2550 : SyntheticSection(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS, 16, ".iplt") {
2551 if (config->emachine == EM_PPC || config->emachine == EM_PPC64) {
2552 name = ".glink";
2553 addralign = 4;
2554 }
2555}
2556
2557void IpltSection::writeTo(uint8_t *buf) {
2558 uint32_t off = 0;
2559 for (const Symbol *sym : entries) {
2560 target->writeIplt(buf + off, *sym, getVA() + off);
2561 off += target->ipltEntrySize;
2562 }
2563}
2564
2565size_t IpltSection::getSize() const {
2566 return entries.size() * target->ipltEntrySize;
2567}
2568
2569void IpltSection::addEntry(Symbol &sym) {
2570   assert(sym.auxIdx == symAux.size() - 1);
2571 symAux.back().pltIdx = entries.size();
2572 entries.push_back(&sym);
2573}
2574
2575// ARM uses mapping symbols to aid disassembly.
2576void IpltSection::addSymbols() {
2577 size_t off = 0;
2578 for (size_t i = 0, e = entries.size(); i != e; ++i) {
2579 target->addPltSymbols(*this, off);
2580 off += target->pltEntrySize;
2581 }
2582}
2583
2584PPC32GlinkSection::PPC32GlinkSection() {
2585 name = ".glink";
2586 addralign = 4;
2587}
2588
2589void PPC32GlinkSection::writeTo(uint8_t *buf) {
2590 writePPC32GlinkSection(buf, entries.size());
2591}
2592
2593size_t PPC32GlinkSection::getSize() const {
2594 return headerSize + entries.size() * target->pltEntrySize + footerSize;
2595}
2596
2597 // This is an x86-only extra PLT section, used only when a security
2598 // enhancement feature called CET is enabled. In this comment, I'll explain what
2599// the feature is and why we have two PLT sections if CET is enabled.
2600//
2601// So, what does CET do? CET introduces a new restriction to indirect jump
2602// instructions. CET works this way. Assume that CET is enabled. Then, if you
2603// execute an indirect jump instruction, the processor verifies that a special
2604// "landing pad" instruction (which is actually a repurposed NOP instruction and
2605// now called "endbr32" or "endbr64") is at the jump target. If the jump target
2606// does not start with that instruction, the processor raises an exception
2607// instead of continuing executing code.
2608//
2609// If CET is enabled, the compiler emits endbr to all locations where indirect
2610// jumps may jump to.
2611//
2612 // This mechanism makes it extremely hard to transfer control to the middle of
2613 // a function that is not supposed to be an indirect jump target, preventing
2614 // certain types of attacks such as ROP or JOP.
2615//
2616// Note that the processors in the market as of 2019 don't actually support the
2617// feature. Only the spec is available at the moment.
2618//
2619// Now, I'll explain why we have this extra PLT section for CET.
2620//
2621// Since you can indirectly jump to a PLT entry, we have to make PLT entries
2622// start with endbr. The problem is there's no extra space for endbr (which is 4
2623// bytes long), as the PLT entry is only 16 bytes long and all bytes are already
2624// used.
2625//
2626// In order to deal with the issue, we split a PLT entry into two PLT entries.
2627// Remember that each PLT entry contains code to jump to an address read from
2628// .got.plt AND code to resolve a dynamic symbol lazily. With the 2-PLT scheme,
2629// the former code is written to .plt.sec, and the latter code is written to
2630// .plt.
2631//
2632// Lazy symbol resolution in the 2-PLT scheme works in the usual way, except
2633// that the regular .plt is now called .plt.sec and .plt is repurposed to
2634// contain only code for lazy symbol resolution.
2635//
2636// In other words, this is how the 2-PLT scheme works. Application code is
2637// supposed to jump to .plt.sec to call an external function. Each .plt.sec
2638// entry contains code to read an address from a corresponding .got.plt entry
2639// and jump to that address. Addresses in .got.plt initially point to .plt, so
2640// when an application calls an external function for the first time, the
2641// control is transferred to a function that resolves a symbol name from
2642// external shared object files. That function then rewrites a .got.plt entry
2643// with a resolved address, so that the subsequent function calls directly jump
2644// to a desired location from .plt.sec.
2645//
2646// There is an open question as to whether the 2-PLT scheme was desirable or
2647// not. We could have simply extended the PLT entry size to 32-bytes to
2648// accommodate endbr, and that scheme would have been much simpler than the
2649 // 2-PLT scheme. One reason to split the PLT was that, by doing so, we could
2650 // keep hot code (.plt.sec) separate from cold code (.plt). But as far as I
2651 // know, no one has proved that the optimization actually makes a difference.
2652//
2653// That said, the 2-PLT scheme is a part of the ABI, debuggers and other tools
2654// depend on it, so we implement the ABI.
2655IBTPltSection::IBTPltSection()
2656 : SyntheticSection(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS, 16, ".plt") {}
2657
2658void IBTPltSection::writeTo(uint8_t *buf) {
2659 target->writeIBTPlt(buf, in.plt->getNumEntries());
2660}
2661
2662size_t IBTPltSection::getSize() const {
2663 // 16 is the header size of .plt.
2664 return 16 + in.plt->getNumEntries() * target->pltEntrySize;
2665}
2666
2667bool IBTPltSection::isNeeded() const { return in.plt->getNumEntries() > 0; }
2668
2669// The string hash function for .gdb_index.
2670static uint32_t computeGdbHash(StringRef s) {
2671 uint32_t h = 0;
2672 for (uint8_t c : s)
2673 h = h * 67 + toLower(c) - 113;
2674 return h;
2675}
2676
2677GdbIndexSection::GdbIndexSection()
2678 : SyntheticSection(0, SHT_PROGBITS, 1, ".gdb_index") {}
2679
2680// Returns the desired size of an on-disk hash table for a .gdb_index section.
2681 // There's a tradeoff between size and collision rate. We aim for 75% utilization.
2682size_t GdbIndexSection::computeSymtabSize() const {
2683 return std::max<size_t>(NextPowerOf2(symbols.size() * 4 / 3), 1024);
2684}
2685
2686static SmallVector<GdbIndexSection::CuEntry, 0>
2687readCuList(DWARFContext &dwarf) {
2688 SmallVector<GdbIndexSection::CuEntry, 0> ret;
2689 for (std::unique_ptr<DWARFUnit> &cu : dwarf.compile_units())
2690 ret.push_back({cu->getOffset(), cu->getLength() + 4});
2691 return ret;
2692}
2693
2694static SmallVector<GdbIndexSection::AddressEntry, 0>
2695readAddressAreas(DWARFContext &dwarf, InputSection *sec) {
2696 SmallVector<GdbIndexSection::AddressEntry, 0> ret;
2697
2698 uint32_t cuIdx = 0;
2699 for (std::unique_ptr<DWARFUnit> &cu : dwarf.compile_units()) {
2700 if (Error e = cu->tryExtractDIEsIfNeeded(false)) {
2701 warn(toString(sec) + ": " + toString(std::move(e)));
2702 return {};
2703 }
2704 Expected<DWARFAddressRangesVector> ranges = cu->collectAddressRanges();
2705 if (!ranges) {
2706 warn(toString(sec) + ": " + toString(ranges.takeError()));
2707 return {};
2708 }
2709
2710 ArrayRef<InputSectionBase *> sections = sec->file->getSections();
2711 for (DWARFAddressRange &r : *ranges) {
2712 if (r.SectionIndex == -1ULL)
2713 continue;
2714 // Range list with zero size has no effect.
2715 InputSectionBase *s = sections[r.SectionIndex];
2716 if (s && s != &InputSection::discarded && s->isLive())
2717 if (r.LowPC != r.HighPC)
2718 ret.push_back({cast<InputSection>(s), r.LowPC, r.HighPC, cuIdx});
2719 }
2720 ++cuIdx;
2721 }
2722
2723 return ret;
2724}
2725
2726template <class ELFT>
2727static SmallVector<GdbIndexSection::NameAttrEntry, 0>
2728readPubNamesAndTypes(const LLDDwarfObj<ELFT> &obj,
2729 const SmallVectorImpl<GdbIndexSection::CuEntry> &cus) {
2730 const LLDDWARFSection &pubNames = obj.getGnuPubnamesSection();
2731 const LLDDWARFSection &pubTypes = obj.getGnuPubtypesSection();
2732
2733 SmallVector<GdbIndexSection::NameAttrEntry, 0> ret;
2734 for (const LLDDWARFSection *pub : {&pubNames, &pubTypes}) {
2735 DWARFDataExtractor data(obj, *pub, config->isLE, config->wordsize);
2736 DWARFDebugPubTable table;
2737 table.extract(data, /*GnuStyle=*/true, [&](Error e) {
2738 warn(toString(pub->sec) + ": " + toString(std::move(e)));
2739 });
2740 for (const DWARFDebugPubTable::Set &set : table.getData()) {
2741 // The value written into the constant pool is kind << 24 | cuIndex. As we
2742 // don't know how many compilation units precede this object to compute
2743 // cuIndex, we compute (kind << 24 | cuIndexInThisObject) instead, and add
2744 // the number of preceding compilation units later.
2745 uint32_t i = llvm::partition_point(cus,
2746 [&](GdbIndexSection::CuEntry cu) {
2747 return cu.cuOffset < set.Offset;
2748 }) -
2749 cus.begin();
2750 for (const DWARFDebugPubTable::Entry &ent : set.Entries)
2751 ret.push_back({{ent.Name, computeGdbHash(ent.Name)},
2752 (ent.Descriptor.toBits() << 24) | i});
2753 }
2754 }
2755 return ret;
2756}
2757
2758// Create a list of symbols from a given list of symbol names and types
2759// by uniquifying them by name.
2760static std::pair<SmallVector<GdbIndexSection::GdbSymbol, 0>, size_t>
2761createSymbols(
2762 ArrayRef<SmallVector<GdbIndexSection::NameAttrEntry, 0>> nameAttrs,
2763 const SmallVector<GdbIndexSection::GdbChunk, 0> &chunks) {
2764 using GdbSymbol = GdbIndexSection::GdbSymbol;
2765 using NameAttrEntry = GdbIndexSection::NameAttrEntry;
2766
2767 // For each chunk, compute the number of compilation units preceding it.
2768 uint32_t cuIdx = 0;
2769 std::unique_ptr<uint32_t[]> cuIdxs(new uint32_t[chunks.size()]);
2770 for (uint32_t i = 0, e = chunks.size(); i != e; ++i) {
2771 cuIdxs[i] = cuIdx;
2772 cuIdx += chunks[i].compilationUnits.size();
2773 }
2774
2775 // The number of symbols we will handle in this function is of the order
2776 // of millions for very large executables, so we use multi-threading to
2777 // speed it up.
2778 constexpr size_t numShards = 32;
2779 const size_t concurrency =
2780 llvm::bit_floor(std::min<size_t>(config->threadCount, numShards));
2781
2782 // A sharded map to uniquify symbols by name.
2783 auto map =
2784 std::make_unique<DenseMap<CachedHashStringRef, size_t>[]>(numShards);
2785 size_t shift = 32 - llvm::countr_zero(numShards);
2786
2787 // Instantiate GdbSymbols while uniquifying them by name.
2788 auto symbols = std::make_unique<SmallVector<GdbSymbol, 0>[]>(numShards);
2789
2790 parallelFor(0, concurrency, [&](size_t threadId) {
2791 uint32_t i = 0;
2792 for (ArrayRef<NameAttrEntry> entries : nameAttrs) {
2793 for (const NameAttrEntry &ent : entries) {
2794 size_t shardId = ent.name.hash() >> shift;
2795 if ((shardId & (concurrency - 1)) != threadId)
2796 continue;
2797
2798 uint32_t v = ent.cuIndexAndAttrs + cuIdxs[i];
2799 size_t &idx = map[shardId][ent.name];
2800 if (idx) {
2801 symbols[shardId][idx - 1].cuVector.push_back(v);
2802 continue;
2803 }
2804
2805 idx = symbols[shardId].size() + 1;
2806 symbols[shardId].push_back({ent.name, {v}, 0, 0});
2807 }
2808 ++i;
2809 }
2810 });
2811
2812 size_t numSymbols = 0;
2813 for (ArrayRef<GdbSymbol> v : ArrayRef(symbols.get(), numShards))
2814 numSymbols += v.size();
2815
2816 // The return type is a flattened vector, so we'll copy each vector
2817 // contents to Ret.
2818 SmallVector<GdbSymbol, 0> ret;
2819 ret.reserve(numSymbols);
2820 for (SmallVector<GdbSymbol, 0> &vec :
2821 MutableArrayRef(symbols.get(), numShards))
2822 for (GdbSymbol &sym : vec)
2823 ret.push_back(std::move(sym));
2824
2825 // CU vectors and symbol names are adjacent in the output file.
2826 // We can compute their offsets in the output file now.
2827 size_t off = 0;
2828 for (GdbSymbol &sym : ret) {
2829 sym.cuVectorOff = off;
2830 off += (sym.cuVector.size() + 1) * 4;
2831 }
2832 for (GdbSymbol &sym : ret) {
2833 sym.nameOff = off;
2834 off += sym.name.size() + 1;
2835 }
2836 // If off overflows, the last symbol's nameOff likely overflows.
2837 if (!isUInt<32>(off))
2838 errorOrWarn("--gdb-index: constant pool size (" + Twine(off) +
2839 ") exceeds UINT32_MAX");
2840
2841 return {ret, off};
2842}
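A single-threaded sketch of the shard arithmetic used above (standalone, with hard-coded numbers; not lld code): the shard is picked from the top bits of the hash, and each shard is owned by exactly one thread, so the per-shard maps need no locking.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main() {
      constexpr size_t numShards = 32; // power of two
      const size_t shift = 32 - 5;     // 32 - countr_zero(32) == 27
      const size_t concurrency = 4;    // power of two <= numShards

      uint32_t hash = 0xdeadbeef;
      size_t shardId = hash >> shift;  // top 5 bits: in [0, 32)
      assert(shardId < numShards);

      // Thread (shardId & (concurrency - 1)) is the sole owner of this shard,
      // which is why createSymbols() can skip entries owned by other threads.
      size_t owner = shardId & (concurrency - 1);
      assert(owner < concurrency);
    }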
2843
2844// Returns a newly-created .gdb_index section.
2845template <class ELFT> GdbIndexSection *GdbIndexSection::create() {
2846 llvm::TimeTraceScope timeScope("Create gdb index");
2847
2848 // Collect InputFiles with .debug_info. See the comment in
2849 // LLDDwarfObj<ELFT>::LLDDwarfObj. If we do lightweight parsing in the future,
2850 // note that isec->data() may uncompress the full content, which should be
2851 // parallelized.
2852 SetVector<InputFile *> files;
2853 for (InputSectionBase *s : ctx.inputSections) {
2854 InputSection *isec = dyn_cast<InputSection>(s);
2855 if (!isec)
2856 continue;
2857 // .debug_gnu_pub{names,types} are useless in executables.
2858 // They are present in input object files solely for creating
2859 // a .gdb_index. So we can remove them from the output.
2860 if (s->name == ".debug_gnu_pubnames" || s->name == ".debug_gnu_pubtypes")
2861 s->markDead();
2862 else if (isec->name == ".debug_info")
2863 files.insert(isec->file);
2864 }
2865 // Drop .rel[a].debug_gnu_pub{names,types} for --emit-relocs.
2866 llvm::erase_if(ctx.inputSections, [](InputSectionBase *s) {
2867 if (auto *isec = dyn_cast<InputSection>(s))
2868 if (InputSectionBase *rel = isec->getRelocatedSection())
2869 return !rel->isLive();
2870 return !s->isLive();
2871 });
2872
2873 SmallVector<GdbChunk, 0> chunks(files.size());
2874 SmallVector<SmallVector<NameAttrEntry, 0>, 0> nameAttrs(files.size());
2875
2876 parallelFor(0, files.size(), [&](size_t i) {
2877 // To keep memory usage low, we don't want to keep cached DWARFContext, so
2878 // avoid getDwarf() here.
2879 ObjFile<ELFT> *file = cast<ObjFile<ELFT>>(files[i]);
2880 DWARFContext dwarf(std::make_unique<LLDDwarfObj<ELFT>>(file));
2881 auto &dobj = static_cast<const LLDDwarfObj<ELFT> &>(dwarf.getDWARFObj());
2882
2883 // If there are multiple compile units in .debug_info (very rare, ld -r --unique),
2884 // this only picks the last one. Other address ranges are lost.
2885 chunks[i].sec = dobj.getInfoSection();
2886 chunks[i].compilationUnits = readCuList(dwarf);
2887 chunks[i].addressAreas = readAddressAreas(dwarf, chunks[i].sec);
2888 nameAttrs[i] = readPubNamesAndTypes<ELFT>(dobj, chunks[i].compilationUnits);
2889 });
2890
2891 auto *ret = make<GdbIndexSection>();
2892 ret->chunks = std::move(chunks);
2893 std::tie(ret->symbols, ret->size) = createSymbols(nameAttrs, ret->chunks);
2894
2895 // Count the areas other than the constant pool.
2896 ret->size += sizeof(GdbIndexHeader) + ret->computeSymtabSize() * 8;
2897 for (GdbChunk &chunk : ret->chunks)
2898 ret->size +=
2899 chunk.compilationUnits.size() * 16 + chunk.addressAreas.size() * 20;
2900
2901 return ret;
2902}
2903
2904void GdbIndexSection::writeTo(uint8_t *buf) {
2905 // Write the header.
2906 auto *hdr = reinterpret_cast<GdbIndexHeader *>(buf);
2907 uint8_t *start = buf;
2908 hdr->version = 7;
2909 buf += sizeof(*hdr);
2910
2911 // Write the CU list.
2912 hdr->cuListOff = buf - start;
2913 for (GdbChunk &chunk : chunks) {
2914 for (CuEntry &cu : chunk.compilationUnits) {
2915 write64le(buf, chunk.sec->outSecOff + cu.cuOffset);
2916 write64le(buf + 8, cu.cuLength);
2917 buf += 16;
2918 }
2919 }
2920
2921 // Write the address area.
2922 hdr->cuTypesOff = buf - start;
2923 hdr->addressAreaOff = buf - start;
2924 uint32_t cuOff = 0;
2925 for (GdbChunk &chunk : chunks) {
2926 for (AddressEntry &e : chunk.addressAreas) {
2927 // In the case of ICF there may be duplicate address range entries.
2928 const uint64_t baseAddr = e.section->repl->getVA(0);
2929 write64le(buf, baseAddr + e.lowAddress);
2930 write64le(buf + 8, baseAddr + e.highAddress);
2931 write32le(buf + 16, e.cuIndex + cuOff);
2932 buf += 20;
2933 }
2934 cuOff += chunk.compilationUnits.size();
2935 }
2936
2937 // Write the on-disk open-addressing hash table containing symbols.
2938 hdr->symtabOff = buf - start;
2939 size_t symtabSize = computeSymtabSize();
2940 uint32_t mask = symtabSize - 1;
2941
2942 for (GdbSymbol &sym : symbols) {
2943 uint32_t h = sym.name.hash();
2944 uint32_t i = h & mask;
2945 uint32_t step = ((h * 17) & mask) | 1;
2946
2947 while (read32le(buf + i * 8))
2948 i = (i + step) & mask;
2949
2950 write32le(buf + i * 8, sym.nameOff);
2951 write32le(buf + i * 8 + 4, sym.cuVectorOff);
2952 }
2953
2954 buf += symtabSize * 8;
2955
2956 // Write the string pool.
2957 hdr->constantPoolOff = buf - start;
2958 parallelForEach(symbols, [&](GdbSymbol &sym) {
2959 memcpy(buf + sym.nameOff, sym.name.data(), sym.name.size());
2960 });
2961
2962 // Write the CU vectors.
2963 for (GdbSymbol &sym : symbols) {
2964 write32le(buf, sym.cuVector.size());
2965 buf += 4;
2966 for (uint32_t val : sym.cuVector) {
2967 write32le(buf, val);
2968 buf += 4;
2969 }
2970 }
2971}
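The symbol table above is a classic open-addressing table. Here is a compact sketch of the probe loop (findSlot is hypothetical; the real code probes the output buffer with read32le): the table size is a power of two and the step is forced odd, so step and size are coprime and the probe sequence eventually visits every slot.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Mirror of the probe in GdbIndexSection::writeTo(); 0 marks a free slot.
    static uint32_t findSlot(const std::vector<uint32_t> &table, uint32_t h) {
      uint32_t mask = table.size() - 1;      // table size is a power of two
      uint32_t i = h & mask;
      uint32_t step = ((h * 17) & mask) | 1; // odd => coprime with the size
      while (table[i] != 0)
        i = (i + step) & mask;
      return i;
    }

    int main() {
      std::vector<uint32_t> table(16, 0);
      uint32_t slot = findSlot(table, 0x9e3779b9);
      table[slot] = 1; // occupy it
      // A colliding probe with the same hash steps on to a different slot.
      assert(findSlot(table, 0x9e3779b9) != slot);
    }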
2972
2973bool GdbIndexSection::isNeeded() const { return !chunks.empty(); }
2974
2975EhFrameHeader::EhFrameHeader()
2976 : SyntheticSection(SHF_ALLOC, SHT_PROGBITS, 4, ".eh_frame_hdr") {}
2977
2978void EhFrameHeader::writeTo(uint8_t *buf) {
2979 // Unlike most sections, the EhFrameHeader section is written while writing
2980 // another section, namely EhFrameSection, which calls the write() function
2981 // below from its writeTo() function. This is necessary because the contents
2982 // of EhFrameHeader depend on the relocated contents of EhFrameSection and we
2983 // don't know which order the sections will be written in.
2984}
2985
2986// .eh_frame_hdr contains a binary search table of pointers to FDEs.
2987// Each entry of the search table consists of two values,
2988 // the starting PC of the range the FDE covers, and the FDE's address.
2989// It is sorted by PC.
2990void EhFrameHeader::write() {
2991 uint8_t *buf = Out::bufferStart + getParent()->offset + outSecOff;
2992 using FdeData = EhFrameSection::FdeData;
2993 SmallVector<FdeData, 0> fdes = getPartition().ehFrame->getFdeData();
2994
2995 buf[0] = 1;
2996 buf[1] = DW_EH_PE_pcrel | DW_EH_PE_sdata4;
2997 buf[2] = DW_EH_PE_udata4;
2998 buf[3] = DW_EH_PE_datarel | DW_EH_PE_sdata4;
2999 write32(buf + 4,
3000 getPartition().ehFrame->getParent()->addr - this->getVA() - 4);
3001 write32(buf + 8, fdes.size());
3002 buf += 12;
3003
3004 for (FdeData &fde : fdes) {
3005 write32(buf, fde.pcRel);
3006 write32(buf + 4, fde.fdeVARel);
3007 buf += 8;
3008 }
3009}
3010
3011size_t EhFrameHeader::getSize() const {
3012 // .eh_frame_hdr has a 12-byte header followed by an array of FDEs.
3013 return 12 + getPartition().ehFrame->numFdes * 8;
3014}
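For reference, the fixed part of .eh_frame_hdr written above can be pictured as the struct below (EhFrameHdrLayout is illustrative only; lld writes the fields byte by byte rather than defining such a type):

    #include <cstdint>

    struct EhFrameHdrLayout {
      uint8_t version;       // 1
      uint8_t ehFramePtrEnc; // DW_EH_PE_pcrel | DW_EH_PE_sdata4
      uint8_t fdeCountEnc;   // DW_EH_PE_udata4
      uint8_t tableEnc;      // DW_EH_PE_datarel | DW_EH_PE_sdata4
      int32_t ehFramePtr;    // PC-relative pointer to .eh_frame
      uint32_t fdeCount;     // number of search table entries
      // ...followed by fdeCount pairs of {int32_t pcRel, int32_t fdeVARel},
      // sorted by pcRel for binary search.
    };
    static_assert(sizeof(EhFrameHdrLayout) == 12,
                  "12-byte header, then 8 bytes per FDE, matching getSize()");

    int main() {}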
3015
3016bool EhFrameHeader::isNeeded() const {
3017 return isLive() && getPartition().ehFrame->isNeeded();
3018}
3019
3020VersionDefinitionSection::VersionDefinitionSection()
3021 : SyntheticSection(SHF_ALLOC, SHT_GNU_verdef, sizeof(uint32_t),
3022 ".gnu.version_d") {}
3023
3024StringRef VersionDefinitionSection::getFileDefName() {
3025 if (!getPartition().name.empty())
3026 return getPartition().name;
3027 if (!config->soName.empty())
3028 return config->soName;
3029 return config->outputFile;
3030}
3031
3032void VersionDefinitionSection::finalizeContents() {
3033 fileDefNameOff = getPartition().dynStrTab->addString(getFileDefName());
3034 for (const VersionDefinition &v : namedVersionDefs())
3035 verDefNameOffs.push_back(getPartition().dynStrTab->addString(v.name));
3036
3037 if (OutputSection *sec = getPartition().dynStrTab->getParent())
3038 getParent()->link = sec->sectionIndex;
3039
3040 // sh_info should be set to the number of definitions. This fact is missing
3041 // from the documentation, but was confirmed by the binutils community:
3042 // https://sourceware.org/ml/binutils/2014-11/msg00355.html
3043 getParent()->info = getVerDefNum();
3044}
3045
3046void VersionDefinitionSection::writeOne(uint8_t *buf, uint32_t index,
3047 StringRef name, size_t nameOff) {
3048 uint16_t flags = index == 1 ? VER_FLG_BASE : 0;
3049
3050 // Write a verdef.
3051 write16(buf, 1); // vd_version
3052 write16(buf + 2, flags); // vd_flags
3053 write16(buf + 4, index); // vd_ndx
3054 write16(buf + 6, 1); // vd_cnt
3055 write32(buf + 8, hashSysV(name)); // vd_hash
3056 write32(buf + 12, 20); // vd_aux
3057 write32(buf + 16, 28); // vd_next
3058
3059 // Write a veraux.
3060 write32(buf + 20, nameOff); // vda_name
3061 write32(buf + 24, 0); // vda_next
3062}
3063
3064void VersionDefinitionSection::writeTo(uint8_t *buf) {
3065 writeOne(buf, 1, getFileDefName(), fileDefNameOff);
3066
3067 auto nameOffIt = verDefNameOffs.begin();
3068 for (const VersionDefinition &v : namedVersionDefs()) {
3069 buf += EntrySize;
3070 writeOne(buf, v.id, v.name, *nameOffIt++);
3071 }
3072
3073 // Need to terminate the last version definition.
3074 write32(buf + 16, 0); // vd_next
3075}
3076
3077size_t VersionDefinitionSection::getSize() const {
3078 return EntrySize * getVerDefNum();
3079}
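The magic constants 20 and 28 in writeOne() fall directly out of the record sizes. A quick standalone check (verdefSize/verauxSize are our names, not lld's):

    int main() {
      // vd_version..vd_next: four 16-bit fields plus three 32-bit fields.
      const int verdefSize = 4 * 2 + 3 * 4; // 20: vd_aux points right past it
      const int verauxSize = 2 * 4;         // 8: vda_name, vda_next
      // One verdef plus one veraux per definition: vd_next == EntrySize == 28.
      return !(verdefSize == 20 && verdefSize + verauxSize == 28);
    }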
3080
3081// .gnu.version is a table where each entry is 2 bytes long.
3082VersionTableSection::VersionTableSection()
3083 : SyntheticSection(SHF_ALLOC, SHT_GNU_versym, sizeof(uint16_t),
3084 ".gnu.version") {
3085 this->entsize = 2;
3086}
3087
3088void VersionTableSection::finalizeContents() {
3089 // As of June 2016, the GNU docs do not mention that the sh_link field
3090 // should be set, but the Sun docs do. readelf also relies on this field.
3091 getParent()->link = getPartition().dynSymTab->getParent()->sectionIndex;
3092}
3093
3094size_t VersionTableSection::getSize() const {
3095 return (getPartition().dynSymTab->getSymbols().size() + 1) * 2;
3096}
3097
3098void VersionTableSection::writeTo(uint8_t *buf) {
3099 buf += 2;
3100 for (const SymbolTableEntry &s : getPartition().dynSymTab->getSymbols()) {
3101 // For an unextracted lazy symbol (undefined weak), it must have been
3102 // converted to Undefined and have VER_NDX_GLOBAL version here.
3103 assert(!s.sym->isLazy());
3104 write16(buf, s.sym->versionId);
3105 buf += 2;
3106 }
3107}
3108
3109bool VersionTableSection::isNeeded() const {
3110 return isLive() &&
3111 (getPartition().verDef || getPartition().verNeed->isNeeded());
3112}
3113
3114void elf::addVerneed(Symbol *ss) {
3115 auto &file = cast<SharedFile>(*ss->file);
3116 if (ss->verdefIndex == VER_NDX_GLOBAL) {
3117 ss->versionId = VER_NDX_GLOBAL;
3118 return;
3119 }
3120
3121 if (file.vernauxs.empty())
3122 file.vernauxs.resize(file.verdefs.size());
3123
3124 // Select a version identifier for the vernaux data structure, if we haven't
3125 // already allocated one. The verdef identifiers cover the range
3126 // [1..getVerDefNum()]; this causes the vernaux identifiers to start from
3127 // getVerDefNum()+1.
3128 if (file.vernauxs[ss->verdefIndex] == 0)
3129 file.vernauxs[ss->verdefIndex] = ++SharedFile::vernauxNum + getVerDefNum();
3130
3131 ss->versionId = file.vernauxs[ss->verdefIndex];
3132}
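A sketch of that id allocation in isolation (plain arrays and counters standing in for SharedFile state): verdef ids occupy [1, getVerDefNum()], so the first vernaux id handed out is getVerDefNum() + 1, and each (file, verdef index) pair is assigned at most once.

    #include <vector>

    int main() {
      const unsigned verDefNum = 3; // ids 1..3 taken by our own verdefs
      unsigned vernauxNum = 0;      // global counter across all DSOs
      std::vector<unsigned> fileVernauxs(4, 0); // per-verdef slots in one DSO

      unsigned verdefIndex = 2;
      if (fileVernauxs[verdefIndex] == 0) // first reference allocates the id
        fileVernauxs[verdefIndex] = ++vernauxNum + verDefNum;

      return fileVernauxs[verdefIndex] != 4; // 3 verdef ids, then 4
    }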
3133
3134template <class ELFT>
3135VersionNeedSection<ELFT>::VersionNeedSection()
3136 : SyntheticSection(SHF_ALLOC, SHT_GNU_verneed, sizeof(uint32_t),
3137 ".gnu.version_r") {}
3138
3139template <class ELFT> void VersionNeedSection<ELFT>::finalizeContents() {
3140 for (SharedFile *f : ctx.sharedFiles) {
3141 if (f->vernauxs.empty())
3142 continue;
3143 verneeds.emplace_back();
3144 Verneed &vn = verneeds.back();
3145 vn.nameStrTab = getPartition().dynStrTab->addString(f->soName);
3146 bool isLibc = config->relrGlibc && f->soName.startswith("libc.so.");
3147 bool isGlibc2 = false;
3148 for (unsigned i = 0; i != f->vernauxs.size(); ++i) {
3149 if (f->vernauxs[i] == 0)
3150 continue;
3151 auto *verdef =
3152 reinterpret_cast<const typename ELFT::Verdef *>(f->verdefs[i]);
3153 StringRef ver(f->getStringTable().data() + verdef->getAux()->vda_name);
3154 if (isLibc && ver.startswith("GLIBC_2."))
3155 isGlibc2 = true;
3156 vn.vernauxs.push_back({verdef->vd_hash, f->vernauxs[i],
3157 getPartition().dynStrTab->addString(ver)});
3158 }
3159 if (isGlibc2) {
3160 const char *ver = "GLIBC_ABI_DT_RELR";
3161 vn.vernauxs.push_back({hashSysV(ver),
3162 ++SharedFile::vernauxNum + getVerDefNum(),
3163 getPartition().dynStrTab->addString(ver)});
3164 }
3165 }
3166
3167 if (OutputSection *sec = getPartition().dynStrTab->getParent())
3168 getParent()->link = sec->sectionIndex;
3169 getParent()->info = verneeds.size();
3170}
3171
3172template <class ELFT> void VersionNeedSection<ELFT>::writeTo(uint8_t *buf) {
3173 // The Elf_Verneeds need to appear first, followed by the Elf_Vernauxs.
3174 auto *verneed = reinterpret_cast<Elf_Verneed *>(buf);
3175 auto *vernaux = reinterpret_cast<Elf_Vernaux *>(verneed + verneeds.size());
3176
3177 for (auto &vn : verneeds) {
3178 // Create an Elf_Verneed for this DSO.
3179 verneed->vn_version = 1;
3180 verneed->vn_cnt = vn.vernauxs.size();
3181 verneed->vn_file = vn.nameStrTab;
3182 verneed->vn_aux =
3183 reinterpret_cast<char *>(vernaux) - reinterpret_cast<char *>(verneed);
3184 verneed->vn_next = sizeof(Elf_Verneed);
3185 ++verneed;
3186
3187 // Create the Elf_Vernauxs for this Elf_Verneed.
3188 for (auto &vna : vn.vernauxs) {
3189 vernaux->vna_hash = vna.hash;
3190 vernaux->vna_flags = 0;
3191 vernaux->vna_other = vna.verneedIndex;
3192 vernaux->vna_name = vna.nameStrTab;
3193 vernaux->vna_next = sizeof(Elf_Vernaux);
3194 ++vernaux;
3195 }
3196
3197 vernaux[-1].vna_next = 0;
3198 }
3199 verneed[-1].vn_next = 0;
3200}
3201
3202template <class ELFT> size_t VersionNeedSection<ELFT>::getSize() const {
3203 return verneeds.size() * sizeof(Elf_Verneed) +
3204 SharedFile::vernauxNum * sizeof(Elf_Vernaux);
3205}
3206
3207template <class ELFT> bool VersionNeedSection<ELFT>::isNeeded() const {
3208 return isLive() && SharedFile::vernauxNum != 0;
3209}
3210
3211void MergeSyntheticSection::addSection(MergeInputSection *ms) {
3212 ms->parent = this;
3213 sections.push_back(ms);
3214 assert(addralign == ms->addralign || !(ms->flags & SHF_STRINGS));
3215 addralign = std::max(addralign, ms->addralign);
3216}
3217
3218MergeTailSection::MergeTailSection(StringRef name, uint32_t type,
3219 uint64_t flags, uint32_t alignment)
3220 : MergeSyntheticSection(name, type, flags, alignment),
3221 builder(StringTableBuilder::RAW, llvm::Align(alignment)) {}
3222
3223size_t MergeTailSection::getSize() const { return builder.getSize(); }
3224
3225void MergeTailSection::writeTo(uint8_t *buf) { builder.write(buf); }
3226
3227void MergeTailSection::finalizeContents() {
3228 // Add all string pieces to the string table builder to create section
3229 // contents.
3230 for (MergeInputSection *sec : sections)
3231 for (size_t i = 0, e = sec->pieces.size(); i != e; ++i)
3232 if (sec->pieces[i].live)
3233 builder.add(sec->getData(i));
3234
3235 // Fix the string table content. After this, the contents will never change.
3236 builder.finalize();
3237
3238 // finalize() fixed tail-optimized strings, so we can now get
3239 // offsets of strings. Get an offset for each string and save it
3240 // to a corresponding SectionPiece for easy access.
3241 for (MergeInputSection *sec : sections)
3242 for (size_t i = 0, e = sec->pieces.size(); i != e; ++i)
3243 if (sec->pieces[i].live)
3244 sec->pieces[i].outputOff = builder.getOffset(sec->getData(i));
3245}
3246
3247void MergeNoTailSection::writeTo(uint8_t *buf) {
3248 parallelFor(0, numShards,
3249 [&](size_t i) { shards[i].write(buf + shardOffsets[i]); });
3250}
3251
3252// This function is very hot (i.e. it can take several seconds to finish)
3253// because sometimes the number of inputs is on the order of millions.
3254// So, we use multi-threading.
3255//
3256// For any strings S and T, we know S is not mergeable with T if S's hash
3257// value is different from T's. If that's the case, we can safely put S and
3258// T into different string builders without worrying about merge misses.
3259// We do it in parallel.
3260void MergeNoTailSection::finalizeContents() {
3261 // Initializes string table builders.
3262 for (size_t i = 0; i < numShards; ++i)
3263 shards.emplace_back(StringTableBuilder::RAW, llvm::Align(addralign));
3264
3265 // Concurrency level. Must be a power of 2 to avoid expensive modulo
3266 // operations in the following tight loop.
3267 const size_t concurrency =
3268 llvm::bit_floor(std::min<size_t>(config->threadCount, numShards));
3269
3270 // Add section pieces to the builders.
3271 parallelFor(0, concurrency, [&](size_t threadId) {
3272 for (MergeInputSection *sec : sections) {
3273 for (size_t i = 0, e = sec->pieces.size(); i != e; ++i) {
3274 if (!sec->pieces[i].live)
3275 continue;
3276 size_t shardId = getShardId(sec->pieces[i].hash);
3277 if ((shardId & (concurrency - 1)) == threadId)
3278 sec->pieces[i].outputOff = shards[shardId].add(sec->getData(i));
3279 }
3280 }
3281 });
3282
3283 // Compute an in-section offset for each shard.
3284 size_t off = 0;
3285 for (size_t i = 0; i < numShards; ++i) {
3286 shards[i].finalizeInOrder();
3287 if (shards[i].getSize() > 0)
3288 off = alignToPowerOf2(off, addralign);
3289 shardOffsets[i] = off;
3290 off += shards[i].getSize();
3291 }
3292 size = off;
3293
3294 // So far, section pieces have offsets from beginning of shards, but
3295 // we want offsets from beginning of the whole section. Fix them.
3296 parallelForEach(sections, [&](MergeInputSection *sec) {
3297 for (size_t i = 0, e = sec->pieces.size(); i != e; ++i)
3298 if (sec->pieces[i].live)
3299 sec->pieces[i].outputOff +=
3300 shardOffsets[getShardId(sec->pieces[i].hash)];
3301 });
3302}
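The shard-offset pass above in miniature (standalone; alignUp is a stand-in for alignToPowerOf2): empty shards are not aligned, so they do not inflate the section, and a piece's final offset is its shard-relative offset plus its shard's start.

    #include <cstddef>
    #include <vector>

    static size_t alignUp(size_t v, size_t a) { return (v + a - 1) & ~(a - 1); }

    int main() {
      const size_t addralign = 8;
      std::vector<size_t> shardSizes = {13, 0, 5};
      std::vector<size_t> shardOffsets(shardSizes.size());
      size_t off = 0;
      for (size_t i = 0; i < shardSizes.size(); ++i) {
        if (shardSizes[i] > 0) // skip alignment for empty shards
          off = alignUp(off, addralign);
        shardOffsets[i] = off;
        off += shardSizes[i];
      }
      // Shard 0 at 0, empty shard 1 at 13, shard 2 aligned up to 16; total 21.
      return !(shardOffsets[2] == 16 && off == 21);
    }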
3303
3304template <class ELFT> void elf::splitSections() {
3305 llvm::TimeTraceScope timeScope("Split sections");
3306 // splitIntoPieces needs to be called on each MergeInputSection
3307 // before calling finalizeContents().
3308 parallelForEach(ctx.objectFiles, [](ELFFileBase *file) {
3309 for (InputSectionBase *sec : file->getSections()) {
3310 if (!sec)
3311 continue;
3312 if (auto *s = dyn_cast<MergeInputSection>(sec))
3313 s->splitIntoPieces();
3314 else if (auto *eh = dyn_cast<EhInputSection>(sec))
3315 eh->split<ELFT>();
3316 }
3317 });
3318}
3319
3320void elf::combineEhSections() {
3321 llvm::TimeTraceScope timeScope("Combine EH sections");
3322 for (EhInputSection *sec : ctx.ehInputSections) {
3323 EhFrameSection &eh = *sec->getPartition().ehFrame;
3324 sec->parent = &eh;
3325 eh.addralign = std::max(eh.addralign, sec->addralign);
3326 eh.sections.push_back(sec);
3327 llvm::append_range(eh.dependentSections, sec->dependentSections);
3328 }
3329
3330 if (!mainPart->armExidx)
3331 return;
3332 llvm::erase_if(ctx.inputSections, [](InputSectionBase *s) {
3333 // Ignore dead sections and the partition end marker (.part.end),
3334 // whose partition number is out of bounds.
3335 if (!s->isLive() || s->partition == 255)
3336 return false;
3337 Partition &part = s->getPartition();
3338 return s->kind() == SectionBase::Regular && part.armExidx &&
3339 part.armExidx->addSection(cast<InputSection>(s));
3340 });
3341}
3342
3343MipsRldMapSection::MipsRldMapSection()
3344 : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS, config->wordsize,
3345 ".rld_map") {}
3346
3347ARMExidxSyntheticSection::ARMExidxSyntheticSection()
3348 : SyntheticSection(SHF_ALLOC | SHF_LINK_ORDER, SHT_ARM_EXIDX,
3349 config->wordsize, ".ARM.exidx") {}
3350
3351static InputSection *findExidxSection(InputSection *isec) {
3352 for (InputSection *d : isec->dependentSections)
3353 if (d->type == SHT_ARM_EXIDX && d->isLive())
3354 return d;
3355 return nullptr;
3356}
3357
3358static bool isValidExidxSectionDep(InputSection *isec) {
3359 return (isec->flags & SHF_ALLOC) && (isec->flags & SHF_EXECINSTR) &&
3360 isec->getSize() > 0;
3361}
3362
3363bool ARMExidxSyntheticSection::addSection(InputSection *isec) {
3364 if (isec->type == SHT_ARM_EXIDX) {
3365 if (InputSection *dep = isec->getLinkOrderDep())
3366 if (isValidExidxSectionDep(dep)) {
3367 exidxSections.push_back(isec);
3368 // Every exidxSection is 8 bytes; we need an estimate of the
3369 // size before assignAddresses can be called. The final size
3370 // will only be known after finalize is called.
3371 size += 8;
3372 }
3373 return true;
3374 }
3375
3376 if (isValidExidxSectionDep(isec)) {
3377 executableSections.push_back(isec);
3378 return false;
3379 }
3380
3381 // FIXME: we do not output a relocation section when --emit-relocs is used
3382 // as we do not have relocation sections for linker generated table entries
3383 // and we would have to erase at a late stage relocations from merged entries.
3384 // Given that exception tables are already position independent and a binary
3385 // analyzer could derive the relocations we choose to erase the relocations.
3386 if (config->emitRelocs && isec->type == SHT_REL)
3387 if (InputSectionBase *ex = isec->getRelocatedSection())
3388 if (isa<InputSection>(ex) && ex->type == SHT_ARM_EXIDX)
3389 return true;
3390
3391 return false;
3392}
3393
3394// References to .ARM.Extab Sections have bit 31 clear and are not the
3395// special EXIDX_CANTUNWIND bit-pattern.
3396static bool isExtabRef(uint32_t unwind) {
3397 return (unwind & 0x80000000) == 0 && unwind != 0x1;
3398}
3399
3400// Return true if the .ARM.exidx section Cur can be merged into the .ARM.exidx
3401// section Prev, where Cur follows Prev in the table. This can be done if the
3402// unwinding instructions in Cur are identical to Prev. Linker generated
3403// EXIDX_CANTUNWIND entries are represented by nullptr as they do not have an
3404// InputSection.
3405static bool isDuplicateArmExidxSec(InputSection *prev, InputSection *cur) {
3406
3407 struct ExidxEntry {
3408 ulittle32_t fn;
3409 ulittle32_t unwind;
3410 };
3411 // Get the last table Entry from the previous .ARM.exidx section. If Prev is
3412 // nullptr then it will be a synthesized EXIDX_CANTUNWIND entry.
3413 ExidxEntry prevEntry = {ulittle32_t(0), ulittle32_t(1)};
3414 if (prev)
3415 prevEntry = prev->getDataAs<ExidxEntry>().back();
3416 if (isExtabRef(prevEntry.unwind))
3417 return false;
3418
3419 // We consider the unwind instructions of an .ARM.exidx table entry
3420 // a duplicate of the previous entry's unwind instructions if:
3421 // - Both are the special EXIDX_CANTUNWIND.
3422 // - Both are the same inline unwind instructions.
3423 // We do not attempt to follow and check links into .ARM.extab tables as
3424 // consecutive identical entries are rare and the effort to check that they
3425 // are identical is high.
3426
3427 // If Cur is nullptr then this is a synthesized EXIDX_CANTUNWIND entry.
3428 if (cur == nullptr)
3429 return prevEntry.unwind == 1;
3430
3431 for (const ExidxEntry entry : cur->getDataAs<ExidxEntry>())
3432 if (isExtabRef(entry.unwind) || entry.unwind != prevEntry.unwind)
3433 return false;
3434
3435 // All table entries in this .ARM.exidx Section can be merged into the
3436 // previous Section.
3437 return true;
3438}
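To make the bit tests concrete, here is isExtabRef() exercised against the three kinds of second words an .ARM.exidx entry can hold (the sample values are made up):

    #include <cstdint>

    // Same predicate as above: bit 31 clear and value != 1 means the word is
    // a PREL31 reference into .ARM.extab.
    static bool isExtabRef(uint32_t unwind) {
      return (unwind & 0x80000000) == 0 && unwind != 0x1;
    }

    int main() {
      uint32_t cantUnwind = 0x1;       // EXIDX_CANTUNWIND
      uint32_t inlineOps = 0x80b0b0b0; // bit 31 set: inline unwind opcodes
      uint32_t extabRef = 0x00000100;  // bit 31 clear, != 1: .ARM.extab ref
      return !(!isExtabRef(cantUnwind) && !isExtabRef(inlineOps) &&
               isExtabRef(extabRef));
    }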
3439
3440// The .ARM.exidx table must be sorted in ascending order of the address of the
3441 // functions the table describes. Optionally, duplicate adjacent table
3442 // entries can be removed. At the end of the function the executableSections
3443 // must be sorted in ascending order of address, sentinel is set to the
3444 // InputSection with the highest address, and any InputSections that have
3445 // mergeable .ARM.exidx table entries are removed from it.
3446void ARMExidxSyntheticSection::finalizeContents() {
3447 // The executableSections and exidxSections that we use to derive the final
3448 // contents of this SyntheticSection are populated before
3449 // processSectionCommands() and ICF. A /DISCARD/ entry in SECTIONS command or
3450 // ICF may remove executable InputSections and their dependent .ARM.exidx
3451 // section that we recorded earlier.
3452 auto isDiscarded = [](const InputSection *isec) { return !isec->isLive(); };
3453 llvm::erase_if(exidxSections, isDiscarded);
3454 // We need to remove discarded InputSections, as well as InputSections
3455 // without .ARM.exidx sections for which a generated EXIDX_CANTUNWIND
3456 // entry would be out of range.
3457 auto isDiscardedOrOutOfRange = [this](InputSection *isec) {
3458 if (!isec->isLive())
3459 return true;
3460 if (findExidxSection(isec))
3461 return false;
3462 int64_t off = static_cast<int64_t>(isec->getVA() - getVA());
3463 return off != llvm::SignExtend64(off, 31);
3464 };
3465 llvm::erase_if(executableSections, isDiscardedOrOutOfRange);
3466
3467 // Sort the executable sections that may or may not have associated
3468 // .ARM.exidx sections by order of ascending address. This requires the
3469 // relative positions of InputSections and OutputSections to be known.
3470 auto compareByFilePosition = [](const InputSection *a,
3471 const InputSection *b) {
3472 OutputSection *aOut = a->getParent();
3473 OutputSection *bOut = b->getParent();
3474
3475 if (aOut != bOut)
3476 return aOut->addr < bOut->addr;
3477 return a->outSecOff < b->outSecOff;
3478 };
3479 llvm::stable_sort(executableSections, compareByFilePosition);
3480 sentinel = executableSections.back();
3481 // Optionally merge adjacent duplicate entries.
3482 if (config->mergeArmExidx) {
3483 SmallVector<InputSection *, 0> selectedSections;
3484 selectedSections.reserve(executableSections.size());
3485 selectedSections.push_back(executableSections[0]);
3486 size_t prev = 0;
3487 for (size_t i = 1; i < executableSections.size(); ++i) {
3488 InputSection *ex1 = findExidxSection(executableSections[prev]);
3489 InputSection *ex2 = findExidxSection(executableSections[i]);
3490 if (!isDuplicateArmExidxSec(ex1, ex2)) {
3491 selectedSections.push_back(executableSections[i]);
3492 prev = i;
3493 }
3494 }
3495 executableSections = std::move(selectedSections);
3496 }
3497
3498 size_t offset = 0;
3499 size = 0;
3500 for (InputSection *isec : executableSections) {
3501 if (InputSection *d = findExidxSection(isec)) {
3502 d->outSecOff = offset;
3503 d->parent = getParent();
3504 offset += d->getSize();
3505 } else {
3506 offset += 8;
3507 }
3508 }
3509 // Size includes Sentinel.
3510 size = offset + 8;
3511}
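The duplicate-merging loop above follows a keep-the-anchor pattern: prev tracks the last kept section, not the last visited one, so a run of equal entries collapses onto its first element. A toy version with integers standing in for unwind data:

    #include <vector>

    int main() {
      std::vector<int> unwind = {1, 1, 7, 7, 7, 1};
      std::vector<int> kept = {unwind[0]};
      size_t prev = 0; // index of the last *kept* element
      for (size_t i = 1; i < unwind.size(); ++i)
        if (unwind[i] != unwind[prev]) {
          kept.push_back(unwind[i]);
          prev = i;
        }
      return !(kept.size() == 3); // {1, 7, 1}: each run collapsed
    }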
3512
3513InputSection *ARMExidxSyntheticSection::getLinkOrderDep() const {
3514 return executableSections.front();
3515}
3516
3517// To write the .ARM.exidx table from the ExecutableSections we have three cases
3518// 1.) The InputSection has a .ARM.exidx InputSection in its dependent sections.
3519// We write the .ARM.exidx section contents and apply its relocations.
3520// 2.) The InputSection does not have a dependent .ARM.exidx InputSection. We
3521 //     must write the contents of an EXIDX_CANTUNWIND entry directly. We use
3522 //     the start of the InputSection, as the purpose of the linker-generated
3523 //     entry is to terminate the address range of the previous entry.
3524// 3.) A trailing EXIDX_CANTUNWIND sentinel section is required at the end of
3525// the table to terminate the address range of the final entry.
3526void ARMExidxSyntheticSection::writeTo(uint8_t *buf) {
3527
3528 const uint8_t cantUnwindData[8] = {0, 0, 0, 0, // PREL31 to target
3529 1, 0, 0, 0}; // EXIDX_CANTUNWIND
3530
3531 uint64_t offset = 0;
3532 for (InputSection *isec : executableSections) {
3533 assert(isec->getParent() != nullptr);
3534 if (InputSection *d = findExidxSection(isec)) {
3535 memcpy(buf + offset, d->content().data(), d->content().size());
3536 target->relocateAlloc(*d, buf + d->outSecOff);
3537 offset += d->getSize();
3538 } else {
3539 // A Linker generated CANTUNWIND section.
3540 memcpy(buf + offset, cantUnwindData, sizeof(cantUnwindData));
3541 uint64_t s = isec->getVA();
3542 uint64_t p = getVA() + offset;
3543 target->relocateNoSym(buf + offset, R_ARM_PREL31, s - p);
3544 offset += 8;
3545 }
3546 }
3547 // Write Sentinel.
3548 memcpy(buf + offset, cantUnwindData, sizeof(cantUnwindData));
3549 uint64_t s = sentinel->getVA(sentinel->getSize());
3550 uint64_t p = getVA() + offset;
3551 target->relocateNoSym(buf + offset, R_ARM_PREL31, s - p);
3552 assert(size == offset + 8);
3553}
3554
3555bool ARMExidxSyntheticSection::isNeeded() const {
3556 return llvm::any_of(exidxSections,
3557 [](InputSection *isec) { return isec->isLive(); });
3558}
3559
3560ThunkSection::ThunkSection(OutputSection *os, uint64_t off)
3561 : SyntheticSection(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS,
3562 config->emachine == EM_PPC64 ? 16 : 4, ".text.thunk") {
3563 this->parent = os;
3564 this->outSecOff = off;
3565}
3566
3567size_t ThunkSection::getSize() const {
3568 if (roundUpSizeForErrata)
3569 return alignTo(size, 4096);
3570 return size;
3571}
3572
3573void ThunkSection::addThunk(Thunk *t) {
3574 thunks.push_back(t);
3575 t->addSymbols(*this);
3576}
3577
3578void ThunkSection::writeTo(uint8_t *buf) {
3579 for (Thunk *t : thunks)
3580 t->writeTo(buf + t->offset);
3581}
3582
3583InputSection *ThunkSection::getTargetInputSection() const {
3584 if (thunks.empty())
3585 return nullptr;
3586 const Thunk *t = thunks.front();
3587 return t->getTargetInputSection();
3588}
3589
3590bool ThunkSection::assignOffsets() {
3591 uint64_t off = 0;
3592 for (Thunk *t : thunks) {
3593 off = alignToPowerOf2(off, t->alignment);
3594 t->setOffset(off);
3595 uint32_t size = t->size();
3596 t->getThunkTargetSym()->size = size;
3597 off += size;
3598 }
3599 bool changed = off != size;
3600 size = off;
3601 return changed;
3602}
3603
3604PPC32Got2Section::PPC32Got2Section()
3605 : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS, 4, ".got2") {}
3606
3607bool PPC32Got2Section::isNeeded() const {
3608 // See the comment below. This is not needed if there is no other
3609 // InputSection.
3610 for (SectionCommand *cmd : getParent()->commands)
3611 if (auto *isd = dyn_cast<InputSectionDescription>(cmd))
3612 for (InputSection *isec : isd->sections)
3613 if (isec != this)
3614 return true;
3615 return false;
3616}
3617
3618void PPC32Got2Section::finalizeContents() {
3619 // PPC32 may create multiple GOT sections for -fPIC/-fPIE, one per file in
3620 // .got2. This function computes the outSecOff of each .got2 to be used in
3621 // PPC32PltCallStub::writeTo(). The purpose of this empty synthetic section is
3622 // to collect input sections named ".got2".
3623 for (SectionCommand *cmd : getParent()->commands)
3624 if (auto *isd = dyn_cast<InputSectionDescription>(cmd)) {
3625 for (InputSection *isec : isd->sections) {
3626 // isec->file may be nullptr for MergeSyntheticSection.
3627 if (isec != this && isec->file)
3628 isec->file->ppc32Got2 = isec;
3629 }
3630 }
3631}
3632
3633// If linking position-dependent code then the table will store the addresses
3634// directly in the binary so the section has type SHT_PROGBITS. If linking
3635// position-independent code the section has type SHT_NOBITS since it will be
3636// allocated and filled in by the dynamic linker.
3637PPC64LongBranchTargetSection::PPC64LongBranchTargetSection()
3638 : SyntheticSection(SHF_ALLOC | SHF_WRITE,
3639 config->isPic ? SHT_NOBITS : SHT_PROGBITS, 8,
3640 ".branch_lt") {}
3641
3642uint64_t PPC64LongBranchTargetSection::getEntryVA(const Symbol *sym,
3643 int64_t addend) {
3644 return getVA() + entry_index.find({sym, addend})->second * 8;
3645}
3646
3647std::optional<uint32_t>
3648PPC64LongBranchTargetSection::addEntry(const Symbol *sym, int64_t addend) {
3649 auto res =
3650 entry_index.try_emplace(std::make_pair(sym, addend), entries.size());
3651 if (!res.second)
3652 return std::nullopt;
3653 entries.emplace_back(sym, addend);
3654 return res.first->second;
3655}
3656
3657size_t PPC64LongBranchTargetSection::getSize() const {
3658 return entries.size() * 8;
3659}
3660
3661void PPC64LongBranchTargetSection::writeTo(uint8_t *buf) {
3662 // If linking non-pic we have the final addresses of the targets and they get
3663 // written to the table directly. For pic the dynamic linker will allocate
3664 // the section and fill it.
3665 if (config->isPic)
3666 return;
3667
3668 for (auto entry : entries) {
3669 const Symbol *sym = entry.first;
3670 int64_t addend = entry.second;
3671 assert(sym->getVA());
3672 // Need calls to branch to the local entry-point since a long-branch
3673 // must be a local-call.
3674 write64(buf, sym->getVA(addend) +
3675 getPPC64GlobalEntryToLocalEntryOffset(sym->stOther));
3676 buf += 8;
3677 }
3678}
3679
3680bool PPC64LongBranchTargetSection::isNeeded() const {
3681 // `removeUnusedSyntheticSections()` is called before thunk allocation which
3682 // is too early to determine if this section will be empty or not. We need
3683 // Finalized to keep the section alive until after thunk creation. Finalized
3684 // only gets set to true once `finalizeSections()` is called after thunk
3685 // creation. Because of this, if we don't create any long-branch thunks we end
3686 // up with an empty .branch_lt section in the binary.
3687 return !finalized || !entries.empty();
3688}
3689
3690static uint8_t getAbiVersion() {
3691 // MIPS non-PIC executable gets ABI version 1.
3692 if (config->emachine == EM_MIPS) {
3693 if (!config->isPic && !config->relocatable &&
3694 (config->eflags & (EF_MIPS_PIC | EF_MIPS_CPIC)) == EF_MIPS_CPIC)
3695 return 1;
3696 return 0;
3697 }
3698
3699 if (config->emachine == EM_AMDGPU && !ctx.objectFiles.empty()) {
3700 uint8_t ver = ctx.objectFiles[0]->abiVersion;
3701 for (InputFile *file : ArrayRef(ctx.objectFiles).slice(1))
3702 if (file->abiVersion != ver)
3703 error("incompatible ABI version: " + toString(file));
3704 return ver;
3705 }
3706
3707 return 0;
3708}
3709
3710template <typename ELFT> void elf::writeEhdr(uint8_t *buf, Partition &part) {
3711 memcpy(buf, "\177ELF", 4);
3712
3713 auto *eHdr = reinterpret_cast<typename ELFT::Ehdr *>(buf);
3714 eHdr->e_ident[EI_CLASS] = config->is64 ? ELFCLASS64 : ELFCLASS32;
3715 eHdr->e_ident[EI_DATA] = config->isLE ? ELFDATA2LSB : ELFDATA2MSB;
3716 eHdr->e_ident[EI_VERSION] = EV_CURRENT;
3717 eHdr->e_ident[EI_OSABI] = config->osabi;
3718 eHdr->e_ident[EI_ABIVERSION] = getAbiVersion();
3719 eHdr->e_machine = config->emachine;
3720 eHdr->e_version = EV_CURRENT;
3721 eHdr->e_flags = config->eflags;
3722 eHdr->e_ehsize = sizeof(typename ELFT::Ehdr);
3723 eHdr->e_phnum = part.phdrs.size();
3724 eHdr->e_shentsize = sizeof(typename ELFT::Shdr);
3725
3726 if (!config->relocatable) {
3727 eHdr->e_phoff = sizeof(typename ELFT::Ehdr);
3728 eHdr->e_phentsize = sizeof(typename ELFT::Phdr);
3729 }
3730}
3731
3732template <typename ELFT> void elf::writePhdrs(uint8_t *buf, Partition &part) {
3733 // Write the program header table.
3734 auto *hBuf = reinterpret_cast<typename ELFT::Phdr *>(buf);
3735 for (PhdrEntry *p : part.phdrs) {
3736 hBuf->p_type = p->p_type;
3737 hBuf->p_flags = p->p_flags;
3738 hBuf->p_offset = p->p_offset;
3739 hBuf->p_vaddr = p->p_vaddr;
3740 hBuf->p_paddr = p->p_paddr;
3741 hBuf->p_filesz = p->p_filesz;
3742 hBuf->p_memsz = p->p_memsz;
3743 hBuf->p_align = p->p_align;
3744 ++hBuf;
3745 }
3746}
3747
3748template <typename ELFT>
3749PartitionElfHeaderSection<ELFT>::PartitionElfHeaderSection()
3750 : SyntheticSection(SHF_ALLOC, SHT_LLVM_PART_EHDR, 1, "") {}
3751
3752template <typename ELFT>
3753size_t PartitionElfHeaderSection<ELFT>::getSize() const {
3754 return sizeof(typename ELFT::Ehdr);
3755}
3756
3757template <typename ELFT>
3758void PartitionElfHeaderSection<ELFT>::writeTo(uint8_t *buf) {
3759 writeEhdr<ELFT>(buf, getPartition());
3760
3761 // Loadable partitions are always ET_DYN.
3762 auto *eHdr = reinterpret_cast<typename ELFT::Ehdr *>(buf);
3763 eHdr->e_type = ET_DYN;
3764}
3765
3766template <typename ELFT>
3767PartitionProgramHeadersSection<ELFT>::PartitionProgramHeadersSection()
3768 : SyntheticSection(SHF_ALLOC, SHT_LLVM_PART_PHDR, 1, ".phdrs") {}
3769
3770template <typename ELFT>
3771size_t PartitionProgramHeadersSection<ELFT>::getSize() const {
3772 return sizeof(typename ELFT::Phdr) * getPartition().phdrs.size();
3773}
3774
3775template <typename ELFT>
3776void PartitionProgramHeadersSection<ELFT>::writeTo(uint8_t *buf) {
3777 writePhdrs<ELFT>(buf, getPartition());
3778}
3779
3780PartitionIndexSection::PartitionIndexSection()
3781 : SyntheticSection(SHF_ALLOC, SHT_PROGBITS, 4, ".rodata") {}
3782
3783size_t PartitionIndexSection::getSize() const {
3784 return 12 * (partitions.size() - 1);
3785}
3786
3787void PartitionIndexSection::finalizeContents() {
3788 for (size_t i = 1; i != partitions.size(); ++i)
3789 partitions[i].nameStrTab = mainPart->dynStrTab->addString(partitions[i].name);
3790}
3791
3792void PartitionIndexSection::writeTo(uint8_t *buf) {
3793 uint64_t va = getVA();
3794 for (size_t i = 1; i != partitions.size(); ++i) {
3795 write32(buf, mainPart->dynStrTab->getVA() + partitions[i].nameStrTab - va);
3796 write32(buf + 4, partitions[i].elfHeader->getVA() - (va + 4));
3797
3798 SyntheticSection *next = i == partitions.size() - 1
3799 ? in.partEnd.get()
3800 : partitions[i + 1].elfHeader.get();
3801 write32(buf + 8, next->getVA() - partitions[i].elfHeader->getVA());
3802
3803 va += 12;
3804 buf += 12;
3805 }
3806}
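Each record written above is three self-relative 32-bit words. A standalone rehearsal of the arithmetic with invented addresses (w0/w1/w2 are just local names): word 0 locates the partition name, word 1 the partition's ELF header, and word 2 is the distance to the next partition, i.e. this partition's size.

    #include <cstdint>

    int main() {
      uint64_t va = 0x1000;           // address of this 12-byte record
      uint64_t nameVA = 0x2040;       // dynstr VA + name string offset
      uint64_t partEhdrVA = 0x200000; // this partition's ELF header
      uint64_t nextVA = 0x300000;     // next partition's header (or .part.end)

      uint32_t w0 = uint32_t(nameVA - va);           // relative to word 0
      uint32_t w1 = uint32_t(partEhdrVA - (va + 4)); // relative to word 1
      uint32_t w2 = uint32_t(nextVA - partEhdrVA);   // partition size
      return !(w0 == 0x1040 && w1 == 0x1feffc && w2 == 0x100000);
    }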
3807
3808void InStruct::reset() {
3809 attributes.reset();
3810 riscvAttributes.reset();
3811 bss.reset();
3812 bssRelRo.reset();
3813 got.reset();
3814 gotPlt.reset();
3815 igotPlt.reset();
3816 ppc64LongBranchTarget.reset();
3817 mipsAbiFlags.reset();
3818 mipsGot.reset();
3819 mipsOptions.reset();
3820 mipsReginfo.reset();
3821 mipsRldMap.reset();
3822 partEnd.reset();
3823 partIndex.reset();
3824 plt.reset();
3825 iplt.reset();
3826 ppc32Got2.reset();
3827 ibtPlt.reset();
3828 relaPlt.reset();
3829 relaIplt.reset();
3830 shStrTab.reset();
3831 strTab.reset();
3832 symTab.reset();
3833 symTabShndx.reset();
3834}
3835
3836constexpr char kMemtagAndroidNoteName[] = "Android";
3837void MemtagAndroidNote::writeTo(uint8_t *buf) {
3838 static_assert(sizeof(kMemtagAndroidNoteName) == 8,
3839 "ABI check for Android 11 & 12.");
3840 assert((config->androidMemtagStack || config->androidMemtagHeap) &&
3841 "Should only be synthesizing a note if heap || stack is enabled.");
3842
3843 write32(buf, sizeof(kMemtagAndroidNoteName));
3844 write32(buf + 4, sizeof(uint32_t));
3845 write32(buf + 8, ELF::NT_ANDROID_TYPE_MEMTAG);
3846 memcpy(buf + 12, kMemtagAndroidNoteName, sizeof(kMemtagAndroidNoteName));
3847 buf += 12 + sizeof(kMemtagAndroidNoteName);
3848
3849 uint32_t value = 0;
3850 value |= config->androidMemtagMode;
3851 if (config->androidMemtagHeap)
3852 value |= ELF::NT_MEMTAG_HEAP;
3853 // Note, MTE stack is an ABI break. Attempting to run an MTE stack-enabled
3854 // binary on Android 11 or 12 will result in a checkfail in the loader.
3855 if (config->androidMemtagStack)
3856 value |= ELF::NT_MEMTAG_STACK;
3857 write32(buf, value); // note value
3858}
3859
3860size_t MemtagAndroidNote::getSize() const {
3861 return sizeof(llvm::ELF::Elf64_Nhdr) +
3862 /*namesz=*/sizeof(kMemtagAndroidNoteName) +
3863 /*descsz=*/sizeof(uint32_t);
3864}
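The size computed here is standard ELF note arithmetic. A standalone check (the local names are ours): a note is a 12-byte header (namesz, descsz, type), then the name padded to 4 bytes, then the descriptor; "Android\0" is exactly 8 bytes, so no padding is needed and the whole note is 24 bytes.

    #include <cstdint>

    int main() {
      const uint32_t nhdrSize = 3 * sizeof(uint32_t); // namesz, descsz, type
      const uint32_t namesz = sizeof("Android");      // 8, includes the NUL
      const uint32_t descsz = sizeof(uint32_t);       // one 32-bit flags word
      return nhdrSize + namesz + descsz != 24;
    }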
3865
3866void PackageMetadataNote::writeTo(uint8_t *buf) {
3867 write32(buf, 4);
3868 write32(buf + 4, config->packageMetadata.size() + 1);
3869 write32(buf + 8, FDO_PACKAGING_METADATA);
3870 memcpy(buf + 12, "FDO", 4);
3871 memcpy(buf + 16, config->packageMetadata.data(),
3872 config->packageMetadata.size());
3873}
3874
3875size_t PackageMetadataNote::getSize() const {
3876 return sizeof(llvm::ELF::Elf64_Nhdr) + 4 +
3877 alignTo(config->packageMetadata.size() + 1, 4);
3878}
3879
3880InStruct elf::in;
3881
3882std::vector<Partition> elf::partitions;
3883Partition *elf::mainPart;
3884
3885template GdbIndexSection *GdbIndexSection::create<ELF32LE>();
3886template GdbIndexSection *GdbIndexSection::create<ELF32BE>();
3887template GdbIndexSection *GdbIndexSection::create<ELF64LE>();
3888template GdbIndexSection *GdbIndexSection::create<ELF64BE>();
3889
3890template void elf::splitSections<ELF32LE>();
3891template void elf::splitSections<ELF32BE>();
3892template void elf::splitSections<ELF64LE>();
3893template void elf::splitSections<ELF64BE>();
3894
3895template class elf::MipsAbiFlagsSection<ELF32LE>;
3896template class elf::MipsAbiFlagsSection<ELF32BE>;
3897template class elf::MipsAbiFlagsSection<ELF64LE>;
3898template class elf::MipsAbiFlagsSection<ELF64BE>;
3899
3900template class elf::MipsOptionsSection<ELF32LE>;
3901template class elf::MipsOptionsSection<ELF32BE>;
3902template class elf::MipsOptionsSection<ELF64LE>;
3903template class elf::MipsOptionsSection<ELF64BE>;
3904
3905template void EhFrameSection::iterateFDEWithLSDA<ELF32LE>(
3906 function_ref<void(InputSection &)>);
3907template void EhFrameSection::iterateFDEWithLSDA<ELF32BE>(
3908 function_ref<void(InputSection &)>);
3909template void EhFrameSection::iterateFDEWithLSDA<ELF64LE>(
3910 function_ref<void(InputSection &)>);
3911template void EhFrameSection::iterateFDEWithLSDA<ELF64BE>(
3912 function_ref<void(InputSection &)>);
3913
3914template class elf::MipsReginfoSection<ELF32LE>;
3915template class elf::MipsReginfoSection<ELF32BE>;
3916template class elf::MipsReginfoSection<ELF64LE>;
3917template class elf::MipsReginfoSection<ELF64BE>;
3918
3919template class elf::DynamicSection<ELF32LE>;
3920template class elf::DynamicSection<ELF32BE>;
3921template class elf::DynamicSection<ELF64LE>;
3922template class elf::DynamicSection<ELF64BE>;
3923
3924template class elf::RelocationSection<ELF32LE>;
3925template class elf::RelocationSection<ELF32BE>;
3926template class elf::RelocationSection<ELF64LE>;
3927template class elf::RelocationSection<ELF64BE>;
3928
3929template class elf::AndroidPackedRelocationSection<ELF32LE>;
3930template class elf::AndroidPackedRelocationSection<ELF32BE>;
3931template class elf::AndroidPackedRelocationSection<ELF64LE>;
3932template class elf::AndroidPackedRelocationSection<ELF64BE>;
3933
3934template class elf::RelrSection<ELF32LE>;
3935template class elf::RelrSection<ELF32BE>;
3936template class elf::RelrSection<ELF64LE>;
3937template class elf::RelrSection<ELF64BE>;
3938
3939template class elf::SymbolTableSection<ELF32LE>;
3940template class elf::SymbolTableSection<ELF32BE>;
3941template class elf::SymbolTableSection<ELF64LE>;
3942template class elf::SymbolTableSection<ELF64BE>;
3943
3944template class elf::VersionNeedSection<ELF32LE>;
3945template class elf::VersionNeedSection<ELF32BE>;
3946template class elf::VersionNeedSection<ELF64LE>;
3947template class elf::VersionNeedSection<ELF64BE>;
3948
3949template void elf::writeEhdr<ELF32LE>(uint8_t *Buf, Partition &Part);
3950template void elf::writeEhdr<ELF32BE>(uint8_t *Buf, Partition &Part);
3951template void elf::writeEhdr<ELF64LE>(uint8_t *Buf, Partition &Part);
3952template void elf::writeEhdr<ELF64BE>(uint8_t *Buf, Partition &Part);
3953
3954template void elf::writePhdrs<ELF32LE>(uint8_t *Buf, Partition &Part);
3955template void elf::writePhdrs<ELF32BE>(uint8_t *Buf, Partition &Part);
3956template void elf::writePhdrs<ELF64LE>(uint8_t *Buf, Partition &Part);
3957template void elf::writePhdrs<ELF64BE>(uint8_t *Buf, Partition &Part);
3958
3959template class elf::PartitionElfHeaderSection<ELF32LE>;
3960template class elf::PartitionElfHeaderSection<ELF32BE>;
3961template class elf::PartitionElfHeaderSection<ELF64LE>;
3962template class elf::PartitionElfHeaderSection<ELF64BE>;
3963
3964template class elf::PartitionProgramHeadersSection<ELF32LE>;
3965template class elf::PartitionProgramHeadersSection<ELF32BE>;
3966template class elf::PartitionProgramHeadersSection<ELF64LE>;
3967template class elf::PartitionProgramHeadersSection<ELF64BE>;

/usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/bits/unique_ptr.h

1// unique_ptr implementation -*- C++ -*-
2
3// Copyright (C) 2008-2020 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/unique_ptr.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{memory}
28 */
29
30#ifndef _UNIQUE_PTR_H
31#define _UNIQUE_PTR_H 1
32
33#include <bits/c++config.h>
34#include <debug/assertions.h>
35#include <type_traits>
36#include <utility>
37#include <tuple>
38#include <bits/stl_function.h>
39#include <bits/functional_hash.h>
40#if __cplusplus > 201703L
41# include <compare>
42# include <ostream>
43#endif
44
45namespace std _GLIBCXX_VISIBILITY(default)
46{
47_GLIBCXX_BEGIN_NAMESPACE_VERSION
48
49 /**
50 * @addtogroup pointer_abstractions
51 * @{
52 */
53
54#if _GLIBCXX_USE_DEPRECATED
55#pragma GCC diagnostic push
56#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
57 template<typename> class auto_ptr;
58#pragma GCC diagnostic pop
59#endif
60
61 /// Primary template of default_delete, used by unique_ptr for single objects
62 template<typename _Tp>
63 struct default_delete
64 {
65 /// Default constructor
66 constexpr default_delete() noexcept = default;
67
68 /** @brief Converting constructor.
69 *
70 * Allows conversion from a deleter for objects of another type, `_Up`,
71 * only if `_Up*` is convertible to `_Tp*`.
72 */
73 template<typename _Up,
74 typename = _Require<is_convertible<_Up*, _Tp*>>>
75 default_delete(const default_delete<_Up>&) noexcept { }
76
77 /// Calls `delete __ptr`
78 void
79 operator()(_Tp* __ptr) const
80 {
81 static_assert(!is_void<_Tp>::value,
82 "can't delete pointer to incomplete type");
83 static_assert(sizeof(_Tp)>0,
84 "can't delete pointer to incomplete type");
85 delete __ptr;
86 }
87 };
88
89 // _GLIBCXX_RESOLVE_LIB_DEFECTS
90 // DR 740 - omit specialization for array objects with a compile time length
91
92 /// Specialization of default_delete for arrays, used by `unique_ptr<T[]>`
93 template<typename _Tp>
94 struct default_delete<_Tp[]>
95 {
96 public:
97 /// Default constructor
98 constexpr default_delete() noexcept = default;
99
100 /** @brief Converting constructor.
101 *
102 * Allows conversion from a deleter for arrays of another type, such as
103 * a const-qualified version of `_Tp`.
104 *
105 * Conversions from types derived from `_Tp` are not allowed because
106 * it is undefined to `delete[]` an array of derived types through a
107 * pointer to the base type.
108 */
109 template<typename _Up,
110 typename = _Require<is_convertible<_Up(*)[], _Tp(*)[]>>>
111 default_delete(const default_delete<_Up[]>&) noexcept { }
112
113 /// Calls `delete[] __ptr`
114 template<typename _Up>
115 typename enable_if<is_convertible<_Up(*)[], _Tp(*)[]>::value>::type
116 operator()(_Up* __ptr) const
117 {
118 static_assert(sizeof(_Tp)>0,
119 "can't delete pointer to incomplete type");
120 delete [] __ptr;
121 }
122 };
123
124 /// @cond undocumented
125
126 // Manages the pointer and deleter of a unique_ptr
127 template <typename _Tp, typename _Dp>
128 class __uniq_ptr_impl
129 {
130 template <typename _Up, typename _Ep, typename = void>
131 struct _Ptr
132 {
133 using type = _Up*;
134 };
135
136 template <typename _Up, typename _Ep>
137 struct
138 _Ptr<_Up, _Ep, __void_t<typename remove_reference<_Ep>::type::pointer>>
139 {
140 using type = typename remove_reference<_Ep>::type::pointer;
141 };
142
143 public:
144 using _DeleterConstraint = enable_if<
145 __and_<__not_<is_pointer<_Dp>>,
146 is_default_constructible<_Dp>>::value>;
147
148 using pointer = typename _Ptr<_Tp, _Dp>::type;
149
150 static_assert( !is_rvalue_reference<_Dp>::value,
151 "unique_ptr's deleter type must be a function object type"
152 " or an lvalue reference type" );
153
154 __uniq_ptr_impl() = default;
155 __uniq_ptr_impl(pointer __p) : _M_t() { _M_ptr() = __p; }
156
157 template<typename _Del>
158 __uniq_ptr_impl(pointer __p, _Del&& __d)
159 : _M_t(__p, std::forward<_Del>(__d)) { }
160
161 __uniq_ptr_impl(__uniq_ptr_impl&& __u) noexcept
162 : _M_t(std::move(__u._M_t))
163 { __u._M_ptr() = nullptr; }
164
165 __uniq_ptr_impl& operator=(__uniq_ptr_impl&& __u) noexcept
166 {
167 reset(__u.release());
168 _M_deleter() = std::forward<_Dp>(__u._M_deleter());
169 return *this;
170 }
171
172 pointer& _M_ptr() { return std::get<0>(_M_t); }
173 pointer _M_ptr() const { return std::get<0>(_M_t); }
174 _Dp& _M_deleter() { return std::get<1>(_M_t); }
175 const _Dp& _M_deleter() const { return std::get<1>(_M_t); }
176
177 void reset(pointer __p) noexcept
178 {
179 const pointer __old_p = _M_ptr();
180 _M_ptr() = __p;
181 if (__old_p)
182 _M_deleter()(__old_p);
183 }
184
185 pointer release() noexcept
186 {
187 pointer __p = _M_ptr();
188 _M_ptr() = nullptr;
189 return __p;
190 }
191
192 void
193 swap(__uniq_ptr_impl& __rhs) noexcept
194 {
195 using std::swap;
196 swap(this->_M_ptr(), __rhs._M_ptr());
197 swap(this->_M_deleter(), __rhs._M_deleter());
198 }
199
200 private:
201 tuple<pointer, _Dp> _M_t;
202 };
203
204 // Defines move construction + assignment as either defaulted or deleted.
205 template <typename _Tp, typename _Dp,
206 bool = is_move_constructible<_Dp>::value,
207 bool = is_move_assignable<_Dp>::value>
208 struct __uniq_ptr_data : __uniq_ptr_impl<_Tp, _Dp>
209 {
210 using __uniq_ptr_impl<_Tp, _Dp>::__uniq_ptr_impl;
211 __uniq_ptr_data(__uniq_ptr_data&&) = default;
212 __uniq_ptr_data& operator=(__uniq_ptr_data&&) = default;
213 };
214
215 template <typename _Tp, typename _Dp>
216 struct __uniq_ptr_data<_Tp, _Dp, true, false> : __uniq_ptr_impl<_Tp, _Dp>
217 {
218 using __uniq_ptr_impl<_Tp, _Dp>::__uniq_ptr_impl;
219 __uniq_ptr_data(__uniq_ptr_data&&) = default;
220 __uniq_ptr_data& operator=(__uniq_ptr_data&&) = delete;
221 };
222
223 template <typename _Tp, typename _Dp>
224 struct __uniq_ptr_data<_Tp, _Dp, false, true> : __uniq_ptr_impl<_Tp, _Dp>
225 {
226 using __uniq_ptr_impl<_Tp, _Dp>::__uniq_ptr_impl;
227 __uniq_ptr_data(__uniq_ptr_data&&) = delete;
228 __uniq_ptr_data& operator=(__uniq_ptr_data&&) = default;
229 };
230
231 template <typename _Tp, typename _Dp>
232 struct __uniq_ptr_data<_Tp, _Dp, false, false> : __uniq_ptr_impl<_Tp, _Dp>
233 {
234 using __uniq_ptr_impl<_Tp, _Dp>::__uniq_ptr_impl;
235 __uniq_ptr_data(__uniq_ptr_data&&) = delete;
236 __uniq_ptr_data& operator=(__uniq_ptr_data&&) = delete;
237 };
238 /// @endcond
239
240 /// 20.7.1.2 unique_ptr for single objects.
241 template <typename _Tp, typename _Dp = default_delete<_Tp>>
242 class unique_ptr
243 {
244 template <typename _Up>
245 using _DeleterConstraint =
246 typename __uniq_ptr_impl<_Tp, _Up>::_DeleterConstraint::type;
247
248 __uniq_ptr_data<_Tp, _Dp> _M_t;
249
250 public:
251 using pointer = typename __uniq_ptr_impl<_Tp, _Dp>::pointer;
252 using element_type = _Tp;
253 using deleter_type = _Dp;
254
255 private:
256 // helper template for detecting a safe conversion from another
257 // unique_ptr
258 template<typename _Up, typename _Ep>
259 using __safe_conversion_up = __and_<
260 is_convertible<typename unique_ptr<_Up, _Ep>::pointer, pointer>,
261 __not_<is_array<_Up>>
262 >;
263
264 public:
265 // Constructors.
266
267 /// Default constructor, creates a unique_ptr that owns nothing.
268 template<typename _Del = _Dp, typename = _DeleterConstraint<_Del>>
269 constexpr unique_ptr() noexcept
270 : _M_t()
271 { }
272
273 /** Takes ownership of a pointer.
274 *
275 * @param __p A pointer to an object of @c element_type
276 *
277 * The deleter will be value-initialized.
278 */
279 template<typename _Del = _Dp, typename = _DeleterConstraint<_Del>>
280 explicit
281 unique_ptr(pointer __p) noexcept
282 : _M_t(__p)
283 { }
284
285 /** Takes ownership of a pointer.
286 *
287 * @param __p A pointer to an object of @c element_type
288 * @param __d A reference to a deleter.
289 *
290 * The deleter will be initialized with @p __d
291 */
292 template<typename _Del = deleter_type,
293 typename = _Require<is_copy_constructible<_Del>>>
294 unique_ptr(pointer __p, const deleter_type& __d) noexcept
295 : _M_t(__p, __d) { }
296
297 /** Takes ownership of a pointer.
298 *
299 * @param __p A pointer to an object of @c element_type
300 * @param __d An rvalue reference to a (non-reference) deleter.
301 *
302 * The deleter will be initialized with @p std::move(__d)
303 */
304 template<typename _Del = deleter_type,
305 typename = _Require<is_move_constructible<_Del>>>
306 unique_ptr(pointer __p,
307 __enable_if_t<!is_lvalue_reference<_Del>::value,
308 _Del&&> __d) noexcept
309 : _M_t(__p, std::move(__d))
310 { }
311
312 template<typename _Del = deleter_type,
313 typename _DelUnref = typename remove_reference<_Del>::type>
314 unique_ptr(pointer,
315 __enable_if_t<is_lvalue_reference<_Del>::value,
316 _DelUnref&&>) = delete;
317
318 /// Creates a unique_ptr that owns nothing.
319 template<typename _Del = _Dp, typename = _DeleterConstraint<_Del>>
320 constexpr unique_ptr(nullptr_t) noexcept
321 : _M_t()
322 { }
323
324 // Move constructors.
325
326 /// Move constructor.
327 unique_ptr(unique_ptr&&) = default;
328
329 /** @brief Converting constructor from another type
330 *
331 * Requires that the pointer owned by @p __u is convertible to the
332 * type of pointer owned by this object, @p __u does not own an array,
333 * and @p __u has a compatible deleter type.
334 */
335 template<typename _Up, typename _Ep, typename = _Require<
336 __safe_conversion_up<_Up, _Ep>,
337 typename conditional<is_reference<_Dp>::value,
338 is_same<_Ep, _Dp>,
339 is_convertible<_Ep, _Dp>>::type>>
340 unique_ptr(unique_ptr<_Up, _Ep>&& __u) noexcept
341 : _M_t(__u.release(), std::forward<_Ep>(__u.get_deleter()))
342 { }
343
344#if _GLIBCXX_USE_DEPRECATED
345#pragma GCC diagnostic push
346#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
347 /// Converting constructor from @c auto_ptr
348 template<typename _Up, typename = _Require<
349 is_convertible<_Up*, _Tp*>, is_same<_Dp, default_delete<_Tp>>>>
350 unique_ptr(auto_ptr<_Up>&& __u) noexcept;
351#pragma GCC diagnostic pop
352#endif
353
354 /// Destructor, invokes the deleter if the stored pointer is not null.
355 ~unique_ptr() noexcept
356 {
357 static_assert(__is_invocable<deleter_type&, pointer>::value,
358 "unique_ptr's deleter must be invocable with a pointer");
359 auto& __ptr = _M_t._M_ptr();
360 if (__ptr != nullptr)
361 get_deleter()(std::move(__ptr));
362 __ptr = pointer();
363 }
364
365 // Assignment.
366
367 /** @brief Move assignment operator.
368 *
369 * Invokes the deleter if this object owns a pointer.
370 */
371 unique_ptr& operator=(unique_ptr&&) = default;
372
373 /** @brief Assignment from another type.
374 *
375 * @param __u The object to transfer ownership from, which owns a
376 * convertible pointer to a non-array object.
377 *
378 * Invokes the deleter if this object owns a pointer.
379 */
380 template<typename _Up, typename _Ep>
381 typename enable_if< __and_<
382 __safe_conversion_up<_Up, _Ep>,
383 is_assignable<deleter_type&, _Ep&&>
384 >::value,
385 unique_ptr&>::type
386 operator=(unique_ptr<_Up, _Ep>&& __u) noexcept
387 {
388 reset(__u.release());
389 get_deleter() = std::forward<_Ep>(__u.get_deleter());
390 return *this;
391 }
392
393 /// Reset the %unique_ptr to empty, invoking the deleter if necessary.
394 unique_ptr&
395 operator=(nullptr_t) noexcept
396 {
397 reset();
398 return *this;
399 }
400
401 // Observers.
402
403 /// Dereference the stored pointer.
404 typename add_lvalue_reference<element_type>::type
405 operator*() const
406 {
407 __glibcxx_assert(get() != pointer());
408 return *get();
409 }
410
411 /// Return the stored pointer.
412 pointer
413 operator->() const noexcept
414 {
415 _GLIBCXX_DEBUG_PEDASSERT(get() != pointer());
416 return get();
417 }
418
419 /// Return the stored pointer.
420 pointer
421 get() const noexcept
422 { return _M_t._M_ptr(); }
423
424 /// Return a reference to the stored deleter.
425 deleter_type&
426 get_deleter() noexcept
427 { return _M_t._M_deleter(); }
428
429 /// Return a reference to the stored deleter.
430 const deleter_type&
431 get_deleter() const noexcept
432 { return _M_t._M_deleter(); }
433
434 /// Return @c true if the stored pointer is not null.
435 explicit operator bool() const noexcept
436 { return get() == pointer() ? false : true; }
437
438 // Modifiers.
439
440 /// Release ownership of any stored pointer.
441 pointer
442 release() noexcept
443 { return _M_t.release(); }
444
445 /** @brief Replace the stored pointer.
446 *
447 * @param __p The new pointer to store.
448 *
449 * The deleter will be invoked if a pointer is already owned.
450 */
451 void
452 reset(pointer __p = pointer()) noexcept
453 {
454 static_assert(__is_invocable<deleter_type&, pointer>::value,
455 "unique_ptr's deleter must be invocable with a pointer");
456 _M_t.reset(std::move(__p));
457 }
458
459 /// Exchange the pointer and deleter with another object.
460 void
461 swap(unique_ptr& __u) noexcept
462 {
463 static_assert(__is_swappable<_Dp>::value, "deleter must be swappable");
464 _M_t.swap(__u._M_t);
465 }
466
467 // Disable copy from lvalue.
468 unique_ptr(const unique_ptr&) = delete;
469 unique_ptr& operator=(const unique_ptr&) = delete;
470 };
471
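A minimal usage sketch of the single-object form above (editorial example, not part of the analyzed source; FileCloser and the file path are hypothetical). The custom deleter becomes deleter_type and is invoked by the destructor only when the stored pointer is non-null:

    // Sketch only: exercises unique_ptr<T, D> with a custom deleter.
    #include <cstdio>
    #include <memory>

    struct FileCloser {
      void operator()(std::FILE *F) const {
        if (F)
          std::fclose(F);
      }
    };

    int main() {
      std::unique_ptr<std::FILE, FileCloser> F(std::fopen("/tmp/demo.txt", "w"));
      if (F) // explicit operator bool: get() != pointer()
        std::fputs("hello\n", F.get());
      return 0; // ~unique_ptr() runs FileCloser on the non-null pointer
    }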
472 /// 20.7.1.3 unique_ptr for array objects with a runtime length
473 // [unique.ptr.runtime]
474 // _GLIBCXX_RESOLVE_LIB_DEFECTS
475 // DR 740 - omit specialization for array objects with a compile time length
476 template<typename _Tp, typename _Dp>
477 class unique_ptr<_Tp[], _Dp>
478 {
479 template <typename _Up>
480 using _DeleterConstraint =
481 typename __uniq_ptr_impl<_Tp, _Up>::_DeleterConstraint::type;
482
483 __uniq_ptr_data<_Tp, _Dp> _M_t;
484
485 template<typename _Up>
486 using __remove_cv = typename remove_cv<_Up>::type;
487
488 // like is_base_of<_Tp, _Up> but false if unqualified types are the same
489 template<typename _Up>
490 using __is_derived_Tp
491 = __and_< is_base_of<_Tp, _Up>,
492 __not_<is_same<__remove_cv<_Tp>, __remove_cv<_Up>>> >;
493
494 public:
495 using pointer = typename __uniq_ptr_impl<_Tp, _Dp>::pointer;
496 using element_type = _Tp;
497 using deleter_type = _Dp;
498
499 // helper template for detecting a safe conversion from another
500 // unique_ptr
501 template<typename _Up, typename _Ep,
502 typename _UPtr = unique_ptr<_Up, _Ep>,
503 typename _UP_pointer = typename _UPtr::pointer,
504 typename _UP_element_type = typename _UPtr::element_type>
505 using __safe_conversion_up = __and_<
506 is_array<_Up>,
507 is_same<pointer, element_type*>,
508 is_same<_UP_pointer, _UP_element_type*>,
509 is_convertible<_UP_element_type(*)[], element_type(*)[]>
510 >;
511
512 // helper template for detecting a safe conversion from a raw pointer
513 template<typename _Up>
514 using __safe_conversion_raw = __and_<
515 __or_<__or_<is_same<_Up, pointer>,
516 is_same<_Up, nullptr_t>>,
517 __and_<is_pointer<_Up>,
518 is_same<pointer, element_type*>,
519 is_convertible<
520 typename remove_pointer<_Up>::type(*)[],
521 element_type(*)[]>
522 >
523 >
524 >;
525
526 // Constructors.
527
528 /// Default constructor, creates a unique_ptr that owns nothing.
529 template<typename _Del = _Dp, typename = _DeleterConstraint<_Del>>
530 constexpr unique_ptr() noexcept
531 : _M_t()
532 { }
533
534 /** Takes ownership of a pointer.
535 *
536 * @param __p A pointer to an array of a type safely convertible
537 * to an array of @c element_type
538 *
539 * The deleter will be value-initialized.
540 */
541 template<typename _Up,
542 typename _Vp = _Dp,
543 typename = _DeleterConstraint<_Vp>,
544 typename = typename enable_if<
545 __safe_conversion_raw<_Up>::value, bool>::type>
546 explicit
547 unique_ptr(_Up __p) noexcept
548 : _M_t(__p)
    3. Calling constructor for '__uniq_ptr_data<unsigned long, std::default_delete<unsigned long[]>, true, true>'
    4. Calling constructor for '__uniq_ptr_impl<unsigned long, std::default_delete<unsigned long[]>>'
    5. Returning from constructor for '__uniq_ptr_impl<unsigned long, std::default_delete<unsigned long[]>>'
    6. Returning from constructor for '__uniq_ptr_data<unsigned long, std::default_delete<unsigned long[]>, true, true>'
549 { }
550
551 /** Takes ownership of a pointer.
552 *
553 * @param __p A pointer to an array of a type safely convertible
554 * to an array of @c element_type
555 * @param __d A reference to a deleter.
556 *
557 * The deleter will be initialized with @p __d
558 */
559 template<typename _Up, typename _Del = deleter_type,
560 typename = _Require<__safe_conversion_raw<_Up>,
561 is_copy_constructible<_Del>>>
562 unique_ptr(_Up __p, const deleter_type& __d) noexcept
563 : _M_t(__p, __d) { }
564
565 /** Takes ownership of a pointer.
566 *
567 * @param __p A pointer to an array of a type safely convertible
568 * to an array of @c element_type
569 * @param __d A reference to a deleter.
570 *
571 * The deleter will be initialized with @p std::move(__d)
572 */
573 template<typename _Up, typename _Del = deleter_type,
574 typename = _Require<__safe_conversion_raw<_Up>,
575 is_move_constructible<_Del>>>
576 unique_ptr(_Up __p,
577 __enable_if_t<!is_lvalue_reference<_Del>::value,
578 _Del&&> __d) noexcept
579 : _M_t(std::move(__p), std::move(__d))
580 { }
581
582 template<typename _Up, typename _Del = deleter_type,
583 typename _DelUnref = typename remove_reference<_Del>::type,
584 typename = _Require<__safe_conversion_raw<_Up>>>
585 unique_ptr(_Up,
586 __enable_if_t<is_lvalue_reference<_Del>::value,
587 _DelUnref&&>) = delete;
588
589 /// Move constructor.
590 unique_ptr(unique_ptr&&) = default;
591
592 /// Creates a unique_ptr that owns nothing.
593 template<typename _Del = _Dp, typename = _DeleterConstraint<_Del>>
594 constexpr unique_ptr(nullptr_t) noexcept
595 : _M_t()
596 { }
597
598 template<typename _Up, typename _Ep, typename = _Require<
599 __safe_conversion_up<_Up, _Ep>,
600 typename conditional<is_reference<_Dp>::value,
601 is_same<_Ep, _Dp>,
602 is_convertible<_Ep, _Dp>>::type>>
603 unique_ptr(unique_ptr<_Up, _Ep>&& __u) noexcept
604 : _M_t(__u.release(), std::forward<_Ep>(__u.get_deleter()))
605 { }
606
607 /// Destructor, invokes the deleter if the stored pointer is not null.
608 ~unique_ptr()
609 {
610 auto& __ptr = _M_t._M_ptr();
611 if (__ptr != nullptr)
612 get_deleter()(__ptr);
613 __ptr = pointer();
614 }
615
616 // Assignment.
617
618 /** @brief Move assignment operator.
619 *
620 * Invokes the deleter if this object owns a pointer.
621 */
622 unique_ptr&
623 operator=(unique_ptr&&) = default;
624
625 /** @brief Assignment from another type.
626 *
627 * @param __u The object to transfer ownership from, which owns a
628 * convertible pointer to an array object.
629 *
630 * Invokes the deleter if this object owns a pointer.
631 */
632 template<typename _Up, typename _Ep>
633 typename
634 enable_if<__and_<__safe_conversion_up<_Up, _Ep>,
635 is_assignable<deleter_type&, _Ep&&>
636 >::value,
637 unique_ptr&>::type
638 operator=(unique_ptr<_Up, _Ep>&& __u) noexcept
639 {
640 reset(__u.release());
641 get_deleter() = std::forward<_Ep>(__u.get_deleter());
642 return *this;
643 }
644
645 /// Reset the %unique_ptr to empty, invoking the deleter if necessary.
646 unique_ptr&
647 operator=(nullptr_t) noexcept
648 {
649 reset();
650 return *this;
651 }
652
653 // Observers.
654
655 /// Access an element of owned array.
656 typename std::add_lvalue_reference<element_type>::type
657 operator[](size_t __i) const
658 {
659 __glibcxx_assert(get() != pointer());
660 return get()[__i];
661 }
662
663 /// Return the stored pointer.
664 pointer
665 get() const noexcept
666 { return _M_t._M_ptr(); }
667
668 /// Return a reference to the stored deleter.
669 deleter_type&
670 get_deleter() noexcept
671 { return _M_t._M_deleter(); }
672
673 /// Return a reference to the stored deleter.
674 const deleter_type&
675 get_deleter() const noexcept
676 { return _M_t._M_deleter(); }
677
678 /// Return @c true if the stored pointer is not null.
679 explicit operator bool() const noexcept
680 { return get() == pointer() ? false : true; }
681
682 // Modifiers.
683
684 /// Release ownership of any stored pointer.
685 pointer
686 release() noexcept
687 { return _M_t.release(); }
688
689 /** @brief Replace the stored pointer.
690 *
691 * @param __p The new pointer to store.
692 *
693 * The deleter will be invoked if a pointer is already owned.
694 */
695 template <typename _Up,
696 typename = _Require<
697 __or_<is_same<_Up, pointer>,
698 __and_<is_same<pointer, element_type*>,
699 is_pointer<_Up>,
700 is_convertible<
701 typename remove_pointer<_Up>::type(*)[],
702 element_type(*)[]
703 >
704 >
705 >
706 >>
707 void
708 reset(_Up __p) noexcept
709 { _M_t.reset(std::move(__p)); }
710
711 void reset(nullptr_t = nullptr) noexcept
712 { reset(pointer()); }
713
714 /// Exchange the pointer and deleter with another object.
715 void
716 swap(unique_ptr& __u) noexcept
717 {
718 static_assert(__is_swappable<_Dp>::value, "deleter must be swappable");
719 _M_t.swap(__u._M_t);
720 }
721
722 // Disable copy from lvalue.
723 unique_ptr(const unique_ptr&) = delete;
724 unique_ptr& operator=(const unique_ptr&) = delete;
725 };
726
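The diagnosed path earlier in this report runs through exactly this specialization, instantiated for unsigned long[]. A minimal sketch of the array form (editorial example, not part of the analyzed source):

    // Sketch only: the array form uses default_delete<T[]> (delete[]) and
    // exposes operator[] rather than operator* / operator->.
    #include <memory>
    #include <numeric>

    int main() {
      std::unique_ptr<unsigned long[]> Buf(new unsigned long[8]()); // value-initialized
      std::iota(Buf.get(), Buf.get() + 8, 0UL);                     // 0, 1, ..., 7
      return static_cast<int>(Buf[3]);                              // operator[] above
    }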
727 /// @relates unique_ptr @{
728
729 /// Swap overload for unique_ptr
730 template<typename _Tp, typename _Dp>
731 inline
732#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
733 // Constrained free swap overload, see p0185r1
734 typename enable_if<__is_swappable<_Dp>::value>::type
735#else
736 void
737#endif
738 swap(unique_ptr<_Tp, _Dp>& __x,
739 unique_ptr<_Tp, _Dp>& __y) noexcept
740 { __x.swap(__y); }
741
742#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
743 template<typename _Tp, typename _Dp>
744 typename enable_if<!__is_swappable<_Dp>::value>::type
745 swap(unique_ptr<_Tp, _Dp>&,
746 unique_ptr<_Tp, _Dp>&) = delete;
747#endif
748
749 /// Equality operator for unique_ptr objects, compares the owned pointers
750 template<typename _Tp, typename _Dp,
751 typename _Up, typename _Ep>
752    _GLIBCXX_NODISCARD inline bool
753 operator==(const unique_ptr<_Tp, _Dp>& __x,
754 const unique_ptr<_Up, _Ep>& __y)
755 { return __x.get() == __y.get(); }
756
757 /// unique_ptr comparison with nullptr
758 template<typename _Tp, typename _Dp>
759    _GLIBCXX_NODISCARD inline bool
760 operator==(const unique_ptr<_Tp, _Dp>& __x, nullptr_t) noexcept
761 { return !__x; }
762
763#ifndef __cpp_lib_three_way_comparison
764 /// unique_ptr comparison with nullptr
765 template<typename _Tp, typename _Dp>
766    _GLIBCXX_NODISCARD inline bool
767 operator==(nullptr_t, const unique_ptr<_Tp, _Dp>& __x) noexcept
768 { return !__x; }
769
770 /// Inequality operator for unique_ptr objects, compares the owned pointers
771 template<typename _Tp, typename _Dp,
772 typename _Up, typename _Ep>
773    _GLIBCXX_NODISCARD inline bool
774 operator!=(const unique_ptr<_Tp, _Dp>& __x,
775 const unique_ptr<_Up, _Ep>& __y)
776 { return __x.get() != __y.get(); }
777
778 /// unique_ptr comparison with nullptr
779 template<typename _Tp, typename _Dp>
780    _GLIBCXX_NODISCARD inline bool
781 operator!=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t) noexcept
782 { return (bool)__x; }
783
784 /// unique_ptr comparison with nullptr
785 template<typename _Tp, typename _Dp>
786    _GLIBCXX_NODISCARD inline bool
787 operator!=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x) noexcept
788 { return (bool)__x; }
789#endif // three way comparison
790
791 /// Relational operator for unique_ptr objects, compares the owned pointers
792 template<typename _Tp, typename _Dp,
793 typename _Up, typename _Ep>
794    _GLIBCXX_NODISCARD inline bool
795 operator<(const unique_ptr<_Tp, _Dp>& __x,
796 const unique_ptr<_Up, _Ep>& __y)
797 {
798 typedef typename
799 std::common_type<typename unique_ptr<_Tp, _Dp>::pointer,
800 typename unique_ptr<_Up, _Ep>::pointer>::type _CT;
801 return std::less<_CT>()(__x.get(), __y.get());
802 }
803
804 /// unique_ptr comparison with nullptr
805 template<typename _Tp, typename _Dp>
806    _GLIBCXX_NODISCARD inline bool
807 operator<(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
808 {
809 return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(__x.get(),
810 nullptr);
811 }
812
813 /// unique_ptr comparison with nullptr
814 template<typename _Tp, typename _Dp>
815    _GLIBCXX_NODISCARD inline bool
816 operator<(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
817 {
818 return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(nullptr,
819 __x.get());
820 }
821
822 /// Relational operator for unique_ptr objects, compares the owned pointers
823 template<typename _Tp, typename _Dp,
824 typename _Up, typename _Ep>
825    _GLIBCXX_NODISCARD inline bool
826 operator<=(const unique_ptr<_Tp, _Dp>& __x,
827 const unique_ptr<_Up, _Ep>& __y)
828 { return !(__y < __x); }
829
830 /// unique_ptr comparison with nullptr
831 template<typename _Tp, typename _Dp>
832    _GLIBCXX_NODISCARD inline bool
833 operator<=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
834 { return !(nullptr < __x); }
835
836 /// unique_ptr comparison with nullptr
837 template<typename _Tp, typename _Dp>
838    _GLIBCXX_NODISCARD inline bool
839 operator<=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
840 { return !(__x < nullptr); }
841
842 /// Relational operator for unique_ptr objects, compares the owned pointers
843 template<typename _Tp, typename _Dp,
844 typename _Up, typename _Ep>
845    _GLIBCXX_NODISCARD inline bool
846 operator>(const unique_ptr<_Tp, _Dp>& __x,
847 const unique_ptr<_Up, _Ep>& __y)
848 { return (__y < __x); }
849
850 /// unique_ptr comparison with nullptr
851 template<typename _Tp, typename _Dp>
852    _GLIBCXX_NODISCARD inline bool
853 operator>(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
854 {
855 return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(nullptr,
856 __x.get());
857 }
858
859 /// unique_ptr comparison with nullptr
860 template<typename _Tp, typename _Dp>
861    _GLIBCXX_NODISCARD inline bool
862 operator>(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
863 {
864 return std::less<typename unique_ptr<_Tp, _Dp>::pointer>()(__x.get(),
865 nullptr);
866 }
867
868 /// Relational operator for unique_ptr objects, compares the owned pointers
869 template<typename _Tp, typename _Dp,
870 typename _Up, typename _Ep>
871    _GLIBCXX_NODISCARD inline bool
872 operator>=(const unique_ptr<_Tp, _Dp>& __x,
873 const unique_ptr<_Up, _Ep>& __y)
874 { return !(__x < __y); }
875
876 /// unique_ptr comparison with nullptr
877 template<typename _Tp, typename _Dp>
878    _GLIBCXX_NODISCARD inline bool
879 operator>=(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
880 { return !(__x < nullptr); }
881
882 /// unique_ptr comparison with nullptr
883 template<typename _Tp, typename _Dp>
884    _GLIBCXX_NODISCARD inline bool
885 operator>=(nullptr_t, const unique_ptr<_Tp, _Dp>& __x)
886 { return !(nullptr < __x); }
887
888#ifdef __cpp_lib_three_way_comparison
889 template<typename _Tp, typename _Dp, typename _Up, typename _Ep>
890 requires three_way_comparable_with<typename unique_ptr<_Tp, _Dp>::pointer,
891 typename unique_ptr<_Up, _Ep>::pointer>
892 inline
893 compare_three_way_result_t<typename unique_ptr<_Tp, _Dp>::pointer,
894 typename unique_ptr<_Up, _Ep>::pointer>
895 operator<=>(const unique_ptr<_Tp, _Dp>& __x,
896 const unique_ptr<_Up, _Ep>& __y)
897 { return compare_three_way()(__x.get(), __y.get()); }
898
899 template<typename _Tp, typename _Dp>
900 requires three_way_comparable<typename unique_ptr<_Tp, _Dp>::pointer>
901 inline
902 compare_three_way_result_t<typename unique_ptr<_Tp, _Dp>::pointer>
903 operator<=>(const unique_ptr<_Tp, _Dp>& __x, nullptr_t)
904 {
905 using pointer = typename unique_ptr<_Tp, _Dp>::pointer;
906 return compare_three_way()(__x.get(), static_cast<pointer>(nullptr));
907 }
908#endif
909 // @} relates unique_ptr
910
911 /// @cond undocumented
912 template<typename _Up, typename _Ptr = typename _Up::pointer,
913 bool = __poison_hash<_Ptr>::__enable_hash_call>
914 struct __uniq_ptr_hash
915#if ! _GLIBCXX_INLINE_VERSION
916 : private __poison_hash<_Ptr>
917#endif
918 {
919 size_t
920 operator()(const _Up& __u) const
921 noexcept(noexcept(std::declval<hash<_Ptr>>()(std::declval<_Ptr>())))
922 { return hash<_Ptr>()(__u.get()); }
923 };
924
925 template<typename _Up, typename _Ptr>
926 struct __uniq_ptr_hash<_Up, _Ptr, false>
927 : private __poison_hash<_Ptr>
928 { };
929 /// @endcond
930
931 /// std::hash specialization for unique_ptr.
932 template<typename _Tp, typename _Dp>
933 struct hash<unique_ptr<_Tp, _Dp>>
934 : public __hash_base<size_t, unique_ptr<_Tp, _Dp>>,
935 public __uniq_ptr_hash<unique_ptr<_Tp, _Dp>>
936 { };
937
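As a sketch (editorial example, not part of the analyzed source; hashesMatch is a hypothetical helper), the specialization above delegates to hash<pointer>, so a unique_ptr hashes identically to its raw pointer:

    #include <functional>
    #include <memory>

    bool hashesMatch() {
      auto P = std::make_unique<int>(7);
      return std::hash<std::unique_ptr<int>>{}(P) == std::hash<int *>{}(P.get());
    }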
938#if __cplusplus >= 201402L
939 /// @relates unique_ptr @{
940#define __cpp_lib_make_unique 201304
941
942 /// @cond undocumented
943
944 template<typename _Tp>
945 struct _MakeUniq
946 { typedef unique_ptr<_Tp> __single_object; };
947
948 template<typename _Tp>
949 struct _MakeUniq<_Tp[]>
950 { typedef unique_ptr<_Tp[]> __array; };
951
952 template<typename _Tp, size_t _Bound>
953 struct _MakeUniq<_Tp[_Bound]>
954 { struct __invalid_type { }; };
955
956 /// @endcond
957
958 /// std::make_unique for single objects
959 template<typename _Tp, typename... _Args>
960 inline typename _MakeUniq<_Tp>::__single_object
961 make_unique(_Args&&... __args)
962 { return unique_ptr<_Tp>(new _Tp(std::forward<_Args>(__args)...)); }
963
964 /// std::make_unique for arrays of unknown bound
965 template<typename _Tp>
966 inline typename _MakeUniq<_Tp>::__array
967 make_unique(size_t __num)
968 { return unique_ptr<_Tp>(new remove_extent_t<_Tp>[__num]()); }
969
970 /// Disable std::make_unique for arrays of known bound
971 template<typename _Tp, typename... _Args>
972 inline typename _MakeUniq<_Tp>::__invalid_type
973 make_unique(_Args&&...) = delete;
974 // @} relates unique_ptr
975#endif // C++14
976
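A sketch of the three make_unique shapes declared above (editorial example, not part of the analyzed source; demoMakeUnique is a hypothetical helper):

    #include <memory>
    #include <string>

    int demoMakeUnique() {
      auto S = std::make_unique<std::string>(3, 'x'); // single object: "xxx"
      auto A = std::make_unique<int[]>(4);            // unknown bound: 4 value-initialized ints
      // auto B = std::make_unique<int[4]>();         // ill-formed: deleted overload above
      return static_cast<int>(S->size()) + A[0];      // 3 + 0
    }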
977#if __cplusplus > 201703L && __cpp_concepts
978 // _GLIBCXX_RESOLVE_LIB_DEFECTS
979 // 2948. unique_ptr does not define operator<< for stream output
980 /// Stream output operator for unique_ptr
981 template<typename _CharT, typename _Traits, typename _Tp, typename _Dp>
982 inline basic_ostream<_CharT, _Traits>&
983 operator<<(basic_ostream<_CharT, _Traits>& __os,
984 const unique_ptr<_Tp, _Dp>& __p)
985 requires requires { __os << __p.get(); }
986 {
987 __os << __p.get();
988 return __os;
989 }
990#endif // C++20
991
992 // @} group pointer_abstractions
993
994#if __cplusplus >= 201703L
995 namespace __detail::__variant
996 {
997 template<typename> struct _Never_valueless_alt; // see <variant>
998
999 // Provide the strong exception-safety guarantee when emplacing a
1000 // unique_ptr into a variant.
1001 template<typename _Tp, typename _Del>
1002 struct _Never_valueless_alt<std::unique_ptr<_Tp, _Del>>
1003 : std::true_type
1004 { };
1005 } // namespace __detail::__variant
1006#endif // C++17
1007
1008_GLIBCXX_END_NAMESPACE_VERSION
1009} // namespace
1010
1011#endif /* _UNIQUE_PTR_H */

/build/source/llvm/include/llvm/ADT/STLExtras.h

1//===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8///
9/// \file
10/// This file contains some templates that are useful if you are working with
11/// the STL at all.
12///
13/// No library is required when using these functions.
14///
15//===----------------------------------------------------------------------===//
16
17#ifndef LLVM_ADT_STLEXTRAS_H
18#define LLVM_ADT_STLEXTRAS_H
19
20#include "llvm/ADT/Hashing.h"
21#include "llvm/ADT/STLForwardCompat.h"
22#include "llvm/ADT/STLFunctionalExtras.h"
23#include "llvm/ADT/identity.h"
24#include "llvm/ADT/iterator.h"
25#include "llvm/ADT/iterator_range.h"
26#include "llvm/Config/abi-breaking.h"
27#include "llvm/Support/ErrorHandling.h"
28#include <algorithm>
29#include <cassert>
30#include <cstddef>
31#include <cstdint>
32#include <cstdlib>
33#include <functional>
34#include <initializer_list>
35#include <iterator>
36#include <limits>
37#include <memory>
38#include <optional>
39#include <tuple>
40#include <type_traits>
41#include <utility>
42
43#ifdef EXPENSIVE_CHECKS
44#include <random> // for std::mt19937
45#endif
46
47namespace llvm {
48
49// Only used by compiler if both template types are the same. Useful when
50// using SFINAE to test for the existence of member functions.
51template <typename T, T> struct SameType;
52
53namespace detail {
54
55template <typename RangeT>
56using IterOfRange = decltype(std::begin(std::declval<RangeT &>()));
57
58template <typename RangeT>
59using ValueOfRange =
60 std::remove_reference_t<decltype(*std::begin(std::declval<RangeT &>()))>;
61
62} // end namespace detail
63
64//===----------------------------------------------------------------------===//
65// Extra additions to <type_traits>
66//===----------------------------------------------------------------------===//
67
68template <typename T> struct make_const_ptr {
69 using type = std::add_pointer_t<std::add_const_t<T>>;
70};
71
72template <typename T> struct make_const_ref {
73 using type = std::add_lvalue_reference_t<std::add_const_t<T>>;
74};
75
76namespace detail {
77template <class, template <class...> class Op, class... Args> struct detector {
78 using value_t = std::false_type;
79};
80template <template <class...> class Op, class... Args>
81struct detector<std::void_t<Op<Args...>>, Op, Args...> {
82 using value_t = std::true_type;
83};
84} // end namespace detail
85
86/// Detects if a given trait holds for some set of arguments 'Args'.
87/// For example, the given trait could be used to detect if a given type
88/// has a copy assignment operator:
89/// template<class T>
90/// using has_copy_assign_t = decltype(std::declval<T&>()
91/// = std::declval<const T&>());
92/// bool fooHasCopyAssign = is_detected<has_copy_assign_t, FooClass>::value;
93template <template <class...> class Op, class... Args>
94using is_detected = typename detail::detector<void, Op, Args...>::value_t;
95
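The doc comment's has_copy_assign_t example, made concrete as a sketch (editorial example, not part of the analyzed source; Copyable and NoCopy are hypothetical types):

    #include "llvm/ADT/STLExtras.h"
    #include <utility>

    template <class T>
    using has_copy_assign_t =
        decltype(std::declval<T &>() = std::declval<const T &>());

    struct Copyable {};
    struct NoCopy { NoCopy &operator=(const NoCopy &) = delete; };

    static_assert(llvm::is_detected<has_copy_assign_t, Copyable>::value, "");
    static_assert(!llvm::is_detected<has_copy_assign_t, NoCopy>::value, "");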
96/// This class provides various trait information about a callable object.
97/// * To access the number of arguments: Traits::num_args
98/// * To access the type of an argument: Traits::arg_t<Index>
99/// * To access the type of the result: Traits::result_t
100template <typename T, bool isClass = std::is_class<T>::value>
101struct function_traits : public function_traits<decltype(&T::operator())> {};
102
103/// Overload for class function types.
104template <typename ClassType, typename ReturnType, typename... Args>
105struct function_traits<ReturnType (ClassType::*)(Args...) const, false> {
106 /// The number of arguments to this function.
107 enum { num_args = sizeof...(Args) };
108
109 /// The result type of this function.
110 using result_t = ReturnType;
111
112 /// The type of an argument to this function.
113 template <size_t Index>
114 using arg_t = std::tuple_element_t<Index, std::tuple<Args...>>;
115};
116/// Overload for class function types.
117template <typename ClassType, typename ReturnType, typename... Args>
118struct function_traits<ReturnType (ClassType::*)(Args...), false>
119 : public function_traits<ReturnType (ClassType::*)(Args...) const> {};
120/// Overload for non-class function types.
121template <typename ReturnType, typename... Args>
122struct function_traits<ReturnType (*)(Args...), false> {
123 /// The number of arguments to this function.
124 enum { num_args = sizeof...(Args) };
125
126 /// The result type of this function.
127 using result_t = ReturnType;
128
129 /// The type of an argument to this function.
130 template <size_t i>
131 using arg_t = std::tuple_element_t<i, std::tuple<Args...>>;
132};
133template <typename ReturnType, typename... Args>
134struct function_traits<ReturnType (*const)(Args...), false>
135 : public function_traits<ReturnType (*)(Args...)> {};
136/// Overload for non-class function type references.
137template <typename ReturnType, typename... Args>
138struct function_traits<ReturnType (&)(Args...), false>
139 : public function_traits<ReturnType (*)(Args...)> {};
140
141/// traits class for checking whether type T is one of any of the given
142/// types in the variadic list.
143template <typename T, typename... Ts>
144using is_one_of = std::disjunction<std::is_same<T, Ts>...>;
145
146/// traits class for checking whether type T is a base class for all
147/// the given types in the variadic list.
148template <typename T, typename... Ts>
149using are_base_of = std::conjunction<std::is_base_of<T, Ts>...>;
150
151namespace detail {
152template <typename T, typename... Us> struct TypesAreDistinct;
153template <typename T, typename... Us>
154struct TypesAreDistinct
155 : std::integral_constant<bool, !is_one_of<T, Us...>::value &&
156 TypesAreDistinct<Us...>::value> {};
157template <typename T> struct TypesAreDistinct<T> : std::true_type {};
158} // namespace detail
159
160/// Determine if all types in Ts are distinct.
161///
162/// Useful to statically assert when Ts is intended to describe a non-multi set
163/// of types.
164///
165/// Expensive (currently quadratic in sizeof(Ts...)), and so should only be
166/// asserted once per instantiation of a type which requires it.
167template <typename... Ts> struct TypesAreDistinct;
168template <> struct TypesAreDistinct<> : std::true_type {};
169template <typename... Ts>
170struct TypesAreDistinct
171 : std::integral_constant<bool, detail::TypesAreDistinct<Ts...>::value> {};
172
173/// Find the first index where a type appears in a list of types.
174///
175/// FirstIndexOfType<T, Us...>::value is the first index of T in Us.
176///
177/// Typically only meaningful when it is otherwise statically known that the
178/// type pack has no duplicate types. This should be guaranteed explicitly with
179/// static_assert(TypesAreDistinct<Us...>::value).
180///
181/// It is a compile-time error to instantiate when T is not present in Us, i.e.
182/// if is_one_of<T, Us...>::value is false.
183template <typename T, typename... Us> struct FirstIndexOfType;
184template <typename T, typename U, typename... Us>
185struct FirstIndexOfType<T, U, Us...>
186 : std::integral_constant<size_t, 1 + FirstIndexOfType<T, Us...>::value> {};
187template <typename T, typename... Us>
188struct FirstIndexOfType<T, T, Us...> : std::integral_constant<size_t, 0> {};
189
190/// Find the type at a given index in a list of types.
191///
192/// TypeAtIndex<I, Ts...> is the type at index I in Ts.
193template <size_t I, typename... Ts>
194using TypeAtIndex = std::tuple_element_t<I, std::tuple<Ts...>>;
195
196/// Helper which adds two underlying types of enumeration type.
197/// Implicit conversion to a common type is accepted.
198template <typename EnumTy1, typename EnumTy2,
199 typename UT1 = std::enable_if_t<std::is_enum<EnumTy1>::value,
200 std::underlying_type_t<EnumTy1>>,
201 typename UT2 = std::enable_if_t<std::is_enum<EnumTy2>::value,
202 std::underlying_type_t<EnumTy2>>>
203constexpr auto addEnumValues(EnumTy1 LHS, EnumTy2 RHS) {
204 return static_cast<UT1>(LHS) + static_cast<UT2>(RHS);
205}
206
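A sketch (editorial example, not part of the analyzed source; the enums are hypothetical): since addEnumValues is constexpr, the mixed-enum sum can be checked at compile time.

    #include "llvm/ADT/STLExtras.h"

    enum class Lo : int { One = 1 };
    enum class Hi : unsigned { Two = 2 };

    static_assert(llvm::addEnumValues(Lo::One, Hi::Two) == 3,
                  "adds the underlying values, 1 + 2");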
207//===----------------------------------------------------------------------===//
208// Extra additions to <iterator>
209//===----------------------------------------------------------------------===//
210
211namespace callable_detail {
212
213/// Templated storage wrapper for a callable.
214///
215/// This class is consistently default constructible, copy / move
216/// constructible / assignable.
217///
218/// Supported callable types:
219/// - Function pointer
220/// - Function reference
221/// - Lambda
222/// - Function object
223template <typename T,
224 bool = std::is_function_v<std::remove_pointer_t<remove_cvref_t<T>>>>
225class Callable {
226 using value_type = std::remove_reference_t<T>;
227 using reference = value_type &;
228 using const_reference = value_type const &;
229
230 std::optional<value_type> Obj;
231
232 static_assert(!std::is_pointer_v<value_type>,
233 "Pointers to non-functions are not callable.");
234
235public:
236 Callable() = default;
237 Callable(T const &O) : Obj(std::in_place, O) {}
238
239 Callable(Callable const &Other) = default;
240 Callable(Callable &&Other) = default;
241
242 Callable &operator=(Callable const &Other) {
243 Obj = std::nullopt;
244 if (Other.Obj)
245 Obj.emplace(*Other.Obj);
246 return *this;
247 }
248
249 Callable &operator=(Callable &&Other) {
250 Obj = std::nullopt;
251 if (Other.Obj)
252 Obj.emplace(std::move(*Other.Obj));
253 return *this;
254 }
255
256 template <typename... Pn,
257 std::enable_if_t<std::is_invocable_v<T, Pn...>, int> = 0>
258 decltype(auto) operator()(Pn &&...Params) {
259 return (*Obj)(std::forward<Pn>(Params)...);
260 }
261
262 template <typename... Pn,
263 std::enable_if_t<std::is_invocable_v<T const, Pn...>, int> = 0>
264 decltype(auto) operator()(Pn &&...Params) const {
265 return (*Obj)(std::forward<Pn>(Params)...);
266 }
267
268 bool valid() const { return Obj != std::nullopt; }
269 bool reset() { return Obj = std::nullopt; }
270
271 operator reference() { return *Obj; }
272 operator const_reference() const { return *Obj; }
273};
274
275// Function specialization. No need to waste extra space wrapping with a
276// std::optional.
277template <typename T> class Callable<T, true> {
278 static constexpr bool IsPtr = std::is_pointer_v<remove_cvref_t<T>>;
279
280 using StorageT = std::conditional_t<IsPtr, T, std::remove_reference_t<T> *>;
281 using CastT = std::conditional_t<IsPtr, T, T &>;
282
283private:
284 StorageT Func = nullptr;
285
286private:
287 template <typename In> static constexpr auto convertIn(In &&I) {
288 if constexpr (IsPtr) {
289 // Pointer... just echo it back.
290 return I;
291 } else {
292 // Must be a function reference. Return its address.
293 return &I;
294 }
295 }
296
297public:
298 Callable() = default;
299
300 // Construct from a function pointer or reference.
301 //
302 // Disable this constructor for references to 'Callable' so we don't violate
303 // the rule of 0.
304 template < // clang-format off
305 typename FnPtrOrRef,
306 std::enable_if_t<
307 !std::is_same_v<remove_cvref_t<FnPtrOrRef>, Callable>, int
308 > = 0
309 > // clang-format on
310 Callable(FnPtrOrRef &&F) : Func(convertIn(F)) {}
311
312 template <typename... Pn,
313 std::enable_if_t<std::is_invocable_v<T, Pn...>, int> = 0>
314 decltype(auto) operator()(Pn &&...Params) const {
315 return Func(std::forward<Pn>(Params)...);
316 }
317
318 bool valid() const { return Func != nullptr; }
319 void reset() { Func = nullptr; }
320
321 operator T const &() const {
322 if constexpr (IsPtr) {
323 // T is a pointer... just echo it back.
324 return Func;
325 } else {
326 static_assert(std::is_reference_v<T>,
327 "Expected a reference to a function.");
328 // T is a function reference... dereference the stored pointer.
329 return *Func;
330 }
331 }
332};
333
334} // namespace callable_detail
335
336namespace adl_detail {
337
338using std::begin;
339
340template <typename ContainerTy>
341decltype(auto) adl_begin(ContainerTy &&container) {
342 return begin(std::forward<ContainerTy>(container));
343}
344
345using std::end;
346
347template <typename ContainerTy>
348decltype(auto) adl_end(ContainerTy &&container) {
349 return end(std::forward<ContainerTy>(container));
350}
351
352using std::swap;
353
354template <typename T>
355void adl_swap(T &&lhs, T &&rhs) noexcept(noexcept(swap(std::declval<T>(),
356 std::declval<T>()))) {
357 swap(std::forward<T>(lhs), std::forward<T>(rhs));
358}
359
360} // end namespace adl_detail
361
362template <typename ContainerTy>
363decltype(auto) adl_begin(ContainerTy &&container) {
364 return adl_detail::adl_begin(std::forward<ContainerTy>(container));
365}
366
367template <typename ContainerTy>
368decltype(auto) adl_end(ContainerTy &&container) {
369 return adl_detail::adl_end(std::forward<ContainerTy>(container));
370}
371
372template <typename T>
373void adl_swap(T &&lhs, T &&rhs) noexcept(
374 noexcept(adl_detail::adl_swap(std::declval<T>(), std::declval<T>()))) {
375 adl_detail::adl_swap(std::forward<T>(lhs), std::forward<T>(rhs));
376}
377
378/// Returns true if the given container only contains a single element.
379template <typename ContainerTy> bool hasSingleElement(ContainerTy &&C) {
380 auto B = std::begin(C), E = std::end(C);
381 return B != E && std::next(B) == E;
382}
383
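A sketch (editorial example, not part of the analyzed source; demoSingle is a hypothetical helper); note this works on any range, not just containers with a size() member:

    #include "llvm/ADT/STLExtras.h"
    #include <vector>

    bool demoSingle() {
      std::vector<int> One{42}, Two{1, 2};
      return llvm::hasSingleElement(One) && !llvm::hasSingleElement(Two);
    }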
384/// Return a range covering \p RangeOrContainer with the first N elements
385/// excluded.
386template <typename T> auto drop_begin(T &&RangeOrContainer, size_t N = 1) {
387 return make_range(std::next(adl_begin(RangeOrContainer), N),
388 adl_end(RangeOrContainer));
389}
390
391/// Return a range covering \p RangeOrContainer with the last N elements
392/// excluded.
393template <typename T> auto drop_end(T &&RangeOrContainer, size_t N = 1) {
394 return make_range(adl_begin(RangeOrContainer),
395 std::prev(adl_end(RangeOrContainer), N));
396}
397
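A sketch exercising both helpers (editorial example, not part of the analyzed source; demoDrop is a hypothetical helper):

    #include "llvm/ADT/STLExtras.h"
    #include <vector>

    int demoDrop() {
      std::vector<int> V{1, 2, 3, 4};
      int Sum = 0;
      for (int X : llvm::drop_begin(V))  // skips the first element: 2, 3, 4
        Sum += X;
      for (int X : llvm::drop_end(V, 2)) // drops the last two: 1, 2
        Sum += X;
      return Sum; // 9 + 3 = 12
    }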
398// mapped_iterator - This is a simple iterator adapter that causes a function to
399// be applied whenever operator* is invoked on the iterator.
400
401template <typename ItTy, typename FuncTy,
402 typename ReferenceTy =
403 decltype(std::declval<FuncTy>()(*std::declval<ItTy>()))>
404class mapped_iterator
405 : public iterator_adaptor_base<
406 mapped_iterator<ItTy, FuncTy>, ItTy,
407 typename std::iterator_traits<ItTy>::iterator_category,
408 std::remove_reference_t<ReferenceTy>,
409 typename std::iterator_traits<ItTy>::difference_type,
410 std::remove_reference_t<ReferenceTy> *, ReferenceTy> {
411public:
412 mapped_iterator() = default;
413 mapped_iterator(ItTy U, FuncTy F)
414 : mapped_iterator::iterator_adaptor_base(std::move(U)), F(std::move(F)) {}
415
416 ItTy getCurrent() { return this->I; }
417
418 const FuncTy &getFunction() const { return F; }
419
420 ReferenceTy operator*() const { return F(*this->I); }
421
422private:
423 callable_detail::Callable<FuncTy> F{};
424};
425
426// map_iterator - Provide a convenient way to create mapped_iterators, just like
427// make_pair is useful for creating pairs...
428template <class ItTy, class FuncTy>
429inline mapped_iterator<ItTy, FuncTy> map_iterator(ItTy I, FuncTy F) {
430 return mapped_iterator<ItTy, FuncTy>(std::move(I), std::move(F));
431}
432
433template <class ContainerTy, class FuncTy>
434auto map_range(ContainerTy &&C, FuncTy F) {
435 return make_range(map_iterator(C.begin(), F), map_iterator(C.end(), F));
436}
437
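A sketch (editorial example, not part of the analyzed source; demoMap is a hypothetical helper): the mapping function is applied lazily, on each dereference.

    #include "llvm/ADT/STLExtras.h"
    #include <vector>

    int demoMap() {
      std::vector<int> V{1, 2, 3};
      int Sum = 0;
      for (int Doubled : llvm::map_range(V, [](int X) { return 2 * X; }))
        Sum += Doubled; // 2 + 4 + 6
      return Sum;       // 12
    }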
438/// A base type of mapped iterator, that is useful for building derived
439/// iterators that do not need/want to store the map function (as in
440/// mapped_iterator). These iterators must simply provide a `mapElement` method
441/// that defines how to map a value of the iterator to the provided reference
442/// type.
443template <typename DerivedT, typename ItTy, typename ReferenceTy>
444class mapped_iterator_base
445 : public iterator_adaptor_base<
446 DerivedT, ItTy,
447 typename std::iterator_traits<ItTy>::iterator_category,
448 std::remove_reference_t<ReferenceTy>,
449 typename std::iterator_traits<ItTy>::difference_type,
450 std::remove_reference_t<ReferenceTy> *, ReferenceTy> {
451public:
452 using BaseT = mapped_iterator_base;
453
454 mapped_iterator_base(ItTy U)
455 : mapped_iterator_base::iterator_adaptor_base(std::move(U)) {}
456
457 ItTy getCurrent() { return this->I; }
458
459 ReferenceTy operator*() const {
460 return static_cast<const DerivedT &>(*this).mapElement(*this->I);
461 }
462};
463
464/// Helper to determine if type T has a member called rbegin().
465template <typename Ty> class has_rbegin_impl {
466 using yes = char[1];
467 using no = char[2];
468
469 template <typename Inner>
470 static yes& test(Inner *I, decltype(I->rbegin()) * = nullptr);
471
472 template <typename>
473 static no& test(...);
474
475public:
476 static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
477};
478
479/// Metafunction to determine if T& or T has a member called rbegin().
480template <typename Ty>
481struct has_rbegin : has_rbegin_impl<std::remove_reference_t<Ty>> {};
482
483// Returns an iterator_range over the given container which iterates in reverse.
484template <typename ContainerTy> auto reverse(ContainerTy &&C) {
485 if constexpr (has_rbegin<ContainerTy>::value)
486 return make_range(C.rbegin(), C.rend());
487 else
488 return make_range(std::make_reverse_iterator(std::end(C)),
489 std::make_reverse_iterator(std::begin(C)));
490}
491
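A sketch (editorial example, not part of the analyzed source; lastElement is a hypothetical helper); for a std::vector the rbegin()/rend() branch above is taken.

    #include "llvm/ADT/STLExtras.h"
    #include <vector>

    int lastElement() {
      std::vector<int> V{1, 2, 3};
      for (int X : llvm::reverse(V))
        return X; // 3: first element of the reversed range
      return 0;
    }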
492/// An iterator adaptor that filters the elements of given inner iterators.
493///
494/// The predicate parameter should be a callable object that accepts the wrapped
495/// iterator's reference type and returns a bool. When incrementing or
496/// decrementing the iterator, it will call the predicate on each element and
497/// skip any where it returns false.
498///
499/// \code
500/// int A[] = { 1, 2, 3, 4 };
501/// auto R = make_filter_range(A, [](int N) { return N % 2 == 1; });
502/// // R contains { 1, 3 }.
503/// \endcode
504///
505/// Note: filter_iterator_base implements support for forward iteration.
506/// filter_iterator_impl exists to provide support for bidirectional iteration,
507/// conditional on whether the wrapped iterator supports it.
508template <typename WrappedIteratorT, typename PredicateT, typename IterTag>
509class filter_iterator_base
510 : public iterator_adaptor_base<
511 filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>,
512 WrappedIteratorT,
513 std::common_type_t<IterTag,
514 typename std::iterator_traits<
515 WrappedIteratorT>::iterator_category>> {
516 using BaseT = typename filter_iterator_base::iterator_adaptor_base;
517
518protected:
519 WrappedIteratorT End;
520 PredicateT Pred;
521
522 void findNextValid() {
523 while (this->I != End && !Pred(*this->I))
524 BaseT::operator++();
525 }
526
527 filter_iterator_base() = default;
528
529 // Construct the iterator. The begin iterator needs to know where the end
530 // is, so that it can properly stop when it gets there. The end iterator only
531 // needs the predicate to support bidirectional iteration.
532 filter_iterator_base(WrappedIteratorT Begin, WrappedIteratorT End,
533 PredicateT Pred)
534 : BaseT(Begin), End(End), Pred(Pred) {
535 findNextValid();
536 }
537
538public:
539 using BaseT::operator++;
540
541 filter_iterator_base &operator++() {
542 BaseT::operator++();
543 findNextValid();
544 return *this;
545 }
546
547 decltype(auto) operator*() const {
548     assert(BaseT::wrapped() != End && "Cannot dereference end iterator!");
549 return BaseT::operator*();
550 }
551
552 decltype(auto) operator->() const {
553     assert(BaseT::wrapped() != End && "Cannot dereference end iterator!");
554 return BaseT::operator->();
555 }
556};
557
558/// Specialization of filter_iterator_base for forward iteration only.
559template <typename WrappedIteratorT, typename PredicateT,
560 typename IterTag = std::forward_iterator_tag>
561class filter_iterator_impl
562 : public filter_iterator_base<WrappedIteratorT, PredicateT, IterTag> {
563public:
564 filter_iterator_impl() = default;
565
566 filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
567 PredicateT Pred)
568 : filter_iterator_impl::filter_iterator_base(Begin, End, Pred) {}
569};
570
571/// Specialization of filter_iterator_base for bidirectional iteration.
572template <typename WrappedIteratorT, typename PredicateT>
573class filter_iterator_impl<WrappedIteratorT, PredicateT,
574 std::bidirectional_iterator_tag>
575 : public filter_iterator_base<WrappedIteratorT, PredicateT,
576 std::bidirectional_iterator_tag> {
577 using BaseT = typename filter_iterator_impl::filter_iterator_base;
578
579 void findPrevValid() {
580 while (!this->Pred(*this->I))
581 BaseT::operator--();
582 }
583
584public:
585 using BaseT::operator--;
586
587 filter_iterator_impl() = default;
588
589 filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
590 PredicateT Pred)
591 : BaseT(Begin, End, Pred) {}
592
593 filter_iterator_impl &operator--() {
594 BaseT::operator--();
595 findPrevValid();
596 return *this;
597 }
598};
599
600namespace detail {
601
602template <bool is_bidirectional> struct fwd_or_bidi_tag_impl {
603 using type = std::forward_iterator_tag;
604};
605
606template <> struct fwd_or_bidi_tag_impl<true> {
607 using type = std::bidirectional_iterator_tag;
608};
609
610/// Helper which sets its type member to forward_iterator_tag if the category
611/// of \p IterT does not derive from bidirectional_iterator_tag, and to
612/// bidirectional_iterator_tag otherwise.
613template <typename IterT> struct fwd_or_bidi_tag {
614 using type = typename fwd_or_bidi_tag_impl<std::is_base_of<
615 std::bidirectional_iterator_tag,
616 typename std::iterator_traits<IterT>::iterator_category>::value>::type;
617};
618
619} // namespace detail
620
621/// Defines filter_iterator to a suitable specialization of
622/// filter_iterator_impl, based on the underlying iterator's category.
623template <typename WrappedIteratorT, typename PredicateT>
624using filter_iterator = filter_iterator_impl<
625 WrappedIteratorT, PredicateT,
626 typename detail::fwd_or_bidi_tag<WrappedIteratorT>::type>;
627
628/// Convenience function that takes a range of elements and a predicate,
629/// and returns a new filter_iterator range.
630///
631/// FIXME: Currently if RangeT && is an rvalue reference to a temporary, the
632/// lifetime of that temporary is not kept by the returned range object, and the
633/// temporary is going to be dropped on the floor after the make_iterator_range
634/// full expression that contains this function call.
635template <typename RangeT, typename PredicateT>
636iterator_range<filter_iterator<detail::IterOfRange<RangeT>, PredicateT>>
637make_filter_range(RangeT &&Range, PredicateT Pred) {
638 using FilterIteratorT =
639 filter_iterator<detail::IterOfRange<RangeT>, PredicateT>;
640 return make_range(
641 FilterIteratorT(std::begin(std::forward<RangeT>(Range)),
642 std::end(std::forward<RangeT>(Range)), Pred),
643 FilterIteratorT(std::end(std::forward<RangeT>(Range)),
644 std::end(std::forward<RangeT>(Range)), Pred));
645}
646
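The \code example above, completed into a sketch (editorial example, not part of the analyzed source; sumOdds is a hypothetical helper):

    #include "llvm/ADT/STLExtras.h"

    int sumOdds() {
      int A[] = {1, 2, 3, 4};
      int Sum = 0;
      for (int N : llvm::make_filter_range(A, [](int N) { return N % 2 == 1; }))
        Sum += N; // 1 + 3
      return Sum; // 4
    }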
647/// A pseudo-iterator adaptor that is designed to implement "early increment"
648/// style loops.
649///
650/// This is *not a normal iterator* and should almost never be used directly. It
651/// is intended primarily to be used with range based for loops and some range
652/// algorithms.
653///
654/// The iterator isn't quite an `OutputIterator` or an `InputIterator` but
655/// somewhere between them. The constraints of these iterators are:
656///
657/// - On construction or after being incremented, it is comparable and
658/// dereferenceable. It is *not* incrementable.
659/// - After being dereferenced, it is neither comparable nor dereferenceable;
660/// it is only incrementable.
661///
662/// This means you can only dereference the iterator once, and you can only
663/// increment it once between dereferences.
664template <typename WrappedIteratorT>
665class early_inc_iterator_impl
666 : public iterator_adaptor_base<early_inc_iterator_impl<WrappedIteratorT>,
667 WrappedIteratorT, std::input_iterator_tag> {
668 using BaseT = typename early_inc_iterator_impl::iterator_adaptor_base;
669
670 using PointerT = typename std::iterator_traits<WrappedIteratorT>::pointer;
671
672protected:
673#if LLVM_ENABLE_ABI_BREAKING_CHECKS
674 bool IsEarlyIncremented = false;
675#endif
676
677public:
678 early_inc_iterator_impl(WrappedIteratorT I) : BaseT(I) {}
679
680 using BaseT::operator*;
681 decltype(*std::declval<WrappedIteratorT>()) operator*() {
682#if LLVM_ENABLE_ABI_BREAKING_CHECKS
683     assert(!IsEarlyIncremented && "Cannot dereference twice!");
684 IsEarlyIncremented = true;
685#endif
686 return *(this->I)++;
687 }
688
689 using BaseT::operator++;
690 early_inc_iterator_impl &operator++() {
691#if LLVM_ENABLE_ABI_BREAKING_CHECKS
692     assert(IsEarlyIncremented && "Cannot increment before dereferencing!");
693 IsEarlyIncremented = false;
694#endif
695 return *this;
696 }
697
698 friend bool operator==(const early_inc_iterator_impl &LHS,
699 const early_inc_iterator_impl &RHS) {
700#if LLVM_ENABLE_ABI_BREAKING_CHECKS
701     assert(!LHS.IsEarlyIncremented && "Cannot compare after dereferencing!");
702#endif
703 return (const BaseT &)LHS == (const BaseT &)RHS;
704 }
705};
706
707/// Make a range that does early increment to allow mutation of the underlying
708/// range without disrupting iteration.
709///
710/// The underlying iterator will be incremented immediately after it is
711/// dereferenced, allowing deletion of the current node or insertion of nodes to
712/// not disrupt iteration provided they do not invalidate the *next* iterator --
713/// the current iterator can be invalidated.
714///
715/// This requires a very exact pattern of use that is only really suitable to
716/// range based for loops and other range algorithms that explicitly guarantee
717/// to dereference exactly once each element, and to increment exactly once each
718/// element.
719template <typename RangeT>
720iterator_range<early_inc_iterator_impl<detail::IterOfRange<RangeT>>>
721make_early_inc_range(RangeT &&Range) {
722 using EarlyIncIteratorT =
723 early_inc_iterator_impl<detail::IterOfRange<RangeT>>;
724 return make_range(EarlyIncIteratorT(std::begin(std::forward<RangeT>(Range))),
725 EarlyIncIteratorT(std::end(std::forward<RangeT>(Range))));
726}
727
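A sketch of the intended pattern (editorial example, not part of the analyzed source; eraseEvens is a hypothetical helper): erasing the just-visited element is safe because the wrapped iterator was already advanced when it was dereferenced.

    #include "llvm/ADT/STLExtras.h"
    #include <set>

    void eraseEvens(std::set<int> &S) {
      for (int X : llvm::make_early_inc_range(S))
        if (X % 2 == 0)
          S.erase(X); // invalidates only the node the iterator has moved past
    }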
728// Forward declarations required by zip_shortest/zip_equal/zip_first/zip_longest
729template <typename R, typename UnaryPredicate>
730bool all_of(R &&range, UnaryPredicate P);
731
732template <typename R, typename UnaryPredicate>
733bool any_of(R &&range, UnaryPredicate P);
734
735template <typename T> bool all_equal(std::initializer_list<T> Values);
736
737namespace detail {
738
739using std::declval;
740
741// We have to alias this since inlining the actual type at the usage site
742// in the parameter list of iterator_facade_base<> below ICEs MSVC 2017.
743template<typename... Iters> struct ZipTupleType {
744 using type = std::tuple<decltype(*declval<Iters>())...>;
745};
746
747template <typename ZipType, typename... Iters>
748using zip_traits = iterator_facade_base<
749 ZipType,
750 std::common_type_t<
751 std::bidirectional_iterator_tag,
752 typename std::iterator_traits<Iters>::iterator_category...>,
753 // ^ TODO: Implement random access methods.
754 typename ZipTupleType<Iters...>::type,
755 typename std::iterator_traits<
756 std::tuple_element_t<0, std::tuple<Iters...>>>::difference_type,
757 // ^ FIXME: This follows boost::make_zip_iterator's assumption that all
758 // inner iterators have the same difference_type. It would fail if, for
759 // instance, the second field's difference_type were non-numeric while the
760 // first is.
761 typename ZipTupleType<Iters...>::type *,
762 typename ZipTupleType<Iters...>::type>;
763
764template <typename ZipType, typename... Iters>
765struct zip_common : public zip_traits<ZipType, Iters...> {
766 using Base = zip_traits<ZipType, Iters...>;
767 using value_type = typename Base::value_type;
768
769 std::tuple<Iters...> iterators;
770
771protected:
772 template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
773 return value_type(*std::get<Ns>(iterators)...);
774 }
775
776 template <size_t... Ns>
777 decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
778 return std::tuple<Iters...>(std::next(std::get<Ns>(iterators))...);
779 }
780
781 template <size_t... Ns>
782 decltype(iterators) tup_dec(std::index_sequence<Ns...>) const {
783 return std::tuple<Iters...>(std::prev(std::get<Ns>(iterators))...);
784 }
785
786 template <size_t... Ns>
787 bool test_all_equals(const zip_common &other,
788 std::index_sequence<Ns...>) const {
789 return ((std::get<Ns>(this->iterators) == std::get<Ns>(other.iterators)) &&
790 ...);
791 }
792
793public:
794 zip_common(Iters &&... ts) : iterators(std::forward<Iters>(ts)...) {}
795
796 value_type operator*() const {
797 return deref(std::index_sequence_for<Iters...>{});
798 }
799
800 ZipType &operator++() {
801 iterators = tup_inc(std::index_sequence_for<Iters...>{});
802 return *reinterpret_cast<ZipType *>(this);
803 }
804
805 ZipType &operator--() {
806 static_assert(Base::IsBidirectional,
807 "All inner iterators must be at least bidirectional.");
808 iterators = tup_dec(std::index_sequence_for<Iters...>{});
809 return *reinterpret_cast<ZipType *>(this);
810 }
811
812   /// Return true if all the iterators match `other`'s iterators.
813 bool all_equals(zip_common &other) {
814 return test_all_equals(other, std::index_sequence_for<Iters...>{});
815 }
816};
817
818template <typename... Iters>
819struct zip_first : public zip_common<zip_first<Iters...>, Iters...> {
820 using Base = zip_common<zip_first<Iters...>, Iters...>;
821
822 bool operator==(const zip_first<Iters...> &other) const {
823 return std::get<0>(this->iterators) == std::get<0>(other.iterators);
824 }
825
826 zip_first(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
827};
828
829template <typename... Iters>
830class zip_shortest : public zip_common<zip_shortest<Iters...>, Iters...> {
831 template <size_t... Ns>
832 bool test(const zip_shortest<Iters...> &other,
833 std::index_sequence<Ns...>) const {
834 return ((std::get<Ns>(this->iterators) != std::get<Ns>(other.iterators)) &&
835 ...);
836 }
837
838public:
839 using Base = zip_common<zip_shortest<Iters...>, Iters...>;
840
841 zip_shortest(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
842
843 bool operator==(const zip_shortest<Iters...> &other) const {
844 return !test(other, std::index_sequence_for<Iters...>{});
845 }
846};
847
848template <template <typename...> class ItType, typename... Args> class zippy {
849public:
850 using iterator = ItType<decltype(std::begin(std::declval<Args>()))...>;
851 using iterator_category = typename iterator::iterator_category;
852 using value_type = typename iterator::value_type;
853 using difference_type = typename iterator::difference_type;
854 using pointer = typename iterator::pointer;
855 using reference = typename iterator::reference;
856
857private:
858 std::tuple<Args...> ts;
859
860 template <size_t... Ns>
861 iterator begin_impl(std::index_sequence<Ns...>) const {
862 return iterator(std::begin(std::get<Ns>(ts))...);
863 }
864 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
865 return iterator(std::end(std::get<Ns>(ts))...);
866 }
867
868public:
869 zippy(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
870
871 iterator begin() const {
872 return begin_impl(std::index_sequence_for<Args...>{});
873 }
874 iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
875};
876
877} // end namespace detail
878
879/// zip iterator for two or more iterable types. Iteration continues until the
880/// end of the *shortest* iteratee is reached.
881template <typename T, typename U, typename... Args>
882detail::zippy<detail::zip_shortest, T, U, Args...> zip(T &&t, U &&u,
883 Args &&...args) {
884 return detail::zippy<detail::zip_shortest, T, U, Args...>(
885 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
886}
887
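A sketch (editorial example, not part of the analyzed source; dotShortest is a hypothetical helper): iteration stops at the end of the shorter range.

    #include "llvm/ADT/STLExtras.h"
    #include <vector>

    int dotShortest(const std::vector<int> &A, const std::vector<int> &B) {
      int Sum = 0;
      for (auto [X, Y] : llvm::zip(A, B)) // min(A.size(), B.size()) steps
        Sum += X * Y;
      return Sum;
    }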
888/// zip iterator that assumes that all iteratees have the same length.
889/// In builds with assertions on, this assumption is checked before the
890/// iteration starts.
891template <typename T, typename U, typename... Args>
892detail::zippy<detail::zip_first, T, U, Args...> zip_equal(T &&t, U &&u,
893 Args &&...args) {
894   assert(all_equal({std::distance(adl_begin(t), adl_end(t)),
895                     std::distance(adl_begin(u), adl_end(u)),
896                     std::distance(adl_begin(args), adl_end(args))...}) &&
897          "Iteratees do not have equal length");
899 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
900}
901
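A hypothetical sketch of the contract, assuming the caller guarantees equal lengths (the names are invented):

#include "llvm/ADT/STLExtras.h"
#include <vector>

void scaleInPlace(std::vector<double> &values,
                  const std::vector<double> &factors) {
  // zip_equal asserts values.size() == factors.size() in +Asserts builds.
  for (auto [v, f] : llvm::zip_equal(values, factors))
    v *= f;
}
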
902/// zip iterator that, for the sake of efficiency, assumes the first iteratee is
903/// the shortest. Iteration continues until the end of the first iteratee is
904/// reached. In builds with assertions on, we check that the assumption about
905/// the first iteratee being the shortest holds.
906template <typename T, typename U, typename... Args>
907detail::zippy<detail::zip_first, T, U, Args...> zip_first(T &&t, U &&u,
908 Args &&...args) {
909 assert(std::distance(adl_begin(t), adl_end(t)) <=
910 std::min({std::distance(adl_begin(u), adl_end(u)),
911 std::distance(adl_begin(args), adl_end(args))...}) &&
912 "First iteratee is not the shortest");
913
914 return detail::zippy<detail::zip_first, T, U, Args...>(
915 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
916}
917
918namespace detail {
919template <typename Iter>
920Iter next_or_end(const Iter &I, const Iter &End) {
921 if (I == End)
922 return End;
923 return std::next(I);
924}
925
926template <typename Iter>
927auto deref_or_none(const Iter &I, const Iter &End) -> std::optional<
928 std::remove_const_t<std::remove_reference_t<decltype(*I)>>> {
929 if (I == End)
930 return std::nullopt;
931 return *I;
932}
933
934template <typename Iter> struct ZipLongestItemType {
935 using type = std::optional<std::remove_const_t<
936 std::remove_reference_t<decltype(*std::declval<Iter>())>>>;
937};
938
939template <typename... Iters> struct ZipLongestTupleType {
940 using type = std::tuple<typename ZipLongestItemType<Iters>::type...>;
941};
942
943template <typename... Iters>
944class zip_longest_iterator
945 : public iterator_facade_base<
946 zip_longest_iterator<Iters...>,
947 std::common_type_t<
948 std::forward_iterator_tag,
949 typename std::iterator_traits<Iters>::iterator_category...>,
950 typename ZipLongestTupleType<Iters...>::type,
951 typename std::iterator_traits<
952 std::tuple_element_t<0, std::tuple<Iters...>>>::difference_type,
953 typename ZipLongestTupleType<Iters...>::type *,
954 typename ZipLongestTupleType<Iters...>::type> {
955public:
956 using value_type = typename ZipLongestTupleType<Iters...>::type;
957
958private:
959 std::tuple<Iters...> iterators;
960 std::tuple<Iters...> end_iterators;
961
962 template <size_t... Ns>
963 bool test(const zip_longest_iterator<Iters...> &other,
964 std::index_sequence<Ns...>) const {
965 return ((std::get<Ns>(this->iterators) != std::get<Ns>(other.iterators)) ||
966 ...);
967 }
968
969 template <size_t... Ns> value_type deref(std::index_sequence<Ns...>) const {
970 return value_type(
971 deref_or_none(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
972 }
973
974 template <size_t... Ns>
975 decltype(iterators) tup_inc(std::index_sequence<Ns...>) const {
976 return std::tuple<Iters...>(
977 next_or_end(std::get<Ns>(iterators), std::get<Ns>(end_iterators))...);
978 }
979
980public:
981 zip_longest_iterator(std::pair<Iters &&, Iters &&>... ts)
982 : iterators(std::forward<Iters>(ts.first)...),
983 end_iterators(std::forward<Iters>(ts.second)...) {}
984
985 value_type operator*() const {
986 return deref(std::index_sequence_for<Iters...>{});
987 }
988
989 zip_longest_iterator<Iters...> &operator++() {
990 iterators = tup_inc(std::index_sequence_for<Iters...>{});
991 return *this;
992 }
993
994 bool operator==(const zip_longest_iterator<Iters...> &other) const {
995 return !test(other, std::index_sequence_for<Iters...>{});
996 }
997};
998
999template <typename... Args> class zip_longest_range {
1000public:
1001 using iterator =
1002 zip_longest_iterator<decltype(adl_begin(std::declval<Args>()))...>;
1003 using iterator_category = typename iterator::iterator_category;
1004 using value_type = typename iterator::value_type;
1005 using difference_type = typename iterator::difference_type;
1006 using pointer = typename iterator::pointer;
1007 using reference = typename iterator::reference;
1008
1009private:
1010 std::tuple<Args...> ts;
1011
1012 template <size_t... Ns>
1013 iterator begin_impl(std::index_sequence<Ns...>) const {
1014 return iterator(std::make_pair(adl_begin(std::get<Ns>(ts)),
1015 adl_end(std::get<Ns>(ts)))...);
1016 }
1017
1018 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
1019 return iterator(std::make_pair(adl_end(std::get<Ns>(ts)),
1020 adl_end(std::get<Ns>(ts)))...);
1021 }
1022
1023public:
1024 zip_longest_range(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}
1025
1026 iterator begin() const {
1027 return begin_impl(std::index_sequence_for<Args...>{});
1028 }
1029 iterator end() const { return end_impl(std::index_sequence_for<Args...>{}); }
1030};
1031} // namespace detail
1032
1033/// Iterate over two or more iterators at the same time. Iteration continues
1034/// until all iterators reach the end. The std::optional only contains a value
1035/// if the iterator has not reached the end.
1036template <typename T, typename U, typename... Args>
1037detail::zip_longest_range<T, U, Args...> zip_longest(T &&t, U &&u,
1038 Args &&... args) {
1039 return detail::zip_longest_range<T, U, Args...>(
1040 std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...);
1041}
1042
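A minimal sketch with invented names: each tuple element is a std::optional that is empty once its own range is exhausted.

#include "llvm/ADT/STLExtras.h"
#include <vector>

int countMismatches(const std::vector<int> &a, const std::vector<int> &b) {
  int mismatches = 0;
  // std::optional's operator!= treats an empty optional as unequal to any
  // engaged one, which covers the tail of the longer range.
  for (auto [x, y] : llvm::zip_longest(a, b))
    if (x != y)
      ++mismatches;
  return mismatches;
}
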
1043/// Iterator wrapper that concatenates sequences together.
1044///
1045/// This can concatenate different iterators, even with different types, into
1046/// a single iterator provided the value types of all the concatenated
1047/// iterators expose `reference` and `pointer` types that can be converted to
1048/// `ValueT &` and `ValueT *` respectively. It doesn't support more
1049/// interesting/customized pointer or reference types.
1050///
1051/// Currently this only supports forward or higher iterator categories as
1052/// inputs and always exposes a forward iterator interface.
1053template <typename ValueT, typename... IterTs>
1054class concat_iterator
1055 : public iterator_facade_base<concat_iterator<ValueT, IterTs...>,
1056 std::forward_iterator_tag, ValueT> {
1057 using BaseT = typename concat_iterator::iterator_facade_base;
1058
1059 /// We store both the current and end iterators for each concatenated
1060 /// sequence in a tuple of pairs.
1061 ///
1062 /// Note that something like iterator_range seems nice at first here, but the
1063 /// range properties are of little benefit and end up getting in the way
1064 /// because we need to do mutation on the current iterators.
1065 std::tuple<IterTs...> Begins;
1066 std::tuple<IterTs...> Ends;
1067
1068 /// Attempts to increment a specific iterator.
1069 ///
1070 /// Returns true if it was able to increment the iterator. Returns false if
1071 /// the iterator is already at the end iterator.
1072 template <size_t Index> bool incrementHelper() {
1073 auto &Begin = std::get<Index>(Begins);
1074 auto &End = std::get<Index>(Ends);
1075 if (Begin == End)
1076 return false;
1077
1078 ++Begin;
1079 return true;
1080 }
1081
1082 /// Increments the first non-end iterator.
1083 ///
1084 /// It is an error to call this with all iterators at the end.
1085 template <size_t... Ns> void increment(std::index_sequence<Ns...>) {
1086 // Build a sequence of functions to increment each iterator if possible.
1087 bool (concat_iterator::*IncrementHelperFns[])() = {
1088 &concat_iterator::incrementHelper<Ns>...};
1089
1090 // Loop over them, and stop as soon as we succeed at incrementing one.
1091 for (auto &IncrementHelperFn : IncrementHelperFns)
1092 if ((this->*IncrementHelperFn)())
1093 return;
1094
1095 llvm_unreachable("Attempted to increment an end concat iterator!")::llvm::llvm_unreachable_internal("Attempted to increment an end concat iterator!"
, "llvm/include/llvm/ADT/STLExtras.h", 1095)
;
1096 }
1097
1098 /// Returns null if the specified iterator is at the end. Otherwise,
1099 /// dereferences the iterator and returns the address of the resulting
1100 /// reference.
1101 template <size_t Index> ValueT *getHelper() const {
1102 auto &Begin = std::get<Index>(Begins);
1103 auto &End = std::get<Index>(Ends);
1104 if (Begin == End)
1105 return nullptr;
1106
1107 return &*Begin;
1108 }
1109
1110 /// Finds the first non-end iterator, dereferences, and returns the resulting
1111 /// reference.
1112 ///
1113 /// It is an error to call this with all iterators at the end.
1114 template <size_t... Ns> ValueT &get(std::index_sequence<Ns...>) const {
1115 // Build a sequence of functions to get from iterator if possible.
1116 ValueT *(concat_iterator::*GetHelperFns[])() const = {
1117 &concat_iterator::getHelper<Ns>...};
1118
1119 // Loop over them, and return the first result we find.
1120 for (auto &GetHelperFn : GetHelperFns)
1121 if (ValueT *P = (this->*GetHelperFn)())
1122 return *P;
1123
1124 llvm_unreachable("Attempted to get a pointer from an end concat iterator!")::llvm::llvm_unreachable_internal("Attempted to get a pointer from an end concat iterator!"
, "llvm/include/llvm/ADT/STLExtras.h", 1124)
;
1125 }
1126
1127public:
1128 /// Constructs an iterator from a sequence of ranges.
1129 ///
1130 /// We need the full range to know how to switch between each of the
1131 /// iterators.
1132 template <typename... RangeTs>
1133 explicit concat_iterator(RangeTs &&... Ranges)
1134 : Begins(std::begin(Ranges)...), Ends(std::end(Ranges)...) {}
1135
1136 using BaseT::operator++;
1137
1138 concat_iterator &operator++() {
1139 increment(std::index_sequence_for<IterTs...>());
1140 return *this;
1141 }
1142
1143 ValueT &operator*() const {
1144 return get(std::index_sequence_for<IterTs...>());
1145 }
1146
1147 bool operator==(const concat_iterator &RHS) const {
1148 return Begins == RHS.Begins && Ends == RHS.Ends;
1149 }
1150};
1151
1152namespace detail {
1153
1154/// Helper to store a sequence of ranges being concatenated and access them.
1155///
1156/// This is designed to facilitate providing actual storage when temporaries
1157/// are passed into the constructor such that we can use it as part of range
1158/// based for loops.
1159template <typename ValueT, typename... RangeTs> class concat_range {
1160public:
1161 using iterator =
1162 concat_iterator<ValueT,
1163 decltype(std::begin(std::declval<RangeTs &>()))...>;
1164
1165private:
1166 std::tuple<RangeTs...> Ranges;
1167
1168 template <size_t... Ns>
1169 iterator begin_impl(std::index_sequence<Ns...>) {
1170 return iterator(std::get<Ns>(Ranges)...);
1171 }
1172 template <size_t... Ns>
1173 iterator begin_impl(std::index_sequence<Ns...>) const {
1174 return iterator(std::get<Ns>(Ranges)...);
1175 }
1176 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) {
1177 return iterator(make_range(std::end(std::get<Ns>(Ranges)),
1178 std::end(std::get<Ns>(Ranges)))...);
1179 }
1180 template <size_t... Ns> iterator end_impl(std::index_sequence<Ns...>) const {
1181 return iterator(make_range(std::end(std::get<Ns>(Ranges)),
1182 std::end(std::get<Ns>(Ranges)))...);
1183 }
1184
1185public:
1186 concat_range(RangeTs &&... Ranges)
1187 : Ranges(std::forward<RangeTs>(Ranges)...) {}
1188
1189 iterator begin() {
1190 return begin_impl(std::index_sequence_for<RangeTs...>{});
1191 }
1192 iterator begin() const {
1193 return begin_impl(std::index_sequence_for<RangeTs...>{});
1194 }
1195 iterator end() {
1196 return end_impl(std::index_sequence_for<RangeTs...>{});
1197 }
1198 iterator end() const {
1199 return end_impl(std::index_sequence_for<RangeTs...>{});
1200 }
1201};
1202
1203} // end namespace detail
1204
1205/// Concatenated range across two or more ranges.
1206///
1207/// The desired value type must be explicitly specified.
1208template <typename ValueT, typename... RangeTs>
1209detail::concat_range<ValueT, RangeTs...> concat(RangeTs &&... Ranges) {
1210 static_assert(sizeof...(RangeTs) > 1,
1211 "Need more than one range to concatenate!");
1212 return detail::concat_range<ValueT, RangeTs...>(
1213 std::forward<RangeTs>(Ranges)...);
1214}
1215
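A minimal sketch with invented names; note the explicit value type, as the comment above requires:

#include "llvm/ADT/STLExtras.h"
#include <vector>

int sumBoth(std::vector<int> &a, std::vector<int> &b) {
  int total = 0;
  for (int v : llvm::concat<int>(a, b)) // walks a, then b
    total += v;
  return total;
}
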
1216/// A utility class used to implement an iterator that contains some base object
1217/// and an index. The iterator moves the index but keeps the base constant.
1218template <typename DerivedT, typename BaseT, typename T,
1219 typename PointerT = T *, typename ReferenceT = T &>
1220class indexed_accessor_iterator
1221 : public llvm::iterator_facade_base<DerivedT,
1222 std::random_access_iterator_tag, T,
1223 std::ptrdiff_t, PointerT, ReferenceT> {
1224public:
1225 ptrdiff_t operator-(const indexed_accessor_iterator &rhs) const {
1226 assert(base == rhs.base && "incompatible iterators");
1227 return index - rhs.index;
1228 }
1229 bool operator==(const indexed_accessor_iterator &rhs) const {
1230 return base == rhs.base && index == rhs.index;
1231 }
1232 bool operator<(const indexed_accessor_iterator &rhs) const {
1233 assert(base == rhs.base && "incompatible iterators");
1234 return index < rhs.index;
1235 }
1236
1237 DerivedT &operator+=(ptrdiff_t offset) {
1238 this->index += offset;
1239 return static_cast<DerivedT &>(*this);
1240 }
1241 DerivedT &operator-=(ptrdiff_t offset) {
1242 this->index -= offset;
1243 return static_cast<DerivedT &>(*this);
1244 }
1245
1246 /// Returns the current index of the iterator.
1247 ptrdiff_t getIndex() const { return index; }
1248
1249 /// Returns the current base of the iterator.
1250 const BaseT &getBase() const { return base; }
1251
1252protected:
1253 indexed_accessor_iterator(BaseT base, ptrdiff_t index)
1254 : base(base), index(index) {}
1255 BaseT base;
1256 ptrdiff_t index;
1257};
1258
1259namespace detail {
1260/// The class represents the base of a range of indexed_accessor_iterators. It
1261/// provides support for many different range functionalities, e.g.
1262/// drop_front/slice/etc.. Derived range classes must implement the following
1263/// static methods:
1264/// * ReferenceT dereference_iterator(const BaseT &base, ptrdiff_t index)
1265/// - Dereference an iterator pointing to the base object at the given
1266/// index.
1267/// * BaseT offset_base(const BaseT &base, ptrdiff_t index)
1268/// - Return a new base that is offset from the provided base by 'index'
1269/// elements.
1270template <typename DerivedT, typename BaseT, typename T,
1271 typename PointerT = T *, typename ReferenceT = T &>
1272class indexed_accessor_range_base {
1273public:
1274 using RangeBaseT = indexed_accessor_range_base;
1275
1276 /// An iterator element of this range.
1277 class iterator : public indexed_accessor_iterator<iterator, BaseT, T,
1278 PointerT, ReferenceT> {
1279 public:
1280 // Index into this iterator, invoking a static method on the derived type.
1281 ReferenceT operator*() const {
1282 return DerivedT::dereference_iterator(this->getBase(), this->getIndex());
1283 }
1284
1285 private:
1286 iterator(BaseT owner, ptrdiff_t curIndex)
1287 : iterator::indexed_accessor_iterator(owner, curIndex) {}
1288
1289 /// Allow access to the constructor.
1290 friend indexed_accessor_range_base<DerivedT, BaseT, T, PointerT,
1291 ReferenceT>;
1292 };
1293
1294 indexed_accessor_range_base(iterator begin, iterator end)
1295 : base(offset_base(begin.getBase(), begin.getIndex())),
1296 count(end.getIndex() - begin.getIndex()) {}
1297 indexed_accessor_range_base(const iterator_range<iterator> &range)
1298 : indexed_accessor_range_base(range.begin(), range.end()) {}
1299 indexed_accessor_range_base(BaseT base, ptrdiff_t count)
1300 : base(base), count(count) {}
1301
1302 iterator begin() const { return iterator(base, 0); }
1303 iterator end() const { return iterator(base, count); }
1304 ReferenceT operator[](size_t Index) const {
1305 assert(Index < size() && "invalid index for value range");
1306 return DerivedT::dereference_iterator(base, static_cast<ptrdiff_t>(Index));
1307 }
1308 ReferenceT front() const {
1309 assert(!empty() && "expected non-empty range");
1310 return (*this)[0];
1311 }
1312 ReferenceT back() const {
1313 assert(!empty() && "expected non-empty range");
1314 return (*this)[size() - 1];
1315 }
1316
1317 /// Compare this range with another.
1318 template <typename OtherT>
1319 friend bool operator==(const indexed_accessor_range_base &lhs,
1320 const OtherT &rhs) {
1321 return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
1322 }
1323 template <typename OtherT>
1324 friend bool operator!=(const indexed_accessor_range_base &lhs,
1325 const OtherT &rhs) {
1326 return !(lhs == rhs);
1327 }
1328
1329 /// Return the size of this range.
1330 size_t size() const { return count; }
1331
1332 /// Return if the range is empty.
1333 bool empty() const { return size() == 0; }
1334
1335 /// Drop the first N elements, and keep M elements.
1336 DerivedT slice(size_t n, size_t m) const {
1337 assert(n + m <= size() && "invalid size specifiers");
1338 return DerivedT(offset_base(base, n), m);
1339 }
1340
1341 /// Drop the first n elements.
1342 DerivedT drop_front(size_t n = 1) const {
1343 assert(size() >= n && "Dropping more elements than exist");
1344 return slice(n, size() - n);
1345 }
1346 /// Drop the last n elements.
1347 DerivedT drop_back(size_t n = 1) const {
1348 assert(size() >= n && "Dropping more elements than exist");
1349 return DerivedT(base, size() - n);
1350 }
1351
1352 /// Take the first n elements.
1353 DerivedT take_front(size_t n = 1) const {
1354 return n < size() ? drop_back(size() - n)
1355 : static_cast<const DerivedT &>(*this);
1356 }
1357
1358 /// Take the last n elements.
1359 DerivedT take_back(size_t n = 1) const {
1360 return n < size() ? drop_front(size() - n)
1361 : static_cast<const DerivedT &>(*this);
1362 }
1363
1364 /// Allow conversion to any type accepting an iterator_range.
1365 template <typename RangeT, typename = std::enable_if_t<std::is_constructible<
1366 RangeT, iterator_range<iterator>>::value>>
1367 operator RangeT() const {
1368 return RangeT(iterator_range<iterator>(*this));
1369 }
1370
1371 /// Returns the base of this range.
1372 const BaseT &getBase() const { return base; }
1373
1374private:
1375 /// Offset the given base by the given amount.
1376 static BaseT offset_base(const BaseT &base, size_t n) {
1377 return n == 0 ? base : DerivedT::offset_base(base, n);
1378 }
1379
1380protected:
1381 indexed_accessor_range_base(const indexed_accessor_range_base &) = default;
1382 indexed_accessor_range_base(indexed_accessor_range_base &&) = default;
1383 indexed_accessor_range_base &
1384 operator=(const indexed_accessor_range_base &) = default;
1385
1386 /// The base that owns the provided range of values.
1387 BaseT base;
1388 /// The size from the owning range.
1389 ptrdiff_t count;
1390};
1391} // end namespace detail
1392
1393/// This class provides an implementation of a range of
1394/// indexed_accessor_iterators where the base is not indexable. Ranges with
1395/// bases that are offsetable should derive from indexed_accessor_range_base
1396/// instead. Derived range classes are expected to implement the following
1397/// static method:
1398/// * ReferenceT dereference(const BaseT &base, ptrdiff_t index)
1399/// - Dereference an iterator pointing to a parent base at the given index.
1400template <typename DerivedT, typename BaseT, typename T,
1401 typename PointerT = T *, typename ReferenceT = T &>
1402class indexed_accessor_range
1403 : public detail::indexed_accessor_range_base<
1404 DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT> {
1405public:
1406 indexed_accessor_range(BaseT base, ptrdiff_t startIndex, ptrdiff_t count)
1407 : detail::indexed_accessor_range_base<
1408 DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT, ReferenceT>(
1409 std::make_pair(base, startIndex), count) {}
1410 using detail::indexed_accessor_range_base<
1411 DerivedT, std::pair<BaseT, ptrdiff_t>, T, PointerT,
1412 ReferenceT>::indexed_accessor_range_base;
1413
1414 /// Returns the current base of the range.
1415 const BaseT &getBase() const { return this->base.first; }
1416
1417 /// Returns the current start index of the range.
1418 ptrdiff_t getStartIndex() const { return this->base.second; }
1419
1420 /// See `detail::indexed_accessor_range_base` for details.
1421 static std::pair<BaseT, ptrdiff_t>
1422 offset_base(const std::pair<BaseT, ptrdiff_t> &base, ptrdiff_t index) {
1423 // We encode the internal base as a pair of the derived base and a start
1424 // index into the derived base.
1425 return std::make_pair(base.first, base.second + index);
1426 }
1427 /// See `detail::indexed_accessor_range_base` for details.
1428 static ReferenceT
1429 dereference_iterator(const std::pair<BaseT, ptrdiff_t> &base,
1430 ptrdiff_t index) {
1431 return DerivedT::dereference(base.first, base.second + index);
1432 }
1433};
1434
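A hypothetical derived range, sketching the static hooks described above (the type and its purpose are invented for illustration):

#include "llvm/ADT/STLExtras.h"
#include <cstddef>

// Expose a raw const-int buffer as a random-access range.
struct IntBufferRange
    : llvm::indexed_accessor_range<IntBufferRange, const int *, const int> {
  using Base = llvm::indexed_accessor_range<IntBufferRange, const int *,
                                            const int>;
  using Base::Base;

  // Required static hook: dereference the parent base at the given index.
  static const int &dereference(const int *base, ptrdiff_t index) {
    return base[index];
  }
};

// Usage: IntBufferRange r(buffer, /*startIndex=*/0, /*count=*/n);
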
1435namespace detail {
1436/// Return a reference to the first or second member of a pair when given a
1437/// reference to the pair; otherwise, return a copy of the member of a temporary.
1438///
1439/// When passing a range whose iterators return values instead of references,
1440/// the reference must be dropped from `decltype((elt.first))`, which will
1441/// always be a reference, to avoid returning a reference to a temporary.
1442template <typename EltTy, typename FirstTy> class first_or_second_type {
1443public:
1444 using type = std::conditional_t<std::is_reference<EltTy>::value, FirstTy,
1445 std::remove_reference_t<FirstTy>>;
1446};
1447} // end namespace detail
1448
1449/// Given a container of pairs, return a range over the first elements.
1450template <typename ContainerTy> auto make_first_range(ContainerTy &&c) {
1451 using EltTy = decltype((*std::begin(c)));
1452 return llvm::map_range(std::forward<ContainerTy>(c),
1453 [](EltTy elt) -> typename detail::first_or_second_type<
1454 EltTy, decltype((elt.first))>::type {
1455 return elt.first;
1456 });
1457}
1458
1459/// Given a container of pairs, return a range over the second elements.
1460template <typename ContainerTy> auto make_second_range(ContainerTy &&c) {
1461 using EltTy = decltype((*std::begin(c)));
1462 return llvm::map_range(
1463 std::forward<ContainerTy>(c),
1464 [](EltTy elt) ->
1465 typename detail::first_or_second_type<EltTy,
1466 decltype((elt.second))>::type {
1467 return elt.second;
1468 });
1469}
1470
1471//===----------------------------------------------------------------------===//
1472// Extra additions to <utility>
1473//===----------------------------------------------------------------------===//
1474
1475/// Function object to check whether the first component of a std::pair
1476/// compares less than the first component of another std::pair.
1477struct less_first {
1478 template <typename T> bool operator()(const T &lhs, const T &rhs) const {
1479 return std::less<>()(lhs.first, rhs.first);
1480 }
1481};
1482
1483/// Function object to check whether the second component of a std::pair
1484/// compares less than the second component of another std::pair.
1485struct less_second {
1486 template <typename T> bool operator()(const T &lhs, const T &rhs) const {
1487 return std::less<>()(lhs.second, rhs.second);
1488 }
1489};
1490
1491/// \brief Function object to apply a binary function to the first component of
1492/// a std::pair.
1493template<typename FuncTy>
1494struct on_first {
1495 FuncTy func;
1496
1497 template <typename T>
1498 decltype(auto) operator()(const T &lhs, const T &rhs) const {
1499 return func(lhs.first, rhs.first);
1500 }
1501};
1502
1503/// Utility type to build an inheritance chain that makes it easy to rank
1504/// overload candidates.
1505template <int N> struct rank : rank<N - 1> {};
1506template <> struct rank<0> {};
1507
1508/// traits class for checking whether type T is one of the given
1509/// types in the variadic list.
1510template <typename T, typename... Ts>
1511using is_one_of = std::disjunction<std::is_same<T, Ts>...>;
1512
1513/// traits class for checking whether type T is a base class for all
1514/// the given types in the variadic list.
1515template <typename T, typename... Ts>
1516using are_base_of = std::conjunction<std::is_base_of<T, Ts>...>;
1517
1518namespace detail {
1519template <typename... Ts> struct Visitor;
1520
1521template <typename HeadT, typename... TailTs>
1522struct Visitor<HeadT, TailTs...> : remove_cvref_t<HeadT>, Visitor<TailTs...> {
1523 explicit constexpr Visitor(HeadT &&Head, TailTs &&...Tail)
1524 : remove_cvref_t<HeadT>(std::forward<HeadT>(Head)),
1525 Visitor<TailTs...>(std::forward<TailTs>(Tail)...) {}
1526 using remove_cvref_t<HeadT>::operator();
1527 using Visitor<TailTs...>::operator();
1528};
1529
1530template <typename HeadT> struct Visitor<HeadT> : remove_cvref_t<HeadT> {
1531 explicit constexpr Visitor(HeadT &&Head)
1532 : remove_cvref_t<HeadT>(std::forward<HeadT>(Head)) {}
1533 using remove_cvref_t<HeadT>::operator();
1534};
1535} // namespace detail
1536
1537/// Returns an opaquely-typed Callable object whose operator() overload set is
1538/// the sum of the operator() overload sets of each CallableT in CallableTs.
1539///
1540/// The type of the returned object derives from each CallableT in CallableTs.
1541/// The returned object is constructed by invoking the appropriate copy or move
1542/// constructor of each CallableT, as selected by overload resolution on the
1543/// corresponding argument to makeVisitor.
1544///
1545/// Example:
1546///
1547/// \code
1548/// auto visitor = makeVisitor([](auto) { return "unhandled type"; },
1549/// [](int i) { return "int"; },
1550/// [](std::string s) { return "str"; });
1551/// auto a = visitor(42); // `a` is now "int".
1552/// auto b = visitor("foo"); // `b` is now "str".
1553/// auto c = visitor(3.14f); // `c` is now "unhandled type".
1554/// \endcode
1555///
1556/// Example of making a visitor with a lambda which captures a move-only type:
1557///
1558/// \code
1559/// std::unique_ptr<FooHandler> FH = /* ... */;
1560/// auto visitor = makeVisitor(
1561/// [FH{std::move(FH)}](Foo F) { return FH->handle(F); },
1562/// [](int i) { return i; },
1563/// [](std::string s) { return atoi(s.c_str()); });
1564/// \endcode
1565template <typename... CallableTs>
1566constexpr decltype(auto) makeVisitor(CallableTs &&...Callables) {
1567 return detail::Visitor<CallableTs...>(std::forward<CallableTs>(Callables)...);
1568}
1569
1570//===----------------------------------------------------------------------===//
1571// Extra additions to <algorithm>
1572//===----------------------------------------------------------------------===//
1573
1574// We have a copy here so that LLVM behaves the same when using different
1575// standard libraries.
1576template <class Iterator, class RNG>
1577void shuffle(Iterator first, Iterator last, RNG &&g) {
1578 // It would be better to use a std::uniform_int_distribution,
1579 // but that would be stdlib dependent.
1580 typedef
1581 typename std::iterator_traits<Iterator>::difference_type difference_type;
1582 for (auto size = last - first; size > 1; ++first, (void)--size) {
1583 difference_type offset = g() % size;
1584 // Avoid self-assignment due to incorrect assertions in libstdc++
1585 // containers (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85828).
1586 if (offset != difference_type(0))
1587 std::iter_swap(first, first + offset);
1588 }
1589}
1590
1591/// Adapt std::less<T> for array_pod_sort.
1592template<typename T>
1593inline int array_pod_sort_comparator(const void *P1, const void *P2) {
1594 if (std::less<T>()(*reinterpret_cast<const T*>(P1),
1595 *reinterpret_cast<const T*>(P2)))
1596 return -1;
1597 if (std::less<T>()(*reinterpret_cast<const T*>(P2),
1598 *reinterpret_cast<const T*>(P1)))
1599 return 1;
1600 return 0;
1601}
1602
1603/// get_array_pod_sort_comparator - This is an internal helper function used to
1604/// get type deduction of T right.
1605template<typename T>
1606inline int (*get_array_pod_sort_comparator(const T &))
1607 (const void*, const void*) {
1608 return array_pod_sort_comparator<T>;
1609}
1610
1611#ifdef EXPENSIVE_CHECKS
1612namespace detail {
1613
1614inline unsigned presortShuffleEntropy() {
1615 static unsigned Result(std::random_device{}());
1616 return Result;
1617}
1618
1619template <class IteratorTy>
1620inline void presortShuffle(IteratorTy Start, IteratorTy End) {
1621 std::mt19937 Generator(presortShuffleEntropy());
1622 llvm::shuffle(Start, End, Generator);
1623}
1624
1625} // end namespace detail
1626#endif
1627
1628/// array_pod_sort - This sorts an array with the specified start and end
1629/// extent. This is just like std::sort, except that it calls qsort instead of
1630/// using an inlined template. qsort is slightly slower than std::sort, but
1631/// most sorts are not performance critical in LLVM and std::sort has to be
1632/// template instantiated for each type, leading to significant measured code
1633/// bloat. This function should generally be used instead of std::sort where
1634/// possible.
1635///
1636/// This function assumes that you have simple POD-like types that can be
1637/// compared with std::less and can be moved with memcpy. If this isn't true,
1638/// you should use std::sort.
1639///
1640/// NOTE: If qsort_r were portable, we could allow a custom comparator and
1641/// default to std::less.
1642template<class IteratorTy>
1643inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
1644 // Don't inefficiently call qsort with one element or trigger undefined
1645 // behavior with an empty sequence.
1646 auto NElts = End - Start;
1647 if (NElts <= 1) return;
11. Assuming 'NElts' is <= 1
12. Taking true branch
13. Returning without writing to '*Start'
1648#ifdef EXPENSIVE_CHECKS
1649 detail::presortShuffle<IteratorTy>(Start, End);
1650#endif
1651 qsort(&*Start, NElts, sizeof(*Start), get_array_pod_sort_comparator(*Start));
1652}
1653
1654template <class IteratorTy>
1655inline void array_pod_sort(
1656 IteratorTy Start, IteratorTy End,
1657 int (*Compare)(
1658 const typename std::iterator_traits<IteratorTy>::value_type *,
1659 const typename std::iterator_traits<IteratorTy>::value_type *)) {
1660 // Don't inefficiently call qsort with one element or trigger undefined
1661 // behavior with an empty sequence.
1662 auto NElts = End - Start;
1663 if (NElts <= 1) return;
1664#ifdef EXPENSIVE_CHECKS
1665 detail::presortShuffle<IteratorTy>(Start, End);
1666#endif
1667 qsort(&*Start, NElts, sizeof(*Start),
1668 reinterpret_cast<int (*)(const void *, const void *)>(Compare));
1669}
1670
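A minimal sketch of the intended use, with invented names, on a POD element type that std::less orders and memcpy can move:

#include "llvm/ADT/STLExtras.h"

void sortIds(unsigned *ids, unsigned count) {
  llvm::array_pod_sort(ids, ids + count); // qsort under the hood
}
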
1671namespace detail {
1672template <typename T>
1673// We can use qsort if the iterator type is a pointer and the underlying value
1674// is trivially copyable.
1675using sort_trivially_copyable = std::conjunction<
1676 std::is_pointer<T>,
1677 std::is_trivially_copyable<typename std::iterator_traits<T>::value_type>>;
1678} // namespace detail
1679
1680// Provide wrappers to std::sort which shuffle the elements before sorting
1681// to help uncover non-deterministic behavior (PR35135).
1682template <typename IteratorTy>
1683inline void sort(IteratorTy Start, IteratorTy End) {
1684 if constexpr (detail::sort_trivially_copyable<IteratorTy>::value) {
8.1. 'value' is true
9. Taking true branch
1685 // Forward trivially copyable types to array_pod_sort. This avoids a large
1686 // amount of code bloat for a minor performance hit.
1687 array_pod_sort(Start, End);
10. Calling 'array_pod_sort<unsigned long *>'
14. Returning from 'array_pod_sort<unsigned long *>'
1688 } else {
1689#ifdef EXPENSIVE_CHECKS
1690 detail::presortShuffle<IteratorTy>(Start, End);
1691#endif
1692 std::sort(Start, End);
1693 }
1694}
15. Returning without writing to '*Start'
1695
1696template <typename Container> inline void sort(Container &&C) {
1697 llvm::sort(adl_begin(C), adl_end(C));
1698}
1699
1700template <typename IteratorTy, typename Compare>
1701inline void sort(IteratorTy Start, IteratorTy End, Compare Comp) {
1702#ifdef EXPENSIVE_CHECKS
1703 detail::presortShuffle<IteratorTy>(Start, End);
1704#endif
1705 std::sort(Start, End, Comp);
1706}
1707
1708template <typename Container, typename Compare>
1709inline void sort(Container &&C, Compare Comp) {
1710 llvm::sort(adl_begin(C), adl_end(C), Comp);
1711}
1712
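A minimal sketch with invented names. Under EXPENSIVE_CHECKS the elements are shuffled first, which surfaces comparators that depend on the incoming order:

#include "llvm/ADT/STLExtras.h"
#include <vector>

void sortDescending(std::vector<int> &v) {
  llvm::sort(v, [](int a, int b) { return a > b; });
}
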
1713/// Get the size of a range. This is a wrapper function around std::distance
1714/// which is only enabled when the operation is O(1).
1715template <typename R>
1716auto size(R &&Range,
1717 std::enable_if_t<
1718 std::is_base_of<std::random_access_iterator_tag,
1719 typename std::iterator_traits<decltype(
1720 Range.begin())>::iterator_category>::value,
1721 void> * = nullptr) {
1722 return std::distance(Range.begin(), Range.end());
1723}
1724
1725/// Provide wrappers to std::for_each which take ranges instead of having to
1726/// pass begin/end explicitly.
1727template <typename R, typename UnaryFunction>
1728UnaryFunction for_each(R &&Range, UnaryFunction F) {
1729 return std::for_each(adl_begin(Range), adl_end(Range), F);
1730}
1731
1732/// Provide wrappers to std::all_of which take ranges instead of having to pass
1733/// begin/end explicitly.
1734template <typename R, typename UnaryPredicate>
1735bool all_of(R &&Range, UnaryPredicate P) {
1736 return std::all_of(adl_begin(Range), adl_end(Range), P);
1737}
1738
1739/// Provide wrappers to std::any_of which take ranges instead of having to pass
1740/// begin/end explicitly.
1741template <typename R, typename UnaryPredicate>
1742bool any_of(R &&Range, UnaryPredicate P) {
1743 return std::any_of(adl_begin(Range), adl_end(Range), P);
1744}
1745
1746/// Provide wrappers to std::none_of which take ranges instead of having to pass
1747/// begin/end explicitly.
1748template <typename R, typename UnaryPredicate>
1749bool none_of(R &&Range, UnaryPredicate P) {
1750 return std::none_of(adl_begin(Range), adl_end(Range), P);
1751}
1752
1753/// Provide wrappers to std::find which take ranges instead of having to pass
1754/// begin/end explicitly.
1755template <typename R, typename T> auto find(R &&Range, const T &Val) {
1756 return std::find(adl_begin(Range), adl_end(Range), Val);
1757}
1758
1759/// Provide wrappers to std::find_if which take ranges instead of having to pass
1760/// begin/end explicitly.
1761template <typename R, typename UnaryPredicate>
1762auto find_if(R &&Range, UnaryPredicate P) {
1763 return std::find_if(adl_begin(Range), adl_end(Range), P);
1764}
1765
1766template <typename R, typename UnaryPredicate>
1767auto find_if_not(R &&Range, UnaryPredicate P) {
1768 return std::find_if_not(adl_begin(Range), adl_end(Range), P);
1769}
1770
1771/// Provide wrappers to std::remove_if which take ranges instead of having to
1772/// pass begin/end explicitly.
1773template <typename R, typename UnaryPredicate>
1774auto remove_if(R &&Range, UnaryPredicate P) {
1775 return std::remove_if(adl_begin(Range), adl_end(Range), P);
1776}
1777
1778/// Provide wrappers to std::copy_if which take ranges instead of having to
1779/// pass begin/end explicitly.
1780template <typename R, typename OutputIt, typename UnaryPredicate>
1781OutputIt copy_if(R &&Range, OutputIt Out, UnaryPredicate P) {
1782 return std::copy_if(adl_begin(Range), adl_end(Range), Out, P);
1783}
1784
1785/// Return the single value in \p Range that satisfies
1786/// \p P(<member of \p Range> *, AllowRepeats)->T *, returning nullptr
1787/// when no value or multiple values were found.
1788/// When \p AllowRepeats is true, multiple values that compare equal
1789/// are allowed.
1790template <typename T, typename R, typename Predicate>
1791T *find_singleton(R &&Range, Predicate P, bool AllowRepeats = false) {
1792 T *RC = nullptr;
1793 for (auto *A : Range) {
1794 if (T *PRC = P(A, AllowRepeats)) {
1795 if (RC) {
1796 if (!AllowRepeats || PRC != RC)
1797 return nullptr;
1798 } else
1799 RC = PRC;
1800 }
1801 }
1802 return RC;
1803}
1804
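A hypothetical sketch: the predicate maps each element to a result pointer or nullptr, and the range must contain pointers (names are invented):

#include "llvm/ADT/STLExtras.h"
#include <vector>

// Returns the unique element greater than 100, or nullptr if there is none
// or more than one.
int *findUniqueBig(std::vector<int *> &ptrs) {
  return llvm::find_singleton<int>(
      ptrs, [](int *p, bool /*AllowRepeats*/) -> int * {
        return *p > 100 ? p : nullptr;
      });
}
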
1805/// Return a pair consisting of the single value in \p Range that satisfies
1806/// \p P(<member of \p Range> *, AllowRepeats)->std::pair<T*, bool> returning
1807/// nullptr when no values or multiple values were found, and a bool indicating
1808/// whether multiple values were found to cause the nullptr.
1809/// When \p AllowRepeats is true, multiple values that compare equal are
1810/// allowed. The predicate \p P returns a pair<T *, bool> where T is the
1811/// singleton while the bool indicates whether multiples have already been
1812/// found. It is expected that first will be nullptr when second is true.
1813/// This allows using find_singleton_nested within the predicate \p P.
1814template <typename T, typename R, typename Predicate>
1815std::pair<T *, bool> find_singleton_nested(R &&Range, Predicate P,
1816 bool AllowRepeats = false) {
1817 T *RC = nullptr;
1818 for (auto *A : Range) {
1819 std::pair<T *, bool> PRC = P(A, AllowRepeats);
1820 if (PRC.second) {
1821 assert(PRC.first == nullptr &&
1822 "Inconsistent return values in find_singleton_nested.");
1823 return PRC;
1824 }
1825 if (PRC.first) {
1826 if (RC) {
1827 if (!AllowRepeats || PRC.first != RC)
1828 return {nullptr, true};
1829 } else
1830 RC = PRC.first;
1831 }
1832 }
1833 return {RC, false};
1834}
1835
1836template <typename R, typename OutputIt>
1837OutputIt copy(R &&Range, OutputIt Out) {
1838 return std::copy(adl_begin(Range), adl_end(Range), Out);
1839}
1840
1841/// Provide wrappers to std::replace_copy_if which take ranges instead of having
1842/// to pass begin/end explicitly.
1843template <typename R, typename OutputIt, typename UnaryPredicate, typename T>
1844OutputIt replace_copy_if(R &&Range, OutputIt Out, UnaryPredicate P,
1845 const T &NewValue) {
1846 return std::replace_copy_if(adl_begin(Range), adl_end(Range), Out, P,
1847 NewValue);
1848}
1849
1850/// Provide wrappers to std::replace_copy which take ranges instead of having to
1851/// pass begin/end explicitly.
1852template <typename R, typename OutputIt, typename T>
1853OutputIt replace_copy(R &&Range, OutputIt Out, const T &OldValue,
1854 const T &NewValue) {
1855 return std::replace_copy(adl_begin(Range), adl_end(Range), Out, OldValue,
1856 NewValue);
1857}
1858
1859/// Provide wrappers to std::move which take ranges instead of having to
1860/// pass begin/end explicitly.
1861template <typename R, typename OutputIt>
1862OutputIt move(R &&Range, OutputIt Out) {
1863 return std::move(adl_begin(Range), adl_end(Range), Out);
1864}
1865
1866/// Wrapper function around std::find to detect if an element exists
1867/// in a container.
1868template <typename R, typename E>
1869bool is_contained(R &&Range, const E &Element) {
1870 return std::find(adl_begin(Range), adl_end(Range), Element) != adl_end(Range);
1871}
1872
1873template <typename T>
1874constexpr bool is_contained(std::initializer_list<T> Set, T Value) {
1875 // TODO: Use std::find when we switch to C++20.
1876 for (T V : Set)
1877 if (V == Value)
1878 return true;
1879 return false;
1880}
1881
1882/// Wrapper function around std::is_sorted to check if elements in a range \p R
1883/// are sorted with respect to a comparator \p C.
1884template <typename R, typename Compare> bool is_sorted(R &&Range, Compare C) {
1885 return std::is_sorted(adl_begin(Range), adl_end(Range), C);
1886}
1887
1888/// Wrapper function around std::is_sorted to check if elements in a range \p R
1889/// are sorted in non-descending order.
1890template <typename R> bool is_sorted(R &&Range) {
1891 return std::is_sorted(adl_begin(Range), adl_end(Range));
1892}
1893
1894/// Wrapper function around std::count to count the number of times an element
1895/// \p Element occurs in the given range \p Range.
1896template <typename R, typename E> auto count(R &&Range, const E &Element) {
1897 return std::count(adl_begin(Range), adl_end(Range), Element);
1898}
1899
1900/// Wrapper function around std::count_if to count the number of times an
1901/// element satisfying a given predicate occurs in a range.
1902template <typename R, typename UnaryPredicate>
1903auto count_if(R &&Range, UnaryPredicate P) {
1904 return std::count_if(adl_begin(Range), adl_end(Range), P);
1905}
1906
1907/// Wrapper function around std::transform to apply a function to a range and
1908/// store the result elsewhere.
1909template <typename R, typename OutputIt, typename UnaryFunction>
1910OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F) {
1911 return std::transform(adl_begin(Range), adl_end(Range), d_first, F);
1912}
1913
1914/// Provide wrappers to std::partition which take ranges instead of having to
1915/// pass begin/end explicitly.
1916template <typename R, typename UnaryPredicate>
1917auto partition(R &&Range, UnaryPredicate P) {
1918 return std::partition(adl_begin(Range), adl_end(Range), P);
1919}
1920
1921/// Provide wrappers to std::lower_bound which take ranges instead of having to
1922/// pass begin/end explicitly.
1923template <typename R, typename T> auto lower_bound(R &&Range, T &&Value) {
1924 return std::lower_bound(adl_begin(Range), adl_end(Range),
1925 std::forward<T>(Value));
1926}
1927
1928template <typename R, typename T, typename Compare>
1929auto lower_bound(R &&Range, T &&Value, Compare C) {
1930 return std::lower_bound(adl_begin(Range), adl_end(Range),
1931 std::forward<T>(Value), C);
1932}
1933
1934/// Provide wrappers to std::upper_bound which take ranges instead of having to
1935/// pass begin/end explicitly.
1936template <typename R, typename T> auto upper_bound(R &&Range, T &&Value) {
1937 return std::upper_bound(adl_begin(Range), adl_end(Range),
1938 std::forward<T>(Value));
1939}
1940
1941template <typename R, typename T, typename Compare>
1942auto upper_bound(R &&Range, T &&Value, Compare C) {
1943 return std::upper_bound(adl_begin(Range), adl_end(Range),
1944 std::forward<T>(Value), C);
1945}
1946
1947template <typename R>
1948void stable_sort(R &&Range) {
1949 std::stable_sort(adl_begin(Range), adl_end(Range));
1950}
1951
1952template <typename R, typename Compare>
1953void stable_sort(R &&Range, Compare C) {
1954 std::stable_sort(adl_begin(Range), adl_end(Range), C);
1955}
1956
1957/// Binary search for the first iterator in a range where a predicate is false.
1958/// Requires that P is always true below some limit, and always false above it.
1959template <typename R, typename Predicate,
1960 typename Val = decltype(*adl_begin(std::declval<R>()))>
1961auto partition_point(R &&Range, Predicate P) {
1962 return std::partition_point(adl_begin(Range), adl_end(Range), P);
1963}
1964
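A minimal sketch, assuming a sorted input so the predicate is true exactly on a prefix (names invented):

#include "llvm/ADT/STLExtras.h"
#include <vector>

// Iterator to the first element not less than threshold.
auto firstAtLeast(const std::vector<int> &sorted, int threshold) {
  return llvm::partition_point(sorted, [&](int v) { return v < threshold; });
}
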
1965template<typename Range, typename Predicate>
1966auto unique(Range &&R, Predicate P) {
1967 return std::unique(adl_begin(R), adl_end(R), P);
1968}
1969
1970/// Wrapper function around std::equal to detect if pair-wise elements between
1971/// two ranges are the same.
1972template <typename L, typename R> bool equal(L &&LRange, R &&RRange) {
1973 return std::equal(adl_begin(LRange), adl_end(LRange), adl_begin(RRange),
1974 adl_end(RRange));
1975}
1976
1977/// Returns true if all elements in Range are equal or when the Range is empty.
1978template <typename R> bool all_equal(R &&Range) {
1979 auto Begin = adl_begin(Range);
1980 auto End = adl_end(Range);
1981 return Begin == End || std::equal(Begin + 1, End, Begin);
1982}
1983
1984/// Returns true if all Values in the initializer list are equal or the list
1985/// is empty.
1986template <typename T> bool all_equal(std::initializer_list<T> Values) {
1987 return all_equal<std::initializer_list<T>>(std::move(Values));
1988}
1989
1990/// Provide a container algorithm similar to C++ Library Fundamentals v2's
1991/// `erase_if` which is equivalent to:
1992///
1993/// C.erase(remove_if(C, pred), C.end());
1994///
1995/// This version works for any container with an erase method call accepting
1996/// two iterators.
1997template <typename Container, typename UnaryPredicate>
1998void erase_if(Container &C, UnaryPredicate P) {
1999 C.erase(remove_if(C, P), C.end());
2000}
2001
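A minimal sketch with invented names:

#include "llvm/ADT/STLExtras.h"
#include <vector>

void dropOdds(std::vector<int> &v) {
  // Erase-remove idiom in one call.
  llvm::erase_if(v, [](int x) { return x % 2 != 0; });
}
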
2002/// Wrapper function to remove a value from a container:
2003///
2004/// C.erase(remove(C.begin(), C.end(), V), C.end());
2005template <typename Container, typename ValueType>
2006void erase_value(Container &C, ValueType V) {
2007 C.erase(std::remove(C.begin(), C.end(), V), C.end());
2008}
2009
2010/// Wrapper function to append a range to a container.
2011///
2012/// C.insert(C.end(), R.begin(), R.end());
2013template <typename Container, typename Range>
2014inline void append_range(Container &C, Range &&R) {
2015 C.insert(C.end(), R.begin(), R.end());
2016}
2017
2018/// Given a sequence container Cont, replace the range [ContIt, ContEnd) with
2019/// the range [ValIt, ValEnd) (which is not from the same container).
2020template<typename Container, typename RandomAccessIterator>
2021void replace(Container &Cont, typename Container::iterator ContIt,
2022 typename Container::iterator ContEnd, RandomAccessIterator ValIt,
2023 RandomAccessIterator ValEnd) {
2024 while (true) {
2025 if (ValIt == ValEnd) {
2026 Cont.erase(ContIt, ContEnd);
2027 return;
2028 } else if (ContIt == ContEnd) {
2029 Cont.insert(ContIt, ValIt, ValEnd);
2030 return;
2031 }
2032 *ContIt++ = *ValIt++;
2033 }
2034}
2035
2036/// Given a sequence container Cont, replace the range [ContIt, ContEnd) with
2037/// the range R.
2038template<typename Container, typename Range = std::initializer_list<
2039 typename Container::value_type>>
2040void replace(Container &Cont, typename Container::iterator ContIt,
2041 typename Container::iterator ContEnd, Range R) {
2042 replace(Cont, ContIt, ContEnd, R.begin(), R.end());
2043}
2044
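A minimal sketch of the range-overwriting behavior (invented names; assumes the vector has at least three elements):

#include "llvm/ADT/STLExtras.h"
#include <vector>

void spliceMiddle(std::vector<int> &v) {
  // Replace the two elements [begin()+1, begin()+3) with four values; the
  // container grows or shrinks as needed.
  llvm::replace(v, v.begin() + 1, v.begin() + 3, {7, 8, 9, 10});
}
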
2045/// An STL-style algorithm similar to std::for_each that applies a second
2046/// functor between every pair of elements.
2047///
2048/// This provides the control flow logic to, for example, print a
2049/// comma-separated list:
2050/// \code
2051/// interleave(names.begin(), names.end(),
2052/// [&](StringRef name) { os << name; },
2053/// [&] { os << ", "; });
2054/// \endcode
2055template <typename ForwardIterator, typename UnaryFunctor,
2056 typename NullaryFunctor,
2057 typename = std::enable_if_t<
2058 !std::is_constructible<StringRef, UnaryFunctor>::value &&
2059 !std::is_constructible<StringRef, NullaryFunctor>::value>>
2060inline void interleave(ForwardIterator begin, ForwardIterator end,
2061 UnaryFunctor each_fn, NullaryFunctor between_fn) {
2062 if (begin == end)
2063 return;
2064 each_fn(*begin);
2065 ++begin;
2066 for (; begin != end; ++begin) {
2067 between_fn();
2068 each_fn(*begin);
2069 }
2070}
2071
2072template <typename Container, typename UnaryFunctor, typename NullaryFunctor,
2073 typename = std::enable_if_t<
2074 !std::is_constructible<StringRef, UnaryFunctor>::value &&
2075 !std::is_constructible<StringRef, NullaryFunctor>::value>>
2076inline void interleave(const Container &c, UnaryFunctor each_fn,
2077 NullaryFunctor between_fn) {
2078 interleave(c.begin(), c.end(), each_fn, between_fn);
2079}
2080
2081/// Overload of interleave for the common case of string separator.
2082template <typename Container, typename UnaryFunctor, typename StreamT,
2083 typename T = detail::ValueOfRange<Container>>
2084inline void interleave(const Container &c, StreamT &os, UnaryFunctor each_fn,
2085 const StringRef &separator) {
2086 interleave(c.begin(), c.end(), each_fn, [&] { os << separator; });
2087}
2088template <typename Container, typename StreamT,
2089 typename T = detail::ValueOfRange<Container>>
2090inline void interleave(const Container &c, StreamT &os,
2091 const StringRef &separator) {
2092 interleave(
2093 c, os, [&](const T &a) { os << a; }, separator);
2094}
2095
2096template <typename Container, typename UnaryFunctor, typename StreamT,
2097 typename T = detail::ValueOfRange<Container>>
2098inline void interleaveComma(const Container &c, StreamT &os,
2099 UnaryFunctor each_fn) {
2100 interleave(c, os, each_fn, ", ");
2101}
2102template <typename Container, typename StreamT,
2103 typename T = detail::ValueOfRange<Container>>
2104inline void interleaveComma(const Container &c, StreamT &os) {
2105 interleaveComma(c, os, [&](const T &a) { os << a; });
2106}
2107
2108//===----------------------------------------------------------------------===//
2109// Extra additions to <memory>
2110//===----------------------------------------------------------------------===//
2111
2112struct FreeDeleter {
2113 void operator()(void* v) {
2114 ::free(v);
2115 }
2116};
2117
2118template<typename First, typename Second>
2119struct pair_hash {
2120 size_t operator()(const std::pair<First, Second> &P) const {
2121 return std::hash<First>()(P.first) * 31 + std::hash<Second>()(P.second);
2122 }
2123};
2124
2125/// Binary functor that adapts to any other binary functor after dereferencing
2126/// operands.
2127template <typename T> struct deref {
2128 T func;
2129
2130 // Could be further improved to cope with non-derivable functors and
2131 // non-binary functors (should be a variadic template member function
2132 // operator()).
2133 template <typename A, typename B> auto operator()(A &lhs, B &rhs) const {
2134 assert(lhs);
2135 assert(rhs);
2136 return func(*lhs, *rhs);
2137 }
2138};
2139
2140namespace detail {
2141
2142template <typename R> class enumerator_iter;
2143
2144template <typename R> struct result_pair {
2145 using value_reference =
2146 typename std::iterator_traits<IterOfRange<R>>::reference;
2147
2148 friend class enumerator_iter<R>;
2149
2150 result_pair() = default;
2151 result_pair(std::size_t Index, IterOfRange<R> Iter)
2152 : Index(Index), Iter(Iter) {}
2153
2154 result_pair(const result_pair<R> &Other)
2155 : Index(Other.Index), Iter(Other.Iter) {}
2156 result_pair &operator=(const result_pair &Other) {
2157 Index = Other.Index;
2158 Iter = Other.Iter;
2159 return *this;
2160 }
2161
2162 std::size_t index() const { return Index; }
2163 value_reference value() const { return *Iter; }
2164
2165private:
2166 std::size_t Index = std::numeric_limits<std::size_t>::max();
2167 IterOfRange<R> Iter;
2168};
2169
2170template <std::size_t i, typename R>
2171decltype(auto) get(const result_pair<R> &Pair) {
2172 static_assert(i < 2);
2173 if constexpr (i == 0) {
2174 return Pair.index();
2175 } else {
2176 return Pair.value();
2177 }
2178}
2179
2180template <typename R>
2181class enumerator_iter
2182 : public iterator_facade_base<enumerator_iter<R>, std::forward_iterator_tag,
2183 const result_pair<R>> {
2184 using result_type = result_pair<R>;
2185
2186public:
2187 explicit enumerator_iter(IterOfRange<R> EndIter)
2188 : Result(std::numeric_limits<size_t>::max(), EndIter) {}
2189
2190 enumerator_iter(std::size_t Index, IterOfRange<R> Iter)
2191 : Result(Index, Iter) {}
2192
2193 const result_type &operator*() const { return Result; }
2194
2195 enumerator_iter &operator++() {
2196 assert(Result.Index != std::numeric_limits<size_t>::max());
2197 ++Result.Iter;
2198 ++Result.Index;
2199 return *this;
2200 }
2201
2202 bool operator==(const enumerator_iter &RHS) const {
2203 // Don't compare indices here, only iterators. It's possible for an end
2204 // iterator to have different indices depending on whether it was created
2205 // by calling std::end() versus incrementing a valid iterator.
2206 return Result.Iter == RHS.Result.Iter;
2207 }
2208
2209 enumerator_iter(const enumerator_iter &Other) : Result(Other.Result) {}
2210 enumerator_iter &operator=(const enumerator_iter &Other) {
2211 Result = Other.Result;
2212 return *this;
2213 }
2214
2215private:
2216 result_type Result;
2217};
2218
2219template <typename R> class enumerator {
2220public:
2221 explicit enumerator(R &&Range) : TheRange(std::forward<R>(Range)) {}
2222
2223 enumerator_iter<R> begin() {
2224 return enumerator_iter<R>(0, std::begin(TheRange));
2225 }
2226 enumerator_iter<R> begin() const {
2227 return enumerator_iter<R>(0, std::begin(TheRange));
2228 }
2229
2230 enumerator_iter<R> end() {
2231 return enumerator_iter<R>(std::end(TheRange));
2232 }
2233 enumerator_iter<R> end() const {
2234 return enumerator_iter<R>(std::end(TheRange));
2235 }
2236
2237private:
2238 R TheRange;
2239};
2240
2241} // end namespace detail
2242
2243/// Given an input range, returns a new range whose values are pairs (A, B)
2244/// such that A is the 0-based index of the item in the sequence, and B is
2245/// the value from the original sequence. Example:
2246///
2247/// std::vector<char> Items = {'A', 'B', 'C', 'D'};
2248/// for (auto X : enumerate(Items)) {
2249/// printf("Item %d - %c\n", X.index(), X.value());
2250/// }
2251///
2252/// or using structured bindings:
2253///
2254/// for (auto [Index, Value] : enumerate(Items)) {
2255/// printf("Item %d - %c\n", Index, Value);
2256/// }
2257///
2258/// Output:
2259/// Item 0 - A
2260/// Item 1 - B
2261/// Item 2 - C
2262/// Item 3 - D
2263///
2264template <typename R> detail::enumerator<R> enumerate(R &&TheRange) {
2265 return detail::enumerator<R>(std::forward<R>(TheRange));
2266}
2267
2268namespace detail {
2269
2270template <typename Predicate, typename... Args>
2271bool all_of_zip_predicate_first(Predicate &&P, Args &&...args) {
2272 auto z = zip(args...);
2273 auto it = z.begin();
2274 auto end = z.end();
2275 while (it != end) {
2276 if (!std::apply([&](auto &&...args) { return P(args...); }, *it))
2277 return false;
2278 ++it;
2279 }
2280 return it.all_equals(end);
2281}
2282
2283// Just an adaptor to switch the order of arguments and have the predicate
2284// before the zipped inputs.
2285template <typename... ArgsThenPredicate, size_t... InputIndexes>
2286bool all_of_zip_predicate_last(
2287 std::tuple<ArgsThenPredicate...> argsThenPredicate,
2288 std::index_sequence<InputIndexes...>) {
2289 auto constexpr OutputIndex =
2290 std::tuple_size<decltype(argsThenPredicate)>::value - 1;
2291 return all_of_zip_predicate_first(std::get<OutputIndex>(argsThenPredicate),
2292 std::get<InputIndexes>(argsThenPredicate)...);
2293}
2294
2295} // end namespace detail
2296
2297/// Compare two zipped ranges using the provided predicate (as last argument).
2298/// Return true if all elements satisfy the predicate and false otherwise.
2299/// Return false if the zipped iterators aren't all at their end (size mismatch).
2300template <typename... ArgsAndPredicate>
2301bool all_of_zip(ArgsAndPredicate &&...argsAndPredicate) {
2302 return detail::all_of_zip_predicate_last(
2303 std::forward_as_tuple(argsAndPredicate...),
2304 std::make_index_sequence<sizeof...(argsAndPredicate) - 1>{});
2305}
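
Editor's note: a usage sketch (not part of the listed source). The predicate comes last, after the zipped ranges, and a length mismatch makes the call return false:

    #include <vector>

    bool sameContents(const std::vector<int> &A, const std::vector<int> &B) {
      // True only if A and B have equal length and equal elements pairwise.
      return llvm::all_of_zip(A, B, [](int X, int Y) { return X == Y; });
    }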
2306
2307/// Return true if the sequence [Begin, End) has exactly N items. Runs in O(N)
2308/// time. Not meant for use with random-access iterators.
2309/// Can optionally take a predicate to lazily filter some items.
2310template <typename IterTy,
2311 typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
2312bool hasNItems(
2313 IterTy &&Begin, IterTy &&End, unsigned N,
2314 Pred &&ShouldBeCounted =
2315 [](const decltype(*std::declval<IterTy>()) &) { return true; },
2316 std::enable_if_t<
2317 !std::is_base_of<std::random_access_iterator_tag,
2318 typename std::iterator_traits<std::remove_reference_t<
2319 decltype(Begin)>>::iterator_category>::value,
2320 void> * = nullptr) {
2321 for (; N; ++Begin) {
2322 if (Begin == End)
2323 return false; // Too few.
2324 N -= ShouldBeCounted(*Begin);
2325 }
2326 for (; Begin != End; ++Begin)
2327 if (ShouldBeCounted(*Begin))
2328 return false; // Too many.
2329 return true;
2330}
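
Editor's note: a usage sketch (not part of the listed source), using a std::forward_list so the iterators are not random-access. The predicate restricts which elements count toward N:

    #include <forward_list>

    bool hasTwoPositive(const std::forward_list<int> &L) {
      // True iff exactly two elements are positive; the rest are skipped.
      return llvm::hasNItems(L.begin(), L.end(), 2,
                             [](const int &X) { return X > 0; });
    }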
2331
2332/// Return true if the sequence [Begin, End) has N or more items. Runs in O(N)
2333/// time. Not meant for use with random-access iterators.
2334/// Can optionally take a predicate to lazily filter some items.
2335template <typename IterTy,
2336 typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
2337bool hasNItemsOrMore(
2338 IterTy &&Begin, IterTy &&End, unsigned N,
2339 Pred &&ShouldBeCounted =
2340 [](const decltype(*std::declval<IterTy>()) &) { return true; },
2341 std::enable_if_t<
2342 !std::is_base_of<std::random_access_iterator_tag,
2343 typename std::iterator_traits<std::remove_reference_t<
2344 decltype(Begin)>>::iterator_category>::value,
2345 void> * = nullptr) {
2346 for (; N; ++Begin) {
2347 if (Begin == End)
2348 return false; // Too few.
2349 N -= ShouldBeCounted(*Begin);
2350 }
2351 return true;
2352}
2353
2354/// Returns true if the sequence [Begin, End) has N or fewer items. Can
2355/// optionally take a predicate to lazily filter some items.
2356template <typename IterTy,
2357 typename Pred = bool (*)(const decltype(*std::declval<IterTy>()) &)>
2358bool hasNItemsOrLess(
2359 IterTy &&Begin, IterTy &&End, unsigned N,
2360 Pred &&ShouldBeCounted = [](const decltype(*std::declval<IterTy>()) &) {
2361 return true;
2362 }) {
2363 assert(N != std::numeric_limits<unsigned>::max());
2364 return !hasNItemsOrMore(Begin, End, N + 1, ShouldBeCounted);
2365}
2366
2367/// Returns true if the given container has exactly N items.
2368template <typename ContainerTy> bool hasNItems(ContainerTy &&C, unsigned N) {
2369 return hasNItems(std::begin(C), std::end(C), N);
2370}
2371
2372/// Returns true if the given container has N or more items.
2373template <typename ContainerTy>
2374bool hasNItemsOrMore(ContainerTy &&C, unsigned N) {
2375 return hasNItemsOrMore(std::begin(C), std::end(C), N);
2376}
2377
2378/// Returns true if the given container has N or fewer items.
2379template <typename ContainerTy>
2380bool hasNItemsOrLess(ContainerTy &&C, unsigned N) {
2381 return hasNItemsOrLess(std::begin(C), std::end(C), N);
2382}
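
Editor's note: a usage sketch (not part of the listed source). The container overloads forward to the iterator versions above, so they are likewise intended for non-random-access containers, where size() is unavailable or not O(1):

    #include <forward_list>

    void containerQueries() {
      std::forward_list<int> L = {1, 2, 3};
      bool Exactly3 = llvm::hasNItems(L, 3);       // true
      bool AtLeast2 = llvm::hasNItemsOrMore(L, 2); // true
      bool AtMost2 = llvm::hasNItemsOrLess(L, 2);  // false
    }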
2383
2384/// Returns a raw pointer that represents the same address as the argument.
2385///
2386/// This implementation can be removed once we move to C++20 where it's defined
2387/// as std::to_address().
2388///
2389/// The std::pointer_traits<>::to_address(p) variations of these overloads have
2390/// not been implemented.
2391template <class Ptr> auto to_address(const Ptr &P) { return P.operator->(); }
2392template <class T> constexpr T *to_address(T *P) { return P; }
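
Editor's note: a usage sketch (not part of the listed source). to_address recovers a raw pointer either from a raw pointer (identity overload) or from any pointer-like type with operator->, such as std::unique_ptr:

    #include <memory>

    void addresses(std::unique_ptr<int> &P) {
      int *Raw = llvm::to_address(P);    // calls P.operator->()
      int *Same = llvm::to_address(Raw); // identity overload for raw pointers
    }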
2393
2394} // end namespace llvm
2395
2396namespace std {
2397template <typename R>
2398struct tuple_size<llvm::detail::result_pair<R>>
2399 : std::integral_constant<std::size_t, 2> {};
2400
2401template <std::size_t i, typename R>
2402struct tuple_element<i, llvm::detail::result_pair<R>>
2403 : std::conditional<i == 0, std::size_t,
2404 typename llvm::detail::result_pair<R>::value_reference> {
2405};
2406
2407} // namespace std
2408
2409#endif // LLVM_ADT_STLEXTRAS_H