| File: | build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/lld/MachO/SyntheticSections.cpp |
| Warning: | line 1566, column 13: The result of the left shift is undefined due to shifting by '32', which is greater or equal to the width of type 'int' |
| 1 | //===- SyntheticSections.cpp ---------------------------------------------===// | |||
| 2 | // | |||
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
| 4 | // See https://llvm.org/LICENSE.txt for license information. | |||
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
| 6 | // | |||
| 7 | //===----------------------------------------------------------------------===// | |||
| 8 | ||||
| 9 | #include "SyntheticSections.h" | |||
| 10 | #include "ConcatOutputSection.h" | |||
| 11 | #include "Config.h" | |||
| 12 | #include "ExportTrie.h" | |||
| 13 | #include "InputFiles.h" | |||
| 14 | #include "MachOStructs.h" | |||
| 15 | #include "OutputSegment.h" | |||
| 16 | #include "SymbolTable.h" | |||
| 17 | #include "Symbols.h" | |||
| 18 | ||||
| 19 | #include "lld/Common/CommonLinkerContext.h" | |||
| 20 | #include "llvm/ADT/STLExtras.h" | |||
| 21 | #include "llvm/Config/llvm-config.h" | |||
| 22 | #include "llvm/Support/EndianStream.h" | |||
| 23 | #include "llvm/Support/FileSystem.h" | |||
| 24 | #include "llvm/Support/LEB128.h" | |||
| 25 | #include "llvm/Support/Parallel.h" | |||
| 26 | #include "llvm/Support/Path.h" | |||
| 27 | #include "llvm/Support/xxhash.h" | |||
| 28 | ||||
| 29 | #if defined(__APPLE__) | |||
| 30 | #include <sys/mman.h> | |||
| 31 | ||||
| 32 | #define COMMON_DIGEST_FOR_OPENSSL | |||
| 33 | #include <CommonCrypto/CommonDigest.h> | |||
| 34 | #else | |||
| 35 | #include "llvm/Support/SHA256.h" | |||
| 36 | #endif | |||
| 37 | ||||
| 38 | #ifdef LLVM_HAVE_LIBXAR | |||
| 39 | #include <fcntl.h> | |||
| 40 | extern "C" { | |||
| 41 | #include <xar/xar.h> | |||
| 42 | } | |||
| 43 | #endif | |||
| 44 | ||||
| 45 | using namespace llvm; | |||
| 46 | using namespace llvm::MachO; | |||
| 47 | using namespace llvm::support; | |||
| 48 | using namespace llvm::support::endian; | |||
| 49 | using namespace lld; | |||
| 50 | using namespace lld::macho; | |||
| 51 | ||||
| 52 | // Reads `len` bytes at data and writes the 32-byte SHA256 checksum to `output`. | |||
| 53 | static void sha256(const uint8_t *data, size_t len, uint8_t *output) { | |||
| 54 | #if defined(__APPLE__) | |||
| 55 | // FIXME: Make LLVM's SHA256 faster and use it unconditionally. See PR56121 | |||
| 56 | // for some notes on this. | |||
| 57 | CC_SHA256(data, len, output); | |||
| 58 | #else | |||
| 59 | ArrayRef<uint8_t> block(data, len); | |||
| 60 | std::array<uint8_t, 32> hash = SHA256::hash(block); | |||
| 61 | static_assert(hash.size() == CodeSignatureSection::hashSize); | |||
| 62 | memcpy(output, hash.data(), hash.size()); | |||
| 63 | #endif | |||
| 64 | } | |||
| 65 | ||||
| 66 | InStruct macho::in; | |||
| 67 | std::vector<SyntheticSection *> macho::syntheticSections; | |||
| 68 | ||||
| 69 | SyntheticSection::SyntheticSection(const char *segname, const char *name) | |||
| 70 | : OutputSection(SyntheticKind, name) { | |||
| 71 | std::tie(this->segname, this->name) = maybeRenameSection({segname, name}); | |||
| 72 | isec = makeSyntheticInputSection(segname, name); | |||
| 73 | isec->parent = this; | |||
| 74 | syntheticSections.push_back(this); | |||
| 75 | } | |||
| 76 | ||||
| 77 | // dyld3's MachOLoaded::getSlide() assumes that the __TEXT segment starts | |||
| 78 | // from the beginning of the file (i.e. the header). | |||
| 79 | MachHeaderSection::MachHeaderSection() | |||
| 80 | : SyntheticSection(segment_names::text, section_names::header) { | |||
| 81 | // XXX: This is a hack. (See D97007) | |||
| 82 | // Setting the index to 1 to pretend that this section is the text | |||
| 83 | // section. | |||
| 84 | index = 1; | |||
| 85 | isec->isFinal = true; | |||
| 86 | } | |||
| 87 | ||||
| 88 | void MachHeaderSection::addLoadCommand(LoadCommand *lc) { | |||
| 89 | loadCommands.push_back(lc); | |||
| 90 | sizeOfCmds += lc->getSize(); | |||
| 91 | } | |||
| 92 | ||||
| 93 | uint64_t MachHeaderSection::getSize() const { | |||
| 94 | uint64_t size = target->headerSize + sizeOfCmds + config->headerPad; | |||
| 95 | // If we are emitting an encryptable binary, our load commands must have a | |||
| 96 | // separate (non-encrypted) page to themselves. | |||
| 97 | if (config->emitEncryptionInfo) | |||
| 98 | size = alignTo(size, target->getPageSize()); | |||
| 99 | return size; | |||
| 100 | } | |||
| 101 | ||||
| 102 | static uint32_t cpuSubtype() { | |||
| 103 | uint32_t subtype = target->cpuSubtype; | |||
| 104 | ||||
| 105 | if (config->outputType == MH_EXECUTE && !config->staticLink && | |||
| 106 | target->cpuSubtype == CPU_SUBTYPE_X86_64_ALL && | |||
| 107 | config->platform() == PLATFORM_MACOS && | |||
| 108 | config->platformInfo.minimum >= VersionTuple(10, 5)) | |||
| 109 | subtype |= CPU_SUBTYPE_LIB64; | |||
| 110 | ||||
| 111 | return subtype; | |||
| 112 | } | |||
| 113 | ||||
| 114 | void MachHeaderSection::writeTo(uint8_t *buf) const { | |||
| 115 | auto *hdr = reinterpret_cast<mach_header *>(buf); | |||
| 116 | hdr->magic = target->magic; | |||
| 117 | hdr->cputype = target->cpuType; | |||
| 118 | hdr->cpusubtype = cpuSubtype(); | |||
| 119 | hdr->filetype = config->outputType; | |||
| 120 | hdr->ncmds = loadCommands.size(); | |||
| 121 | hdr->sizeofcmds = sizeOfCmds; | |||
| 122 | hdr->flags = MH_DYLDLINK; | |||
| 123 | ||||
| 124 | if (config->namespaceKind == NamespaceKind::twolevel) | |||
| 125 | hdr->flags |= MH_NOUNDEFS | MH_TWOLEVEL; | |||
| 126 | ||||
| 127 | if (config->outputType == MH_DYLIB && !config->hasReexports) | |||
| 128 | hdr->flags |= MH_NO_REEXPORTED_DYLIBS; | |||
| 129 | ||||
| 130 | if (config->markDeadStrippableDylib) | |||
| 131 | hdr->flags |= MH_DEAD_STRIPPABLE_DYLIB; | |||
| 132 | ||||
| 133 | if (config->outputType == MH_EXECUTE && config->isPic) | |||
| 134 | hdr->flags |= MH_PIE; | |||
| 135 | ||||
| 136 | if (config->outputType == MH_DYLIB && config->applicationExtension) | |||
| 137 | hdr->flags |= MH_APP_EXTENSION_SAFE; | |||
| 138 | ||||
| 139 | if (in.exports->hasWeakSymbol || in.weakBinding->hasNonWeakDefinition()) | |||
| 140 | hdr->flags |= MH_WEAK_DEFINES; | |||
| 141 | ||||
| 142 | if (in.exports->hasWeakSymbol || in.weakBinding->hasEntry()) | |||
| 143 | hdr->flags |= MH_BINDS_TO_WEAK; | |||
| 144 | ||||
| 145 | for (const OutputSegment *seg : outputSegments) { | |||
| 146 | for (const OutputSection *osec : seg->getSections()) { | |||
| 147 | if (isThreadLocalVariables(osec->flags)) { | |||
| 148 | hdr->flags |= MH_HAS_TLV_DESCRIPTORS; | |||
| 149 | break; | |||
| 150 | } | |||
| 151 | } | |||
| 152 | } | |||
| 153 | ||||
| 154 | uint8_t *p = reinterpret_cast<uint8_t *>(hdr) + target->headerSize; | |||
| 155 | for (const LoadCommand *lc : loadCommands) { | |||
| 156 | lc->writeTo(p); | |||
| 157 | p += lc->getSize(); | |||
| 158 | } | |||
| 159 | } | |||
| 160 | ||||
| 161 | PageZeroSection::PageZeroSection() | |||
| 162 | : SyntheticSection(segment_names::pageZero, section_names::pageZero) {} | |||
| 163 | ||||
| 164 | RebaseSection::RebaseSection() | |||
| 165 | : LinkEditSection(segment_names::linkEdit, section_names::rebase) {} | |||
| 166 | ||||
| 167 | namespace { | |||
| 168 | struct RebaseState { | |||
| 169 | uint64_t sequenceLength; | |||
| 170 | uint64_t skipLength; | |||
| 171 | }; | |||
| 172 | } // namespace | |||
| 173 | ||||
| 174 | static void emitIncrement(uint64_t incr, raw_svector_ostream &os) { | |||
| 175 | assert(incr != 0); | |||
| 176 | ||||
| 177 | if ((incr >> target->p2WordSize) <= REBASE_IMMEDIATE_MASK && | |||
| 178 | (incr % target->wordSize) == 0) { | |||
| 179 | os << static_cast<uint8_t>(REBASE_OPCODE_ADD_ADDR_IMM_SCALED | | |||
| 180 | (incr >> target->p2WordSize)); | |||
| 181 | } else { | |||
| 182 | os << static_cast<uint8_t>(REBASE_OPCODE_ADD_ADDR_ULEB); | |||
| 183 | encodeULEB128(incr, os); | |||
| 184 | } | |||
| 185 | } | |||
| 186 | ||||
| 187 | static void flushRebase(const RebaseState &state, raw_svector_ostream &os) { | |||
| 188 | assert(state.sequenceLength > 0); | |||
| 189 | ||||
| 190 | if (state.skipLength == target->wordSize) { | |||
| 191 | if (state.sequenceLength <= REBASE_IMMEDIATE_MASK) { | |||
| 192 | os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_IMM_TIMES | | |||
| 193 | state.sequenceLength); | |||
| 194 | } else { | |||
| 195 | os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_ULEB_TIMES); | |||
| 196 | encodeULEB128(state.sequenceLength, os); | |||
| 197 | } | |||
| 198 | } else if (state.sequenceLength == 1) { | |||
| 199 | os << static_cast<uint8_t>(REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB); | |||
| 200 | encodeULEB128(state.skipLength - target->wordSize, os); | |||
| 201 | } else { | |||
| 202 | os << static_cast<uint8_t>( | |||
| 203 | REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB); | |||
| 204 | encodeULEB128(state.sequenceLength, os); | |||
| 205 | encodeULEB128(state.skipLength - target->wordSize, os); | |||
| 206 | } | |||
| 207 | } | |||
| 208 | ||||
| 209 | // Rebases are communicated to dyld using a bytecode, whose opcodes cause the | |||
| 210 | // memory location at a specific address to be rebased and/or the address to be | |||
| 211 | // incremented. | |||
| 212 | // | |||
| 213 | // Opcode REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB is the most generic | |||
| 214 | // one, encoding a series of evenly spaced addresses. This algorithm works by | |||
| 215 | // splitting up the sorted list of addresses into such chunks. If the locations | |||
| 216 | // are consecutive or the sequence consists of a single location, flushRebase | |||
| 217 | // will use a smaller, more specialized encoding. | |||
| 218 | static void encodeRebases(const OutputSegment *seg, | |||
| 219 | MutableArrayRef<Location> locations, | |||
| 220 | raw_svector_ostream &os) { | |||
| 221 | // dyld operates on segments. Translate section offsets into segment offsets. | |||
| 222 | for (Location &loc : locations) | |||
| 223 | loc.offset = | |||
| 224 | loc.isec->parent->getSegmentOffset() + loc.isec->getOffset(loc.offset); | |||
| 225 | // The algorithm assumes that locations are unique. | |||
| 226 | Location *end = | |||
| 227 | llvm::unique(locations, [](const Location &a, const Location &b) { | |||
| 228 | return a.offset == b.offset; | |||
| 229 | }); | |||
| 230 | size_t count = end - locations.begin(); | |||
| 231 | ||||
| 232 | os << static_cast<uint8_t>(REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | | |||
| 233 | seg->index); | |||
| 234 | assert(!locations.empty()); | |||
| 235 | uint64_t offset = locations[0].offset; | |||
| 236 | encodeULEB128(offset, os); | |||
| 237 | ||||
| 238 | RebaseState state{1, target->wordSize}; | |||
| 239 | ||||
| 240 | for (size_t i = 1; i < count; ++i) { | |||
| 241 | offset = locations[i].offset; | |||
| 242 | ||||
| 243 | uint64_t skip = offset - locations[i - 1].offset; | |||
| 244 | assert(skip != 0 && "duplicate locations should have been weeded out"); | |||
| 245 | ||||
| 246 | if (skip == state.skipLength) { | |||
| 247 | ++state.sequenceLength; | |||
| 248 | } else if (state.sequenceLength == 1) { | |||
| 249 | ++state.sequenceLength; | |||
| 250 | state.skipLength = skip; | |||
| 251 | } else if (skip < state.skipLength) { | |||
| 252 | // The address is lower than what the rebase pointer would be if the last | |||
| 253 | // location would be part of a sequence. We start a new sequence from the | |||
| 254 | // previous location. | |||
| 255 | --state.sequenceLength; | |||
| 256 | flushRebase(state, os); | |||
| 257 | ||||
| 258 | state.sequenceLength = 2; | |||
| 259 | state.skipLength = skip; | |||
| 260 | } else { | |||
| 261 | // The address is at some positive offset from the rebase pointer. We | |||
| 262 | // start a new sequence which begins with the current location. | |||
| 263 | flushRebase(state, os); | |||
| 264 | emitIncrement(skip - state.skipLength, os); | |||
| 265 | state.sequenceLength = 1; | |||
| 266 | state.skipLength = target->wordSize; | |||
| 267 | } | |||
| 268 | } | |||
| 269 | flushRebase(state, os); | |||
| 270 | } | |||
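| | // Editor's note: an illustrative walk-through, not part of the original source. | |||
| | // Assuming a 64-bit target (wordSize == 8, p2WordSize == 3), segment index 2, | |||
| | // and example segment offsets 0x1000, 0x1008, 0x1010 and 0x1030, encodeRebases | |||
| | // would emit roughly: | |||
| | //   REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | 2, ULEB128(0x1000) | |||
| | //   REBASE_OPCODE_DO_REBASE_IMM_TIMES | 3    // rebases 0x1000, 0x1008, 0x1010 | |||
| | //   REBASE_OPCODE_ADD_ADDR_IMM_SCALED | 3    // advance 3 * wordSize to 0x1030 | |||
| | //   REBASE_OPCODE_DO_REBASE_IMM_TIMES | 1    // rebases 0x1030 | |||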
| 271 | ||||
| 272 | void RebaseSection::finalizeContents() { | |||
| 273 | if (locations.empty()) | |||
| 274 | return; | |||
| 275 | ||||
| 276 | raw_svector_ostream os{contents}; | |||
| 277 | os << static_cast<uint8_t>(REBASE_OPCODE_SET_TYPE_IMM | REBASE_TYPE_POINTER); | |||
| 278 | ||||
| 279 | llvm::sort(locations, [](const Location &a, const Location &b) { | |||
| 280 | return a.isec->getVA(a.offset) < b.isec->getVA(b.offset); | |||
| 281 | }); | |||
| 282 | ||||
| 283 | for (size_t i = 0, count = locations.size(); i < count;) { | |||
| 284 | const OutputSegment *seg = locations[i].isec->parent->parent; | |||
| 285 | size_t j = i + 1; | |||
| 286 | while (j < count && locations[j].isec->parent->parent == seg) | |||
| 287 | ++j; | |||
| 288 | encodeRebases(seg, {locations.data() + i, locations.data() + j}, os); | |||
| 289 | i = j; | |||
| 290 | } | |||
| 291 | os << static_cast<uint8_t>(REBASE_OPCODE_DONE); | |||
| 292 | } | |||
| 293 | ||||
| 294 | void RebaseSection::writeTo(uint8_t *buf) const { | |||
| 295 | memcpy(buf, contents.data(), contents.size()); | |||
| 296 | } | |||
| 297 | ||||
| 298 | NonLazyPointerSectionBase::NonLazyPointerSectionBase(const char *segname, | |||
| 299 | const char *name) | |||
| 300 | : SyntheticSection(segname, name) { | |||
| 301 | align = target->wordSize; | |||
| 302 | } | |||
| 303 | ||||
| 304 | void macho::addNonLazyBindingEntries(const Symbol *sym, | |||
| 305 | const InputSection *isec, uint64_t offset, | |||
| 306 | int64_t addend) { | |||
| 307 | if (const auto *dysym = dyn_cast<DylibSymbol>(sym)) { | |||
| 308 | in.binding->addEntry(dysym, isec, offset, addend); | |||
| 309 | if (dysym->isWeakDef()) | |||
| 310 | in.weakBinding->addEntry(sym, isec, offset, addend); | |||
| 311 | } else if (const auto *defined = dyn_cast<Defined>(sym)) { | |||
| 312 | in.rebase->addEntry(isec, offset); | |||
| 313 | if (defined->isExternalWeakDef()) | |||
| 314 | in.weakBinding->addEntry(sym, isec, offset, addend); | |||
| 315 | else if (defined->interposable) | |||
| 316 | in.binding->addEntry(sym, isec, offset, addend); | |||
| 317 | } else { | |||
| 318 | // Undefined symbols are filtered out in scanRelocations(); we should never | |||
| 319 | // get here | |||
| 320 | llvm_unreachable("cannot bind to an undefined symbol"); | |||
| 321 | } | |||
| 322 | } | |||
| 323 | ||||
| 324 | void NonLazyPointerSectionBase::addEntry(Symbol *sym) { | |||
| 325 | if (entries.insert(sym)) { | |||
| 326 | assert(!sym->isInGot()); | |||
| 327 | sym->gotIndex = entries.size() - 1; | |||
| 328 | ||||
| 329 | addNonLazyBindingEntries(sym, isec, sym->gotIndex * target->wordSize); | |||
| 330 | } | |||
| 331 | } | |||
| 332 | ||||
| 333 | void NonLazyPointerSectionBase::writeTo(uint8_t *buf) const { | |||
| 334 | for (size_t i = 0, n = entries.size(); i < n; ++i) | |||
| 335 | if (auto *defined = dyn_cast<Defined>(entries[i])) | |||
| 336 | write64le(&buf[i * target->wordSize], defined->getVA()); | |||
| 337 | } | |||
| 338 | ||||
| 339 | GotSection::GotSection() | |||
| 340 | : NonLazyPointerSectionBase(segment_names::data, section_names::got) { | |||
| 341 | flags = S_NON_LAZY_SYMBOL_POINTERS; | |||
| 342 | } | |||
| 343 | ||||
| 344 | TlvPointerSection::TlvPointerSection() | |||
| 345 | : NonLazyPointerSectionBase(segment_names::data, | |||
| 346 | section_names::threadPtrs) { | |||
| 347 | flags = S_THREAD_LOCAL_VARIABLE_POINTERS; | |||
| 348 | } | |||
| 349 | ||||
| 350 | BindingSection::BindingSection() | |||
| 351 | : LinkEditSection(segment_names::linkEdit, section_names::binding) {} | |||
| 352 | ||||
| 353 | namespace { | |||
| 354 | struct Binding { | |||
| 355 | OutputSegment *segment = nullptr; | |||
| 356 | uint64_t offset = 0; | |||
| 357 | int64_t addend = 0; | |||
| 358 | }; | |||
| 359 | struct BindIR { | |||
| 360 | // Default value of 0xF0 is not a valid opcode and should make the program | |||
| 361 | // scream instead of accidentally writing "valid" values. | |||
| 362 | uint8_t opcode = 0xF0; | |||
| 363 | uint64_t data = 0; | |||
| 364 | uint64_t consecutiveCount = 0; | |||
| 365 | }; | |||
| 366 | } // namespace | |||
| 367 | ||||
| 368 | // Encode a sequence of opcodes that tell dyld to write the address of symbol + | |||
| 369 | // addend at osec->addr + outSecOff. | |||
| 370 | // | |||
| 371 | // The bind opcode "interpreter" remembers the values of each binding field, so | |||
| 372 | // we only need to encode the differences between bindings. Hence the use of | |||
| 373 | // lastBinding. | |||
| 374 | static void encodeBinding(const OutputSection *osec, uint64_t outSecOff, | |||
| 375 | int64_t addend, Binding &lastBinding, | |||
| 376 | std::vector<BindIR> &opcodes) { | |||
| 377 | OutputSegment *seg = osec->parent; | |||
| 378 | uint64_t offset = osec->getSegmentOffset() + outSecOff; | |||
| 379 | if (lastBinding.segment != seg) { | |||
| 380 | opcodes.push_back( | |||
| 381 | {static_cast<uint8_t>(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | | |||
| 382 | seg->index), | |||
| 383 | offset}); | |||
| 384 | lastBinding.segment = seg; | |||
| 385 | lastBinding.offset = offset; | |||
| 386 | } else if (lastBinding.offset != offset) { | |||
| 387 | opcodes.push_back({BIND_OPCODE_ADD_ADDR_ULEB, offset - lastBinding.offset}); | |||
| 388 | lastBinding.offset = offset; | |||
| 389 | } | |||
| 390 | ||||
| 391 | if (lastBinding.addend != addend) { | |||
| 392 | opcodes.push_back( | |||
| 393 | {BIND_OPCODE_SET_ADDEND_SLEB, static_cast<uint64_t>(addend)}); | |||
| 394 | lastBinding.addend = addend; | |||
| 395 | } | |||
| 396 | ||||
| 397 | opcodes.push_back({BIND_OPCODE_DO_BIND, 0}); | |||
| 398 | // DO_BIND causes dyld to both perform the binding and increment the offset | |||
| 399 | lastBinding.offset += target->wordSize; | |||
| 400 | } | |||
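| | // Editor's note: illustrative example, not part of the original source. For two | |||
| | // hypothetical bindings of one symbol at segment offsets 0x2000 and 0x2010 | |||
| | // (segment index 1, addend 0, wordSize == 8), encodeBinding produces the | |||
| | // unoptimized sequence: | |||
| | //   BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | 1, ULEB128(0x2000) | |||
| | //   BIND_OPCODE_DO_BIND                    // binds 0x2000; offset becomes 0x2008 | |||
| | //   BIND_OPCODE_ADD_ADDR_ULEB, ULEB128(8)  // 0x2010 - 0x2008 | |||
| | //   BIND_OPCODE_DO_BIND                    // binds 0x2010 | |||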
| 401 | ||||
| 402 | static void optimizeOpcodes(std::vector<BindIR> &opcodes) { | |||
| 403 | // Pass 1: Combine bind/add pairs | |||
| 404 | size_t i; | |||
| 405 | int pWrite = 0; | |||
| 406 | for (i = 1; i < opcodes.size(); ++i, ++pWrite) { | |||
| 407 | if ((opcodes[i].opcode == BIND_OPCODE_ADD_ADDR_ULEB) && | |||
| 408 | (opcodes[i - 1].opcode == BIND_OPCODE_DO_BIND)) { | |||
| 409 | opcodes[pWrite].opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB; | |||
| 410 | opcodes[pWrite].data = opcodes[i].data; | |||
| 411 | ++i; | |||
| 412 | } else { | |||
| 413 | opcodes[pWrite] = opcodes[i - 1]; | |||
| 414 | } | |||
| 415 | } | |||
| 416 | if (i == opcodes.size()) | |||
| 417 | opcodes[pWrite] = opcodes[i - 1]; | |||
| 418 | opcodes.resize(pWrite + 1); | |||
| 419 | ||||
| 420 | // Pass 2: Compress two or more bind_add opcodes | |||
| 421 | pWrite = 0; | |||
| 422 | for (i = 1; i < opcodes.size(); ++i, ++pWrite) { | |||
| 423 | if ((opcodes[i].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) && | |||
| 424 | (opcodes[i - 1].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) && | |||
| 425 | (opcodes[i].data == opcodes[i - 1].data)) { | |||
| 426 | opcodes[pWrite].opcode = BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB; | |||
| 427 | opcodes[pWrite].consecutiveCount = 2; | |||
| 428 | opcodes[pWrite].data = opcodes[i].data; | |||
| 429 | ++i; | |||
| 430 | while (i < opcodes.size() && | |||
| 431 | (opcodes[i].opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) && | |||
| 432 | (opcodes[i].data == opcodes[i - 1].data)) { | |||
| 433 | opcodes[pWrite].consecutiveCount++; | |||
| 434 | ++i; | |||
| 435 | } | |||
| 436 | } else { | |||
| 437 | opcodes[pWrite] = opcodes[i - 1]; | |||
| 438 | } | |||
| 439 | } | |||
| 440 | if (i == opcodes.size()) | |||
| 441 | opcodes[pWrite] = opcodes[i - 1]; | |||
| 442 | opcodes.resize(pWrite + 1); | |||
| 443 | ||||
| 444 | // Pass 3: Use immediate encodings | |||
| 445 | // Every binding is the size of one pointer. If the next binding is a | |||
| 446 | // multiple of wordSize away that is within BIND_IMMEDIATE_MASK, the | |||
| 447 | // opcode can be scaled by wordSize into a single byte and dyld will | |||
| 448 | // expand it to the correct address. | |||
| 449 | for (auto &p : opcodes) { | |||
| 450 | // It's unclear why the check needs to be less than BIND_IMMEDIATE_MASK, | |||
| 451 | // but ld64 currently does this. This could be a potential bug, but | |||
| 452 | // for now, perform the same behavior to prevent mysterious bugs. | |||
| 453 | if ((p.opcode == BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB) && | |||
| 454 | ((p.data / target->wordSize) < BIND_IMMEDIATE_MASK) && | |||
| 455 | ((p.data % target->wordSize) == 0)) { | |||
| 456 | p.opcode = BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED; | |||
| 457 | p.data /= target->wordSize; | |||
| 458 | } | |||
| 459 | } | |||
| 460 | } | |||
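| | // Editor's note: illustrative example, not part of the original source. When | |||
| | // config->optimize > 1, the hypothetical sequence shown after encodeBinding is | |||
| | // folded by pass 1 (DO_BIND + ADD_ADDR_ULEB) and scaled by pass 3 into: | |||
| | //   BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | 1, ULEB128(0x2000) | |||
| | //   BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED | 1  // bind 0x2000, advance (1 + 1) * wordSize | |||
| | //   BIND_OPCODE_DO_BIND                          // bind 0x2010 | |||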
| 461 | ||||
| 462 | static void flushOpcodes(const BindIR &op, raw_svector_ostream &os) { | |||
| 463 | uint8_t opcode = op.opcode & BIND_OPCODE_MASK; | |||
| 464 | switch (opcode) { | |||
| 465 | case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB: | |||
| 466 | case BIND_OPCODE_ADD_ADDR_ULEB: | |||
| 467 | case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB: | |||
| 468 | os << op.opcode; | |||
| 469 | encodeULEB128(op.data, os); | |||
| 470 | break; | |||
| 471 | case BIND_OPCODE_SET_ADDEND_SLEB: | |||
| 472 | os << op.opcode; | |||
| 473 | encodeSLEB128(static_cast<int64_t>(op.data), os); | |||
| 474 | break; | |||
| 475 | case BIND_OPCODE_DO_BIND: | |||
| 476 | os << op.opcode; | |||
| 477 | break; | |||
| 478 | case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB: | |||
| 479 | os << op.opcode; | |||
| 480 | encodeULEB128(op.consecutiveCount, os); | |||
| 481 | encodeULEB128(op.data, os); | |||
| 482 | break; | |||
| 483 | case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED: | |||
| 484 | os << static_cast<uint8_t>(op.opcode | op.data); | |||
| 485 | break; | |||
| 486 | default: | |||
| 487 | llvm_unreachable("cannot bind to an unrecognized symbol"); | |||
| 488 | } | |||
| 489 | } | |||
| 490 | ||||
| 491 | // Non-weak bindings need to have their dylib ordinal encoded as well. | |||
| 492 | static int16_t ordinalForDylibSymbol(const DylibSymbol &dysym) { | |||
| 493 | if (config->namespaceKind == NamespaceKind::flat || dysym.isDynamicLookup()) | |||
| 494 | return static_cast<int16_t>(BIND_SPECIAL_DYLIB_FLAT_LOOKUP); | |||
| 495 | assert(dysym.getFile()->isReferenced()); | |||
| 496 | return dysym.getFile()->ordinal; | |||
| 497 | } | |||
| 498 | ||||
| 499 | static int16_t ordinalForSymbol(const Symbol &sym) { | |||
| 500 | if (const auto *dysym = dyn_cast<DylibSymbol>(&sym)) | |||
| 501 | return ordinalForDylibSymbol(*dysym); | |||
| 502 | assert(cast<Defined>(&sym)->interposable); | |||
| 503 | return BIND_SPECIAL_DYLIB_FLAT_LOOKUP; | |||
| 504 | } | |||
| 505 | ||||
| 506 | static void encodeDylibOrdinal(int16_t ordinal, raw_svector_ostream &os) { | |||
| 507 | if (ordinal <= 0) { | |||
| 508 | os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | | |||
| 509 | (ordinal & BIND_IMMEDIATE_MASK)); | |||
| 510 | } else if (ordinal <= BIND_IMMEDIATE_MASK) { | |||
| 511 | os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | ordinal); | |||
| 512 | } else { | |||
| 513 | os << static_cast<uint8_t>(BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB); | |||
| 514 | encodeULEB128(ordinal, os); | |||
| 515 | } | |||
| 516 | } | |||
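| | // Editor's note: illustrative values, not part of the original source. Ordinal 3 | |||
| | // encodes as BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | 3 (one byte); ordinal 20, which | |||
| | // exceeds BIND_IMMEDIATE_MASK, as BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB followed by | |||
| | // ULEB128(20); and BIND_SPECIAL_DYLIB_FLAT_LOOKUP (-2) as | |||
| | // BIND_OPCODE_SET_DYLIB_SPECIAL_IMM with the low four bits of -2. | |||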
| 517 | ||||
| 518 | static void encodeWeakOverride(const Defined *defined, | |||
| 519 | raw_svector_ostream &os) { | |||
| 520 | os << static_cast<uint8_t>(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM | | |||
| 521 | BIND_SYMBOL_FLAGS_NON_WEAK_DEFINITION) | |||
| 522 | << defined->getName() << '\0'; | |||
| 523 | } | |||
| 524 | ||||
| 525 | // Organize the bindings so we can encode them with fewer opcodes. | |||
| 526 | // | |||
| 527 | // First, all bindings for a given symbol should be grouped together. | |||
| 528 | // BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM is the largest opcode (since it | |||
| 529 | // has an associated symbol string), so we only want to emit it once per symbol. | |||
| 530 | // | |||
| 531 | // Within each group, we sort the bindings by address. Since bindings are | |||
| 532 | // delta-encoded, sorting them allows for a more compact result. Note that | |||
| 533 | // sorting by address alone ensures that bindings for the same segment / section | |||
| 534 | // are located together, minimizing the number of times we have to emit | |||
| 535 | // BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB. | |||
| 536 | // | |||
| 537 | // Finally, we sort the symbols by the address of their first binding, again | |||
| 538 | // to facilitate the delta-encoding process. | |||
| 539 | template <class Sym> | |||
| 540 | std::vector<std::pair<const Sym *, std::vector<BindingEntry>>> | |||
| 541 | sortBindings(const BindingsMap<const Sym *> &bindingsMap) { | |||
| 542 | std::vector<std::pair<const Sym *, std::vector<BindingEntry>>> bindingsVec( | |||
| 543 | bindingsMap.begin(), bindingsMap.end()); | |||
| 544 | for (auto &p : bindingsVec) { | |||
| 545 | std::vector<BindingEntry> &bindings = p.second; | |||
| 546 | llvm::sort(bindings, [](const BindingEntry &a, const BindingEntry &b) { | |||
| 547 | return a.target.getVA() < b.target.getVA(); | |||
| 548 | }); | |||
| 549 | } | |||
| 550 | llvm::sort(bindingsVec, [](const auto &a, const auto &b) { | |||
| 551 | return a.second[0].target.getVA() < b.second[0].target.getVA(); | |||
| 552 | }); | |||
| 553 | return bindingsVec; | |||
| 554 | } | |||
| 555 | ||||
| 556 | // Emit bind opcodes, which are a stream of byte-sized opcodes that dyld | |||
| 557 | // interprets to update a record with the following fields: | |||
| 558 | // * segment index (of the segment to write the symbol addresses to, typically | |||
| 559 | // the __DATA_CONST segment which contains the GOT) | |||
| 560 | // * offset within the segment, indicating the next location to write a binding | |||
| 561 | // * symbol type | |||
| 562 | // * symbol library ordinal (the index of its library's LC_LOAD_DYLIB command) | |||
| 563 | // * symbol name | |||
| 564 | // * addend | |||
| 565 | // When dyld sees BIND_OPCODE_DO_BIND, it uses the current record state to bind | |||
| 566 | // a symbol in the GOT, and increments the segment offset to point to the next | |||
| 567 | // entry. It does *not* clear the record state after doing the bind, so | |||
| 568 | // subsequent opcodes only need to encode the differences between bindings. | |||
| 569 | void BindingSection::finalizeContents() { | |||
| 570 | raw_svector_ostream os{contents}; | |||
| 571 | Binding lastBinding; | |||
| 572 | int16_t lastOrdinal = 0; | |||
| 573 | ||||
| 574 | for (auto &p : sortBindings(bindingsMap)) { | |||
| 575 | const Symbol *sym = p.first; | |||
| 576 | std::vector<BindingEntry> &bindings = p.second; | |||
| 577 | uint8_t flags = BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM; | |||
| 578 | if (sym->isWeakRef()) | |||
| 579 | flags |= BIND_SYMBOL_FLAGS_WEAK_IMPORT; | |||
| 580 | os << flags << sym->getName() << '\0' | |||
| 581 | << static_cast<uint8_t>(BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER); | |||
| 582 | int16_t ordinal = ordinalForSymbol(*sym); | |||
| 583 | if (ordinal != lastOrdinal) { | |||
| 584 | encodeDylibOrdinal(ordinal, os); | |||
| 585 | lastOrdinal = ordinal; | |||
| 586 | } | |||
| 587 | std::vector<BindIR> opcodes; | |||
| 588 | for (const BindingEntry &b : bindings) | |||
| 589 | encodeBinding(b.target.isec->parent, | |||
| 590 | b.target.isec->getOffset(b.target.offset), b.addend, | |||
| 591 | lastBinding, opcodes); | |||
| 592 | if (config->optimize > 1) | |||
| 593 | optimizeOpcodes(opcodes); | |||
| 594 | for (const auto &op : opcodes) | |||
| 595 | flushOpcodes(op, os); | |||
| 596 | } | |||
| 597 | if (!bindingsMap.empty()) | |||
| 598 | os << static_cast<uint8_t>(BIND_OPCODE_DONE); | |||
| 599 | } | |||
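| | // Editor's note: illustrative example, not part of the original source. For a | |||
| | // hypothetical non-weak symbol "_foo" from the dylib with ordinal 1, bound into | |||
| | // a single GOT slot at offset 0 of the segment with index 2, the stream built | |||
| | // above looks roughly like: | |||
| | //   BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM, "_foo\0" | |||
| | //   BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER | |||
| | //   BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | 1 | |||
| | //   BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | 2, ULEB128(0) | |||
| | //   BIND_OPCODE_DO_BIND | |||
| | //   BIND_OPCODE_DONE    // emitted once, after all symbols | |||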
| 600 | ||||
| 601 | void BindingSection::writeTo(uint8_t *buf) const { | |||
| 602 | memcpy(buf, contents.data(), contents.size()); | |||
| 603 | } | |||
| 604 | ||||
| 605 | WeakBindingSection::WeakBindingSection() | |||
| 606 | : LinkEditSection(segment_names::linkEdit, section_names::weakBinding) {} | |||
| 607 | ||||
| 608 | void WeakBindingSection::finalizeContents() { | |||
| 609 | raw_svector_ostream os{contents}; | |||
| 610 | Binding lastBinding; | |||
| 611 | ||||
| 612 | for (const Defined *defined : definitions) | |||
| 613 | encodeWeakOverride(defined, os); | |||
| 614 | ||||
| 615 | for (auto &p : sortBindings(bindingsMap)) { | |||
| 616 | const Symbol *sym = p.first; | |||
| 617 | std::vector<BindingEntry> &bindings = p.second; | |||
| 618 | os << static_cast<uint8_t>(BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM) | |||
| 619 | << sym->getName() << '\0' | |||
| 620 | << static_cast<uint8_t>(BIND_OPCODE_SET_TYPE_IMM | BIND_TYPE_POINTER); | |||
| 621 | std::vector<BindIR> opcodes; | |||
| 622 | for (const BindingEntry &b : bindings) | |||
| 623 | encodeBinding(b.target.isec->parent, | |||
| 624 | b.target.isec->getOffset(b.target.offset), b.addend, | |||
| 625 | lastBinding, opcodes); | |||
| 626 | if (config->optimize > 1) | |||
| 627 | optimizeOpcodes(opcodes); | |||
| 628 | for (const auto &op : opcodes) | |||
| 629 | flushOpcodes(op, os); | |||
| 630 | } | |||
| 631 | if (!bindingsMap.empty() || !definitions.empty()) | |||
| 632 | os << static_cast<uint8_t>(BIND_OPCODE_DONE); | |||
| 633 | } | |||
| 634 | ||||
| 635 | void WeakBindingSection::writeTo(uint8_t *buf) const { | |||
| 636 | memcpy(buf, contents.data(), contents.size()); | |||
| 637 | } | |||
| 638 | ||||
| 639 | StubsSection::StubsSection() | |||
| 640 | : SyntheticSection(segment_names::text, section_names::stubs) { | |||
| 641 | flags = S_SYMBOL_STUBS | S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS; | |||
| 642 | // The stubs section comprises machine instructions, which are aligned to | |||
| 643 | // 4 bytes on the archs we care about. | |||
| 644 | align = 4; | |||
| 645 | reserved2 = target->stubSize; | |||
| 646 | } | |||
| 647 | ||||
| 648 | uint64_t StubsSection::getSize() const { | |||
| 649 | return entries.size() * target->stubSize; | |||
| 650 | } | |||
| 651 | ||||
| 652 | void StubsSection::writeTo(uint8_t *buf) const { | |||
| 653 | size_t off = 0; | |||
| 654 | for (const Symbol *sym : entries) { | |||
| 655 | target->writeStub(buf + off, *sym); | |||
| 656 | off += target->stubSize; | |||
| 657 | } | |||
| 658 | } | |||
| 659 | ||||
| 660 | void StubsSection::finalize() { isFinal = true; } | |||
| 661 | ||||
| 662 | static void addBindingsForStub(Symbol *sym) { | |||
| 663 | if (auto *dysym = dyn_cast<DylibSymbol>(sym)) { | |||
| 664 | if (sym->isWeakDef()) { | |||
| 665 | in.binding->addEntry(dysym, in.lazyPointers->isec, | |||
| 666 | sym->stubsIndex * target->wordSize); | |||
| 667 | in.weakBinding->addEntry(sym, in.lazyPointers->isec, | |||
| 668 | sym->stubsIndex * target->wordSize); | |||
| 669 | } else { | |||
| 670 | in.lazyBinding->addEntry(dysym); | |||
| 671 | } | |||
| 672 | } else if (auto *defined = dyn_cast<Defined>(sym)) { | |||
| 673 | if (defined->isExternalWeakDef()) { | |||
| 674 | in.rebase->addEntry(in.lazyPointers->isec, | |||
| 675 | sym->stubsIndex * target->wordSize); | |||
| 676 | in.weakBinding->addEntry(sym, in.lazyPointers->isec, | |||
| 677 | sym->stubsIndex * target->wordSize); | |||
| 678 | } else if (defined->interposable) { | |||
| 679 | in.lazyBinding->addEntry(sym); | |||
| 680 | } else { | |||
| 681 | llvm_unreachable("invalid stub target"); | |||
| 682 | } | |||
| 683 | } else { | |||
| 684 | llvm_unreachable("invalid stub target symbol type"); | |||
| 685 | } | |||
| 686 | } | |||
| 687 | ||||
| 688 | void StubsSection::addEntry(Symbol *sym) { | |||
| 689 | bool inserted = entries.insert(sym); | |||
| 690 | if (inserted) { | |||
| 691 | sym->stubsIndex = entries.size() - 1; | |||
| 692 | addBindingsForStub(sym); | |||
| 693 | } | |||
| 694 | } | |||
| 695 | ||||
| 696 | StubHelperSection::StubHelperSection() | |||
| 697 | : SyntheticSection(segment_names::text, section_names::stubHelper) { | |||
| 698 | flags = S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS; | |||
| 699 | align = 4; // This section comprises machine instructions | |||
| 700 | } | |||
| 701 | ||||
| 702 | uint64_t StubHelperSection::getSize() const { | |||
| 703 | return target->stubHelperHeaderSize + | |||
| 704 | in.lazyBinding->getEntries().size() * target->stubHelperEntrySize; | |||
| 705 | } | |||
| 706 | ||||
| 707 | bool StubHelperSection::isNeeded() const { return in.lazyBinding->isNeeded(); } | |||
| 708 | ||||
| 709 | void StubHelperSection::writeTo(uint8_t *buf) const { | |||
| 710 | target->writeStubHelperHeader(buf); | |||
| 711 | size_t off = target->stubHelperHeaderSize; | |||
| 712 | for (const Symbol *sym : in.lazyBinding->getEntries()) { | |||
| 713 | target->writeStubHelperEntry(buf + off, *sym, addr + off); | |||
| 714 | off += target->stubHelperEntrySize; | |||
| 715 | } | |||
| 716 | } | |||
| 717 | ||||
| 718 | void StubHelperSection::setUp() { | |||
| 719 | Symbol *binder = symtab->addUndefined("dyld_stub_binder", /*file=*/nullptr, | |||
| 720 | /*isWeakRef=*/false); | |||
| 721 | if (auto *undefined = dyn_cast<Undefined>(binder)) | |||
| 722 | treatUndefinedSymbol(*undefined, | |||
| 723 | "lazy binding (normally in libSystem.dylib)"); | |||
| 724 | ||||
| 725 | // treatUndefinedSymbol() can replace binder with a DylibSymbol; re-check. | |||
| 726 | stubBinder = dyn_cast_or_null<DylibSymbol>(binder); | |||
| 727 | if (stubBinder == nullptr) | |||
| 728 | return; | |||
| 729 | ||||
| 730 | in.got->addEntry(stubBinder); | |||
| 731 | ||||
| 732 | in.imageLoaderCache->parent = | |||
| 733 | ConcatOutputSection::getOrCreateForInput(in.imageLoaderCache); | |||
| 734 | inputSections.push_back(in.imageLoaderCache); | |||
| 735 | // Since this isn't in the symbol table or in any input file, the noDeadStrip | |||
| 736 | // argument doesn't matter. | |||
| 737 | dyldPrivate = | |||
| 738 | make<Defined>("__dyld_private", nullptr, in.imageLoaderCache, 0, 0, | |||
| 739 | /*isWeakDef=*/false, | |||
| 740 | /*isExternal=*/false, /*isPrivateExtern=*/false, | |||
| 741 | /*includeInSymtab=*/true, | |||
| 742 | /*isThumb=*/false, /*isReferencedDynamically=*/false, | |||
| 743 | /*noDeadStrip=*/false); | |||
| 744 | dyldPrivate->used = true; | |||
| 745 | } | |||
| 746 | ||||
| 747 | ObjCStubsSection::ObjCStubsSection() | |||
| 748 | : SyntheticSection(segment_names::text, section_names::objcStubs) { | |||
| 749 | flags = S_ATTR_SOME_INSTRUCTIONS | S_ATTR_PURE_INSTRUCTIONS; | |||
| 750 | align = target->objcStubsAlignment; | |||
| 751 | } | |||
| 752 | ||||
| 753 | void ObjCStubsSection::addEntry(Symbol *sym) { | |||
| 754 | assert(sym->getName().startswith(symbolPrefix) && "not an objc stub"); | |||
| 755 | StringRef methname = sym->getName().drop_front(symbolPrefix.size()); | |||
| 756 | offsets.push_back( | |||
| 757 | in.objcMethnameSection->getStringOffset(methname).outSecOff); | |||
| 758 | Defined *newSym = replaceSymbol<Defined>( | |||
| 759 | sym, sym->getName(), nullptr, isec, | |||
| 760 | /*value=*/symbols.size() * target->objcStubsFastSize, | |||
| 761 | /*size=*/target->objcStubsFastSize, | |||
| 762 | /*isWeakDef=*/false, /*isExternal=*/true, /*isPrivateExtern=*/true, | |||
| 763 | /*includeInSymtab=*/true, /*isThumb=*/false, | |||
| 764 | /*isReferencedDynamically=*/false, /*noDeadStrip=*/false); | |||
| 765 | symbols.push_back(newSym); | |||
| 766 | } | |||
| 767 | ||||
| 768 | void ObjCStubsSection::setUp() { | |||
| 769 | Symbol *objcMsgSend = symtab->addUndefined("_objc_msgSend", /*file=*/nullptr, | |||
| 770 | /*isWeakRef=*/false); | |||
| 771 | objcMsgSend->used = true; | |||
| 772 | in.got->addEntry(objcMsgSend); | |||
| 773 | assert(objcMsgSend->isInGot()); | |||
| 774 | objcMsgSendGotIndex = objcMsgSend->gotIndex; | |||
| 775 | ||||
| 776 | size_t size = offsets.size() * target->wordSize; | |||
| 777 | uint8_t *selrefsData = bAlloc().Allocate<uint8_t>(size); | |||
| 778 | for (size_t i = 0, n = offsets.size(); i < n; ++i) | |||
| 779 | write64le(&selrefsData[i * target->wordSize], offsets[i]); | |||
| 780 | ||||
| 781 | in.objcSelrefs = | |||
| 782 | makeSyntheticInputSection(segment_names::data, section_names::objcSelrefs, | |||
| 783 | S_LITERAL_POINTERS | S_ATTR_NO_DEAD_STRIP, | |||
| 784 | ArrayRef<uint8_t>{selrefsData, size}, | |||
| 785 | /*align=*/target->wordSize); | |||
| 786 | in.objcSelrefs->live = true; | |||
| 787 | ||||
| 788 | for (size_t i = 0, n = offsets.size(); i < n; ++i) { | |||
| 789 | in.objcSelrefs->relocs.push_back( | |||
| 790 | {/*type=*/target->unsignedRelocType, | |||
| 791 | /*pcrel=*/false, /*length=*/3, | |||
| 792 | /*offset=*/static_cast<uint32_t>(i * target->wordSize), | |||
| 793 | /*addend=*/offsets[i] * in.objcMethnameSection->align, | |||
| 794 | /*referent=*/in.objcMethnameSection->isec}); | |||
| 795 | } | |||
| 796 | ||||
| 797 | in.objcSelrefs->parent = | |||
| 798 | ConcatOutputSection::getOrCreateForInput(in.objcSelrefs); | |||
| 799 | inputSections.push_back(in.objcSelrefs); | |||
| 800 | in.objcSelrefs->isFinal = true; | |||
| 801 | } | |||
| 802 | ||||
| 803 | uint64_t ObjCStubsSection::getSize() const { | |||
| 804 | return target->objcStubsFastSize * symbols.size(); | |||
| 805 | } | |||
| 806 | ||||
| 807 | void ObjCStubsSection::writeTo(uint8_t *buf) const { | |||
| 808 | assert(in.objcSelrefs->live); | |||
| 809 | assert(in.objcSelrefs->isFinal); | |||
| 810 | ||||
| 811 | uint64_t stubOffset = 0; | |||
| 812 | for (size_t i = 0, n = symbols.size(); i < n; ++i) { | |||
| 813 | Defined *sym = symbols[i]; | |||
| 814 | target->writeObjCMsgSendStub(buf + stubOffset, sym, in.objcStubs->addr, | |||
| 815 | stubOffset, in.objcSelrefs->getVA(), i, | |||
| 816 | in.got->addr, objcMsgSendGotIndex); | |||
| 817 | stubOffset += target->objcStubsFastSize; | |||
| 818 | } | |||
| 819 | } | |||
| 820 | ||||
| 821 | LazyPointerSection::LazyPointerSection() | |||
| 822 | : SyntheticSection(segment_names::data, section_names::lazySymbolPtr) { | |||
| 823 | align = target->wordSize; | |||
| 824 | flags = S_LAZY_SYMBOL_POINTERS; | |||
| 825 | } | |||
| 826 | ||||
| 827 | uint64_t LazyPointerSection::getSize() const { | |||
| 828 | return in.stubs->getEntries().size() * target->wordSize; | |||
| 829 | } | |||
| 830 | ||||
| 831 | bool LazyPointerSection::isNeeded() const { | |||
| 832 | return !in.stubs->getEntries().empty(); | |||
| 833 | } | |||
| 834 | ||||
| 835 | void LazyPointerSection::writeTo(uint8_t *buf) const { | |||
| 836 | size_t off = 0; | |||
| 837 | for (const Symbol *sym : in.stubs->getEntries()) { | |||
| 838 | if (const auto *dysym = dyn_cast<DylibSymbol>(sym)) { | |||
| 839 | if (dysym->hasStubsHelper()) { | |||
| 840 | uint64_t stubHelperOffset = | |||
| 841 | target->stubHelperHeaderSize + | |||
| 842 | dysym->stubsHelperIndex * target->stubHelperEntrySize; | |||
| 843 | write64le(buf + off, in.stubHelper->addr + stubHelperOffset); | |||
| 844 | } | |||
| 845 | } else { | |||
| 846 | write64le(buf + off, sym->getVA()); | |||
| 847 | } | |||
| 848 | off += target->wordSize; | |||
| 849 | } | |||
| 850 | } | |||
| 851 | ||||
| 852 | LazyBindingSection::LazyBindingSection() | |||
| 853 | : LinkEditSection(segment_names::linkEdit, section_names::lazyBinding) {} | |||
| 854 | ||||
| 855 | void LazyBindingSection::finalizeContents() { | |||
| 856 | // TODO: Just precompute output size here instead of writing to a temporary | |||
| 857 | // buffer | |||
| 858 | for (Symbol *sym : entries) | |||
| 859 | sym->lazyBindOffset = encode(*sym); | |||
| 860 | } | |||
| 861 | ||||
| 862 | void LazyBindingSection::writeTo(uint8_t *buf) const { | |||
| 863 | memcpy(buf, contents.data(), contents.size()); | |||
| 864 | } | |||
| 865 | ||||
| 866 | void LazyBindingSection::addEntry(Symbol *sym) { | |||
| 867 | if (entries.insert(sym)) { | |||
| 868 | sym->stubsHelperIndex = entries.size() - 1; | |||
| 869 | in.rebase->addEntry(in.lazyPointers->isec, | |||
| 870 | sym->stubsIndex * target->wordSize); | |||
| 871 | } | |||
| 872 | } | |||
| 873 | ||||
| 874 | // Unlike the non-lazy binding section, the bind opcodes in this section aren't | |||
| 875 | // interpreted all at once. Rather, dyld will start interpreting opcodes at a | |||
| 876 | // given offset, typically only binding a single symbol before it finds a | |||
| 877 | // BIND_OPCODE_DONE terminator. As such, unlike in the non-lazy-binding case, | |||
| 878 | // we cannot encode just the differences between symbols; we have to emit the | |||
| 879 | // complete bind information for each symbol. | |||
| 880 | uint32_t LazyBindingSection::encode(const Symbol &sym) { | |||
| 881 | uint32_t opstreamOffset = contents.size(); | |||
| 882 | OutputSegment *dataSeg = in.lazyPointers->parent; | |||
| 883 | os << static_cast<uint8_t>(BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | | |||
| 884 | dataSeg->index); | |||
| 885 | uint64_t offset = | |||
| 886 | in.lazyPointers->addr - dataSeg->addr + sym.stubsIndex * target->wordSize; | |||
| 887 | encodeULEB128(offset, os); | |||
| 888 | encodeDylibOrdinal(ordinalForSymbol(sym), os); | |||
| 889 | ||||
| 890 | uint8_t flags = BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM; | |||
| 891 | if (sym.isWeakRef()) | |||
| 892 | flags |= BIND_SYMBOL_FLAGS_WEAK_IMPORT; | |||
| 893 | ||||
| 894 | os << flags << sym.getName() << '\0' | |||
| 895 | << static_cast<uint8_t>(BIND_OPCODE_DO_BIND) | |||
| 896 | << static_cast<uint8_t>(BIND_OPCODE_DONE); | |||
| 897 | return opstreamOffset; | |||
| 898 | } | |||
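| | // Editor's note: illustrative example, not part of the original source. A single | |||
| | // self-contained lazy-bind record for a hypothetical symbol "_bar" (dylib | |||
| | // ordinal 1) whose lazy pointer sits at offset 0x10 of the segment with index 3 | |||
| | // would be: | |||
| | //   BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | 3, ULEB128(0x10) | |||
| | //   BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | 1 | |||
| | //   BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM, "_bar\0" | |||
| | //   BIND_OPCODE_DO_BIND | |||
| | //   BIND_OPCODE_DONE | |||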
| 899 | ||||
| 900 | ExportSection::ExportSection() | |||
| 901 | : LinkEditSection(segment_names::linkEdit, section_names::export_) {} | |||
| 902 | ||||
| 903 | void ExportSection::finalizeContents() { | |||
| 904 | trieBuilder.setImageBase(in.header->addr); | |||
| 905 | for (const Symbol *sym : symtab->getSymbols()) { | |||
| 906 | if (const auto *defined = dyn_cast<Defined>(sym)) { | |||
| 907 | if (defined->privateExtern || !defined->isLive()) | |||
| 908 | continue; | |||
| 909 | trieBuilder.addSymbol(*defined); | |||
| 910 | hasWeakSymbol = hasWeakSymbol || sym->isWeakDef(); | |||
| 911 | } | |||
| 912 | } | |||
| 913 | size = trieBuilder.build(); | |||
| 914 | } | |||
| 915 | ||||
| 916 | void ExportSection::writeTo(uint8_t *buf) const { trieBuilder.writeTo(buf); } | |||
| 917 | ||||
| 918 | DataInCodeSection::DataInCodeSection() | |||
| 919 | : LinkEditSection(segment_names::linkEdit, section_names::dataInCode) {} | |||
| 920 | ||||
| 921 | template <class LP> | |||
| 922 | static std::vector<MachO::data_in_code_entry> collectDataInCodeEntries() { | |||
| 923 | std::vector<MachO::data_in_code_entry> dataInCodeEntries; | |||
| 924 | for (const InputFile *inputFile : inputFiles) { | |||
| 925 | if (!isa<ObjFile>(inputFile)) | |||
| 926 | continue; | |||
| 927 | const ObjFile *objFile = cast<ObjFile>(inputFile); | |||
| 928 | ArrayRef<MachO::data_in_code_entry> entries = objFile->getDataInCode(); | |||
| 929 | if (entries.empty()) | |||
| 930 | continue; | |||
| 931 | ||||
| 932 | assert(is_sorted(entries, [](const data_in_code_entry &lhs, | |||
| 933 | const data_in_code_entry &rhs) { | |||
| 934 | return lhs.offset < rhs.offset; | |||
| 935 | })); | |||
| 936 | // For each code subsection find 'data in code' entries residing in it. | |||
| 937 | // Compute the new offset values as | |||
| 938 | // <offset within subsection> + <subsection address> - <__TEXT address>. | |||
| 939 | for (const Section *section : objFile->sections) { | |||
| 940 | for (const Subsection &subsec : section->subsections) { | |||
| 941 | const InputSection *isec = subsec.isec; | |||
| 942 | if (!isCodeSection(isec)) | |||
| 943 | continue; | |||
| 944 | if (cast<ConcatInputSection>(isec)->shouldOmitFromOutput()) | |||
| 945 | continue; | |||
| 946 | const uint64_t beginAddr = section->addr + subsec.offset; | |||
| 947 | auto it = llvm::lower_bound( | |||
| 948 | entries, beginAddr, | |||
| 949 | [](const MachO::data_in_code_entry &entry, uint64_t addr) { | |||
| 950 | return entry.offset < addr; | |||
| 951 | }); | |||
| 952 | const uint64_t endAddr = beginAddr + isec->getSize(); | |||
| 953 | for (const auto end = entries.end(); | |||
| 954 | it != end && it->offset + it->length <= endAddr; ++it) | |||
| 955 | dataInCodeEntries.push_back( | |||
| 956 | {static_cast<uint32_t>(isec->getVA(it->offset - beginAddr) - | |||
| 957 | in.header->addr), | |||
| 958 | it->length, it->kind}); | |||
| 959 | } | |||
| 960 | } | |||
| 961 | } | |||
| 962 | ||||
| 963 | // ld64 emits the table in sorted order too. | |||
| 964 | llvm::sort(dataInCodeEntries, | |||
| 965 | [](const data_in_code_entry &lhs, const data_in_code_entry &rhs) { | |||
| 966 | return lhs.offset < rhs.offset; | |||
| 967 | }); | |||
| 968 | return dataInCodeEntries; | |||
| 969 | } | |||
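| | // Editor's note: illustrative arithmetic, not part of the original source. If a | |||
| | // hypothetical code subsection is placed at VA 0x100002000, __TEXT starts at | |||
| | // 0x100000000 (in.header->addr), and an input entry covers 8 bytes at offset | |||
| | // 0x10 within that subsection, the emitted entry is { 0x2010, 8, kind }. | |||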
| 970 | ||||
| 971 | void DataInCodeSection::finalizeContents() { | |||
| 972 | entries = target->wordSize == 8 ? collectDataInCodeEntries<LP64>() | |||
| 973 | : collectDataInCodeEntries<ILP32>(); | |||
| 974 | } | |||
| 975 | ||||
| 976 | void DataInCodeSection::writeTo(uint8_t *buf) const { | |||
| 977 | if (!entries.empty()) | |||
| 978 | memcpy(buf, entries.data(), getRawSize()); | |||
| 979 | } | |||
| 980 | ||||
| 981 | FunctionStartsSection::FunctionStartsSection() | |||
| 982 | : LinkEditSection(segment_names::linkEdit, section_names::functionStarts) {} | |||
| 983 | ||||
| 984 | void FunctionStartsSection::finalizeContents() { | |||
| 985 | raw_svector_ostream os{contents}; | |||
| 986 | std::vector<uint64_t> addrs; | |||
| 987 | for (const InputFile *file : inputFiles) { | |||
| 988 | if (auto *objFile = dyn_cast<ObjFile>(file)) { | |||
| 989 | for (const Symbol *sym : objFile->symbols) { | |||
| 990 | if (const auto *defined = dyn_cast_or_null<Defined>(sym)) { | |||
| 991 | if (!defined->isec || !isCodeSection(defined->isec) || | |||
| 992 | !defined->isLive()) | |||
| 993 | continue; | |||
| 994 | // TODO: Add support for thumbs, in that case | |||
| 995 | // the lowest bit of nextAddr needs to be set to 1. | |||
| 996 | addrs.push_back(defined->getVA()); | |||
| 997 | } | |||
| 998 | } | |||
| 999 | } | |||
| 1000 | } | |||
| 1001 | llvm::sort(addrs); | |||
| 1002 | uint64_t addr = in.header->addr; | |||
| 1003 | for (uint64_t nextAddr : addrs) { | |||
| 1004 | uint64_t delta = nextAddr - addr; | |||
| 1005 | if (delta == 0) | |||
| 1006 | continue; | |||
| 1007 | encodeULEB128(delta, os); | |||
| 1008 | addr = nextAddr; | |||
| 1009 | } | |||
| 1010 | os << '\0'; | |||
| 1011 | } | |||
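| | // Editor's note: illustrative example, not part of the original source. With a | |||
| | // hypothetical image base of 0x100000000 and live functions at 0x100001000, | |||
| | // 0x100001020 and 0x100001080, the section holds ULEB128(0x1000), ULEB128(0x20), | |||
| | // ULEB128(0x60) and a terminating 0x00, i.e. the bytes 0x80 0x20, 0x20, 0x60, 0x00. | |||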
| 1012 | ||||
| 1013 | void FunctionStartsSection::writeTo(uint8_t *buf) const { | |||
| 1014 | memcpy(buf, contents.data(), contents.size()); | |||
| 1015 | } | |||
| 1016 | ||||
| 1017 | SymtabSection::SymtabSection(StringTableSection &stringTableSection) | |||
| 1018 | : LinkEditSection(segment_names::linkEdit, section_names::symbolTable), | |||
| 1019 | stringTableSection(stringTableSection) {} | |||
| 1020 | ||||
| 1021 | void SymtabSection::emitBeginSourceStab(StringRef sourceFile) { | |||
| 1022 | StabsEntry stab(N_SO); | |||
| 1023 | stab.strx = stringTableSection.addString(saver().save(sourceFile)); | |||
| 1024 | stabs.emplace_back(std::move(stab)); | |||
| 1025 | } | |||
| 1026 | ||||
| 1027 | void SymtabSection::emitEndSourceStab() { | |||
| 1028 | StabsEntry stab(N_SO); | |||
| 1029 | stab.sect = 1; | |||
| 1030 | stabs.emplace_back(std::move(stab)); | |||
| 1031 | } | |||
| 1032 | ||||
| 1033 | void SymtabSection::emitObjectFileStab(ObjFile *file) { | |||
| 1034 | StabsEntry stab(N_OSO); | |||
| 1035 | stab.sect = target->cpuSubtype; | |||
| 1036 | SmallString<261> path(!file->archiveName.empty() ? file->archiveName | |||
| 1037 | : file->getName()); | |||
| 1038 | std::error_code ec = sys::fs::make_absolute(path); | |||
| 1039 | if (ec) | |||
| 1040 | fatal("failed to get absolute path for " + path); | |||
| 1041 | ||||
| 1042 | if (!file->archiveName.empty()) | |||
| 1043 | path.append({"(", file->getName(), ")"}); | |||
| 1044 | ||||
| 1045 | StringRef adjustedPath = saver().save(path.str()); | |||
| 1046 | adjustedPath.consume_front(config->osoPrefix); | |||
| 1047 | ||||
| 1048 | stab.strx = stringTableSection.addString(adjustedPath); | |||
| 1049 | stab.desc = 1; | |||
| 1050 | stab.value = file->modTime; | |||
| 1051 | stabs.emplace_back(std::move(stab)); | |||
| 1052 | } | |||
| 1053 | ||||
| 1054 | void SymtabSection::emitEndFunStab(Defined *defined) { | |||
| 1055 | StabsEntry stab(N_FUN); | |||
| 1056 | stab.value = defined->size; | |||
| 1057 | stabs.emplace_back(std::move(stab)); | |||
| 1058 | } | |||
| 1059 | ||||
| 1060 | void SymtabSection::emitStabs() { | |||
| 1061 | if (config->omitDebugInfo) | |||
| 1062 | return; | |||
| 1063 | ||||
| 1064 | for (const std::string &s : config->astPaths) { | |||
| 1065 | StabsEntry astStab(N_AST); | |||
| 1066 | astStab.strx = stringTableSection.addString(s); | |||
| 1067 | stabs.emplace_back(std::move(astStab)); | |||
| 1068 | } | |||
| 1069 | ||||
| 1070 | // Cache the file ID for each symbol in an std::pair for faster sorting. | |||
| 1071 | using SortingPair = std::pair<Defined *, int>; | |||
| 1072 | std::vector<SortingPair> symbolsNeedingStabs; | |||
| 1073 | for (const SymtabEntry &entry : | |||
| 1074 | concat<SymtabEntry>(localSymbols, externalSymbols)) { | |||
| 1075 | Symbol *sym = entry.sym; | |||
| 1076 | assert(sym->isLive() && | |||
| 1077 | "dead symbols should not be in localSymbols, externalSymbols"); | |||
| 1078 | if (auto *defined = dyn_cast<Defined>(sym)) { | |||
| 1079 | // Excluded symbols should have been filtered out in finalizeContents(). | |||
| 1080 | assert(defined->includeInSymtab); | |||
| 1081 | ||||
| 1082 | if (defined->isAbsolute()) | |||
| 1083 | continue; | |||
| 1084 | ||||
| 1085 | // Constant-folded symbols go in the executable's symbol table, but don't | |||
| 1086 | // get a stabs entry. | |||
| 1087 | if (defined->wasIdenticalCodeFolded) | |||
| 1088 | continue; | |||
| 1089 | ||||
| 1090 | InputSection *isec = defined->isec; | |||
| 1091 | ObjFile *file = dyn_cast_or_null<ObjFile>(isec->getFile()); | |||
| 1092 | if (!file || !file->compileUnit) | |||
| 1093 | continue; | |||
| 1094 | ||||
| 1095 | symbolsNeedingStabs.emplace_back(defined, defined->isec->getFile()->id); | |||
| 1096 | } | |||
| 1097 | } | |||
| 1098 | ||||
| 1099 | llvm::stable_sort(symbolsNeedingStabs, | |||
| 1100 | [&](const SortingPair &a, const SortingPair &b) { | |||
| 1101 | return a.second < b.second; | |||
| 1102 | }); | |||
| 1103 | ||||
| 1104 | // Emit STABS symbols so that dsymutil and/or the debugger can map address | |||
| 1105 | // regions in the final binary to the source and object files from which they | |||
| 1106 | // originated. | |||
| 1107 | InputFile *lastFile = nullptr; | |||
| 1108 | for (SortingPair &pair : symbolsNeedingStabs) { | |||
| 1109 | Defined *defined = pair.first; | |||
| 1110 | InputSection *isec = defined->isec; | |||
| 1111 | ObjFile *file = cast<ObjFile>(isec->getFile()); | |||
| 1112 | ||||
| 1113 | if (lastFile == nullptr || lastFile != file) { | |||
| 1114 | if (lastFile != nullptr) | |||
| 1115 | emitEndSourceStab(); | |||
| 1116 | lastFile = file; | |||
| 1117 | ||||
| 1118 | emitBeginSourceStab(file->sourceFile()); | |||
| 1119 | emitObjectFileStab(file); | |||
| 1120 | } | |||
| 1121 | ||||
| 1122 | StabsEntry symStab; | |||
| 1123 | symStab.sect = defined->isec->parent->index; | |||
| 1124 | symStab.strx = stringTableSection.addString(defined->getName()); | |||
| 1125 | symStab.value = defined->getVA(); | |||
| 1126 | ||||
| 1127 | if (isCodeSection(isec)) { | |||
| 1128 | symStab.type = N_FUN; | |||
| 1129 | stabs.emplace_back(std::move(symStab)); | |||
| 1130 | emitEndFunStab(defined); | |||
| 1131 | } else { | |||
| 1132 | symStab.type = defined->isExternal() ? N_GSYM : N_STSYM; | |||
| 1133 | stabs.emplace_back(std::move(symStab)); | |||
| 1134 | } | |||
| 1135 | } | |||
| 1136 | ||||
| 1137 | if (!stabs.empty()) | |||
| 1138 | emitEndSourceStab(); | |||
| 1139 | } | |||
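The loop above first collects (symbol, file-id) pairs, stable-sorts them by file, and then emits one begin/end stab pair per run of symbols that share an object file. A standalone sketch of that grouping pattern (illustrative data, not lld code):

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

void groupByFileExample() {
  // (symbol name, file id) pairs, in arbitrary input order.
  std::vector<std::pair<const char *, int>> syms = {
      {"_a", 2}, {"_b", 1}, {"_c", 2}, {"_d", 1}};
  std::stable_sort(syms.begin(), syms.end(),
                   [](const auto &a, const auto &b) { return a.second < b.second; });
  int lastFile = -1;
  for (const auto &[name, file] : syms) {
    if (file != lastFile) {
      if (lastFile != -1)
        std::puts("end-of-file stab");
      std::printf("begin-of-file stab for file %d\n", file);
      lastFile = file;
    }
    std::printf("  symbol stab %s\n", name);
  }
  if (lastFile != -1)
    std::puts("end-of-file stab");
}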
| 1140 | ||||
| 1141 | void SymtabSection::finalizeContents() { | |||
| 1142 | auto addSymbol = [&](std::vector<SymtabEntry> &symbols, Symbol *sym) { | |||
| 1143 | uint32_t strx = stringTableSection.addString(sym->getName()); | |||
| 1144 | symbols.push_back({sym, strx}); | |||
| 1145 | }; | |||
| 1146 | ||||
| 1147 | std::function<void(Symbol *)> localSymbolsHandler; | |||
| 1148 | switch (config->localSymbolsPresence) { | |||
| 1149 | case SymtabPresence::All: | |||
| 1150 | localSymbolsHandler = [&](Symbol *sym) { addSymbol(localSymbols, sym); }; | |||
| 1151 | break; | |||
| 1152 | case SymtabPresence::None: | |||
| 1153 | localSymbolsHandler = [&](Symbol *) { /* Do nothing*/ }; | |||
| 1154 | break; | |||
| 1155 | case SymtabPresence::SelectivelyIncluded: | |||
| 1156 | localSymbolsHandler = [&](Symbol *sym) { | |||
| 1157 | if (config->localSymbolPatterns.match(sym->getName())) | |||
| 1158 | addSymbol(localSymbols, sym); | |||
| 1159 | }; | |||
| 1160 | break; | |||
| 1161 | case SymtabPresence::SelectivelyExcluded: | |||
| 1162 | localSymbolsHandler = [&](Symbol *sym) { | |||
| 1163 | if (!config->localSymbolPatterns.match(sym->getName())) | |||
| 1164 | addSymbol(localSymbols, sym); | |||
| 1165 | }; | |||
| 1166 | break; | |||
| 1167 | } | |||
| 1168 | ||||
| 1169 | // Local symbols aren't in the SymbolTable, so we walk the list of object | |||
| 1170 | // files to gather them. | |||
| 1171 | // But if `-x` is set, then we don't need to. localSymbolsHandler() will do | |||
| 1172 | // the right thing regardless, but this check is a perf optimization because | |||
| 1173 | // iterating through all the input files and their symbols is expensive. | |||
| 1174 | if (config->localSymbolsPresence != SymtabPresence::None) { | |||
| 1175 | for (const InputFile *file : inputFiles) { | |||
| 1176 | if (auto *objFile = dyn_cast<ObjFile>(file)) { | |||
| 1177 | for (Symbol *sym : objFile->symbols) { | |||
| 1178 | if (auto *defined = dyn_cast_or_null<Defined>(sym)) { | |||
| 1179 | if (defined->isExternal() || !defined->isLive() || | |||
| 1180 | !defined->includeInSymtab) | |||
| 1181 | continue; | |||
| 1182 | localSymbolsHandler(sym); | |||
| 1183 | } | |||
| 1184 | } | |||
| 1185 | } | |||
| 1186 | } | |||
| 1187 | } | |||
| 1188 | ||||
| 1189 | // __dyld_private is a local symbol too. It's linker-created and doesn't | |||
| 1190 | // exist in any object file. | |||
| 1191 | if (Defined *dyldPrivate = in.stubHelper->dyldPrivate) | |||
| 1192 | localSymbolsHandler(dyldPrivate); | |||
| 1193 | ||||
| 1194 | for (Symbol *sym : symtab->getSymbols()) { | |||
| 1195 | if (!sym->isLive()) | |||
| 1196 | continue; | |||
| 1197 | if (auto *defined = dyn_cast<Defined>(sym)) { | |||
| 1198 | if (!defined->includeInSymtab) | |||
| 1199 | continue; | |||
| 1200 | assert(defined->isExternal()); | |||
| 1201 | if (defined->privateExtern) | |||
| 1202 | localSymbolsHandler(defined); | |||
| 1203 | else | |||
| 1204 | addSymbol(externalSymbols, defined); | |||
| 1205 | } else if (auto *dysym = dyn_cast<DylibSymbol>(sym)) { | |||
| 1206 | if (dysym->isReferenced()) | |||
| 1207 | addSymbol(undefinedSymbols, sym); | |||
| 1208 | } | |||
| 1209 | } | |||
| 1210 | ||||
| 1211 | emitStabs(); | |||
| 1212 | uint32_t symtabIndex = stabs.size(); | |||
| 1213 | for (const SymtabEntry &entry : | |||
| 1214 | concat<SymtabEntry>(localSymbols, externalSymbols, undefinedSymbols)) { | |||
| 1215 | entry.sym->symtabIndex = symtabIndex++; | |||
| 1216 | } | |||
| 1217 | } | |||
| 1218 | ||||
| 1219 | uint32_t SymtabSection::getNumSymbols() const { | |||
| 1220 | return stabs.size() + localSymbols.size() + externalSymbols.size() + | |||
| 1221 | undefinedSymbols.size(); | |||
| 1222 | } | |||
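Since stabs are emitted first, the indices assigned in finalizeContents() start at stabs.size(), and getNumSymbols() is the sum of the four groups. A tiny worked example with hypothetical counts:

#include <cassert>
#include <cstdint>

void symtabIndexExample() {
  // Hypothetical counts, mirroring finalizeContents()/getNumSymbols() above.
  uint32_t numStabs = 5, numLocal = 2, numExternal = 3, numUndefined = 1;
  uint32_t firstRealIndex = numStabs;  // "real" symbols follow the stabs
  uint32_t total = numStabs + numLocal + numExternal + numUndefined;
  assert(firstRealIndex == 5 && total == 11);
}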
| 1223 | ||||
| 1224 | // This serves to hide (type-erase) the template parameter from SymtabSection. | |||
| 1225 | template <class LP> class SymtabSectionImpl final : public SymtabSection { | |||
| 1226 | public: | |||
| 1227 | SymtabSectionImpl(StringTableSection &stringTableSection) | |||
| 1228 | : SymtabSection(stringTableSection) {} | |||
| 1229 | uint64_t getRawSize() const override; | |||
| 1230 | void writeTo(uint8_t *buf) const override; | |||
| 1231 | }; | |||
| 1232 | ||||
| 1233 | template <class LP> uint64_t SymtabSectionImpl<LP>::getRawSize() const { | |||
| 1234 | return getNumSymbols() * sizeof(typename LP::nlist); | |||
| 1235 | } | |||
| 1236 | ||||
| 1237 | template <class LP> void SymtabSectionImpl<LP>::writeTo(uint8_t *buf) const { | |||
| 1238 | auto *nList = reinterpret_cast<typename LP::nlist *>(buf); | |||
| 1239 | // Emit the stabs entries before the "real" symbols. We cannot emit them | |||
| 1240 | // after as that would render Symbol::symtabIndex inaccurate. | |||
| 1241 | for (const StabsEntry &entry : stabs) { | |||
| 1242 | nList->n_strx = entry.strx; | |||
| 1243 | nList->n_type = entry.type; | |||
| 1244 | nList->n_sect = entry.sect; | |||
| 1245 | nList->n_desc = entry.desc; | |||
| 1246 | nList->n_value = entry.value; | |||
| 1247 | ++nList; | |||
| 1248 | } | |||
| 1249 | ||||
| 1250 | for (const SymtabEntry &entry : concat<const SymtabEntry>( | |||
| 1251 | localSymbols, externalSymbols, undefinedSymbols)) { | |||
| 1252 | nList->n_strx = entry.strx; | |||
| 1253 | // TODO populate n_desc with more flags | |||
| 1254 | if (auto *defined = dyn_cast<Defined>(entry.sym)) { | |||
| 1255 | uint8_t scope = 0; | |||
| 1256 | if (defined->privateExtern) { | |||
| 1257 | // Private external -- dylib scoped symbol. | |||
| 1258 | // Promote to non-external at link time. | |||
| 1259 | scope = N_PEXT; | |||
| 1260 | } else if (defined->isExternal()) { | |||
| 1261 | // Normal global symbol. | |||
| 1262 | scope = N_EXT; | |||
| 1263 | } else { | |||
| 1264 | // TU-local symbol from localSymbols. | |||
| 1265 | scope = 0; | |||
| 1266 | } | |||
| 1267 | ||||
| 1268 | if (defined->isAbsolute()) { | |||
| 1269 | nList->n_type = scope | N_ABS; | |||
| 1270 | nList->n_sect = NO_SECT; | |||
| 1271 | nList->n_value = defined->value; | |||
| 1272 | } else { | |||
| 1273 | nList->n_type = scope | N_SECT; | |||
| 1274 | nList->n_sect = defined->isec->parent->index; | |||
| 1275 | // For the N_SECT symbol type, n_value is the address of the symbol | |||
| 1276 | nList->n_value = defined->getVA(); | |||
| 1277 | } | |||
| 1278 | nList->n_desc |= defined->thumb ? N_ARM_THUMB_DEF : 0; | |||
| 1279 | nList->n_desc |= defined->isExternalWeakDef() ? N_WEAK_DEF : 0; | |||
| 1280 | nList->n_desc |= | |||
| 1281 | defined->referencedDynamically ? REFERENCED_DYNAMICALLY : 0; | |||
| 1282 | } else if (auto *dysym = dyn_cast<DylibSymbol>(entry.sym)) { | |||
| 1283 | uint16_t n_desc = nList->n_desc; | |||
| 1284 | int16_t ordinal = ordinalForDylibSymbol(*dysym); | |||
| 1285 | if (ordinal == BIND_SPECIAL_DYLIB_FLAT_LOOKUP) | |||
| 1286 | SET_LIBRARY_ORDINAL(n_desc, DYNAMIC_LOOKUP_ORDINAL); | |||
| 1287 | else if (ordinal == BIND_SPECIAL_DYLIB_MAIN_EXECUTABLE) | |||
| 1288 | SET_LIBRARY_ORDINAL(n_desc, EXECUTABLE_ORDINAL); | |||
| 1289 | else { | |||
| 1290 | assert(ordinal > 0); | |||
| 1291 | SET_LIBRARY_ORDINAL(n_desc, static_cast<uint8_t>(ordinal)); | |||
| 1292 | } | |||
| 1293 | ||||
| 1294 | nList->n_type = N_EXT; | |||
| 1295 | n_desc |= dysym->isWeakDef() ? N_WEAK_DEF : 0; | |||
| 1296 | n_desc |= dysym->isWeakRef() ? N_WEAK_REF : 0; | |||
| 1297 | nList->n_desc = n_desc; | |||
| 1298 | } | |||
| 1299 | ++nList; | |||
| 1300 | } | |||
| 1301 | } | |||
| 1302 | ||||
| 1303 | template <class LP> | |||
| 1304 | SymtabSection * | |||
| 1305 | macho::makeSymtabSection(StringTableSection &stringTableSection) { | |||
| 1306 | return make<SymtabSectionImpl<LP>>(stringTableSection); | |||
| 1307 | } | |||
| 1308 | ||||
| 1309 | IndirectSymtabSection::IndirectSymtabSection() | |||
| 1310 | : LinkEditSection(segment_names::linkEdit, | |||
| 1311 | section_names::indirectSymbolTable) {} | |||
| 1312 | ||||
| 1313 | uint32_t IndirectSymtabSection::getNumSymbols() const { | |||
| 1314 | return in.got->getEntries().size() + in.tlvPointers->getEntries().size() + | |||
| 1315 | 2 * in.stubs->getEntries().size(); | |||
| 1316 | } | |||
| 1317 | ||||
| 1318 | bool IndirectSymtabSection::isNeeded() const { | |||
| 1319 | return in.got->isNeeded() || in.tlvPointers->isNeeded() || | |||
| 1320 | in.stubs->isNeeded(); | |||
| 1321 | } | |||
| 1322 | ||||
| 1323 | void IndirectSymtabSection::finalizeContents() { | |||
| 1324 | uint32_t off = 0; | |||
| 1325 | in.got->reserved1 = off; | |||
| 1326 | off += in.got->getEntries().size(); | |||
| 1327 | in.tlvPointers->reserved1 = off; | |||
| 1328 | off += in.tlvPointers->getEntries().size(); | |||
| 1329 | in.stubs->reserved1 = off; | |||
| 1330 | off += in.stubs->getEntries().size(); | |||
| 1331 | in.lazyPointers->reserved1 = off; | |||
| 1332 | } | |||
| 1333 | ||||
| 1334 | static uint32_t indirectValue(const Symbol *sym) { | |||
| 1335 | if (sym->symtabIndex == UINT32_MAX) | |||
| 1336 | return INDIRECT_SYMBOL_LOCAL; | |||
| 1337 | if (auto *defined = dyn_cast<Defined>(sym)) | |||
| 1338 | if (defined->privateExtern) | |||
| 1339 | return INDIRECT_SYMBOL_LOCAL; | |||
| 1340 | return sym->symtabIndex; | |||
| 1341 | } | |||
| 1342 | ||||
| 1343 | void IndirectSymtabSection::writeTo(uint8_t *buf) const { | |||
| 1344 | uint32_t off = 0; | |||
| 1345 | for (const Symbol *sym : in.got->getEntries()) { | |||
| 1346 | write32le(buf + off * sizeof(uint32_t), indirectValue(sym)); | |||
| 1347 | ++off; | |||
| 1348 | } | |||
| 1349 | for (const Symbol *sym : in.tlvPointers->getEntries()) { | |||
| 1350 | write32le(buf + off * sizeof(uint32_t), indirectValue(sym)); | |||
| 1351 | ++off; | |||
| 1352 | } | |||
| 1353 | for (const Symbol *sym : in.stubs->getEntries()) { | |||
| 1354 | write32le(buf + off * sizeof(uint32_t), indirectValue(sym)); | |||
| 1355 | ++off; | |||
| 1356 | } | |||
| 1357 | // There is a 1:1 correspondence between stubs and LazyPointerSection | |||
| 1358 | // entries. But giving __stubs and __la_symbol_ptr the same reserved1 | |||
| 1359 | // (the offset into the indirect symbol table) so that they both refer | |||
| 1360 | // to the same range of offsets confuses `strip`, so write the stubs | |||
| 1361 | // symbol table offsets a second time. | |||
| 1362 | for (const Symbol *sym : in.stubs->getEntries()) { | |||
| 1363 | write32le(buf + off * sizeof(uint32_t), indirectValue(sym)); | |||
| 1364 | ++off; | |||
| 1365 | } | |||
| 1366 | } | |||
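A minimal standalone sketch of the indirect-symbol-table layout produced by finalizeContents() and writeTo() above, with made-up entry counts; only the running-offset scheme (and the duplicated stubs range that keeps `strip` happy) is taken from the code:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t nGot = 3, nTlv = 1, nStubs = 4;   // hypothetical entry counts
  uint32_t gotStart = 0;                     // in.got->reserved1
  uint32_t tlvStart = gotStart + nGot;       // in.tlvPointers->reserved1 == 3
  uint32_t stubsStart = tlvStart + nTlv;     // in.stubs->reserved1 == 4
  uint32_t lazyStart = stubsStart + nStubs;  // in.lazyPointers->reserved1 == 8
  uint32_t total = nGot + nTlv + 2 * nStubs; // getNumSymbols() == 12
  std::printf("got=%u tlv=%u stubs=%u lazy=%u total=%u\n", gotStart, tlvStart,
              stubsStart, lazyStart, total);
}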
| 1367 | ||||
| 1368 | StringTableSection::StringTableSection() | |||
| 1369 | : LinkEditSection(segment_names::linkEdit, section_names::stringTable) {} | |||
| 1370 | ||||
| 1371 | uint32_t StringTableSection::addString(StringRef str) { | |||
| 1372 | uint32_t strx = size; | |||
| 1373 | strings.push_back(str); // TODO: consider deduplicating strings | |||
| 1374 | size += str.size() + 1; // account for null terminator | |||
| 1375 | return strx; | |||
| 1376 | } | |||
| 1377 | ||||
| 1378 | void StringTableSection::writeTo(uint8_t *buf) const { | |||
| 1379 | uint32_t off = 0; | |||
| 1380 | for (StringRef str : strings) { | |||
| 1381 | memcpy(buf + off, str.data(), str.size()); | |||
| 1382 | off += str.size() + 1; // account for null terminator | |||
| 1383 | } | |||
| 1384 | } | |||
| 1385 | ||||
| 1386 | static_assert((CodeSignatureSection::blobHeadersSize % 8) == 0); | |||
| 1387 | static_assert((CodeSignatureSection::fixedHeadersSize % 8) == 0); | |||
| 1388 | ||||
| 1389 | CodeSignatureSection::CodeSignatureSection() | |||
| 1390 | : LinkEditSection(segment_names::linkEdit, section_names::codeSignature) { | |||
| 1391 | align = 16; // required by libstuff | |||
| 1392 | // FIXME: Consider using finalOutput instead of outputFile. | |||
| 1393 | fileName = config->outputFile; | |||
| 1394 | size_t slashIndex = fileName.rfind("/"); | |||
| 1395 | if (slashIndex != std::string::npos) | |||
| 1396 | fileName = fileName.drop_front(slashIndex + 1); | |||
| 1397 | ||||
| 1398 | // NOTE: Any changes to these calculations should be repeated | |||
| 1399 | // in llvm-objcopy's MachOLayoutBuilder::layoutTail. | |||
| 1400 | allHeadersSize = alignTo<16>(fixedHeadersSize + fileName.size() + 1); | |||
| 1401 | fileNamePad = allHeadersSize - fixedHeadersSize - fileName.size(); | |||
| 1402 | } | |||
| 1403 | ||||
| 1404 | uint32_t CodeSignatureSection::getBlockCount() const { | |||
| 1405 | return (fileOff + blockSize - 1) / blockSize; | |||
| 1406 | } | |||
| 1407 | ||||
| 1408 | uint64_t CodeSignatureSection::getRawSize() const { | |||
| 1409 | return allHeadersSize + getBlockCount() * hashSize; | |||
| 1410 | } | |||
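getBlockCount() is a plain ceiling division of the signed portion of the file by the hashing block size. A worked example with assumed values (blockSize = 4096 and hashSize = 32 are defined in the header, which is not shown here):

#include <cassert>
#include <cstdint>

void blockCountExample() {
  uint64_t fileOff = 10000, blockSize = 4096, hashSize = 32; // assumptions
  uint64_t blocks = (fileOff + blockSize - 1) / blockSize;   // ceil division
  assert(blocks == 3);
  uint64_t hashBytes = blocks * hashSize; // appended after allHeadersSize
  assert(hashBytes == 96);
}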
| 1411 | ||||
| 1412 | void CodeSignatureSection::writeHashes(uint8_t *buf) const { | |||
| 1413 | // NOTE: Changes to this functionality should be repeated in llvm-objcopy's | |||
| 1414 | // MachOWriter::writeSignatureData. | |||
| 1415 | uint8_t *hashes = buf + fileOff + allHeadersSize; | |||
| 1416 | parallelFor(0, getBlockCount(), [&](size_t i) { | |||
| 1417 | sha256(buf + i * blockSize, | |||
| 1418 | std::min(static_cast<size_t>(fileOff - i * blockSize), blockSize), | |||
| 1419 | hashes + i * hashSize); | |||
| 1420 | }); | |||
| 1421 | #if defined(__APPLE__) | |||
| 1422 | // This is a macOS-specific work-around and makes no sense for any | |||
| 1423 | // other host OS. See https://openradar.appspot.com/FB8914231 | |||
| 1424 | // | |||
| 1425 | // The macOS kernel maintains a signature-verification cache to | |||
| 1426 | // quickly validate applications at time of execve(2). The trouble | |||
| 1427 | // is that for the kernel creates the cache entry at the time of the | |||
| 1428 | // mmap(2) call, before we have a chance to write either the code to | |||
| 1429 | // sign or the signature header+hashes. The fix is to invalidate | |||
| 1430 | // all cached data associated with the output file, thus discarding | |||
| 1431 | // the bogus prematurely-cached signature. | |||
| 1432 | msync(buf, fileOff + getSize(), MS_INVALIDATE); | |||
| 1433 | #endif | |||
| 1434 | } | |||
| 1435 | ||||
| 1436 | void CodeSignatureSection::writeTo(uint8_t *buf) const { | |||
| 1437 | // NOTE: Changes to this functionality should be repeated in llvm-objcopy's | |||
| 1438 | // MachOWriter::writeSignatureData. | |||
| 1439 | uint32_t signatureSize = static_cast<uint32_t>(getSize()); | |||
| 1440 | auto *superBlob = reinterpret_cast<CS_SuperBlob *>(buf); | |||
| 1441 | write32be(&superBlob->magic, CSMAGIC_EMBEDDED_SIGNATURE); | |||
| 1442 | write32be(&superBlob->length, signatureSize); | |||
| 1443 | write32be(&superBlob->count, 1); | |||
| 1444 | auto *blobIndex = reinterpret_cast<CS_BlobIndex *>(&superBlob[1]); | |||
| 1445 | write32be(&blobIndex->type, CSSLOT_CODEDIRECTORY); | |||
| 1446 | write32be(&blobIndex->offset, blobHeadersSize); | |||
| 1447 | auto *codeDirectory = | |||
| 1448 | reinterpret_cast<CS_CodeDirectory *>(buf + blobHeadersSize); | |||
| 1449 | write32be(&codeDirectory->magic, CSMAGIC_CODEDIRECTORY); | |||
| 1450 | write32be(&codeDirectory->length, signatureSize - blobHeadersSize); | |||
| 1451 | write32be(&codeDirectory->version, CS_SUPPORTSEXECSEG); | |||
| 1452 | write32be(&codeDirectory->flags, CS_ADHOC | CS_LINKER_SIGNED); | |||
| 1453 | write32be(&codeDirectory->hashOffset, | |||
| 1454 | sizeof(CS_CodeDirectory) + fileName.size() + fileNamePad); | |||
| 1455 | write32be(&codeDirectory->identOffset, sizeof(CS_CodeDirectory)); | |||
| 1456 | codeDirectory->nSpecialSlots = 0; | |||
| 1457 | write32be(&codeDirectory->nCodeSlots, getBlockCount()); | |||
| 1458 | write32be(&codeDirectory->codeLimit, fileOff); | |||
| 1459 | codeDirectory->hashSize = static_cast<uint8_t>(hashSize); | |||
| 1460 | codeDirectory->hashType = kSecCodeSignatureHashSHA256; | |||
| 1461 | codeDirectory->platform = 0; | |||
| 1462 | codeDirectory->pageSize = blockSizeShift; | |||
| 1463 | codeDirectory->spare2 = 0; | |||
| 1464 | codeDirectory->scatterOffset = 0; | |||
| 1465 | codeDirectory->teamOffset = 0; | |||
| 1466 | codeDirectory->spare3 = 0; | |||
| 1467 | codeDirectory->codeLimit64 = 0; | |||
| 1468 | OutputSegment *textSeg = getOrCreateOutputSegment(segment_names::text); | |||
| 1469 | write64be(&codeDirectory->execSegBase, textSeg->fileOff); | |||
| 1470 | write64be(&codeDirectory->execSegLimit, textSeg->fileSize); | |||
| 1471 | write64be(&codeDirectory->execSegFlags, | |||
| 1472 | config->outputType == MH_EXECUTE ? CS_EXECSEG_MAIN_BINARY : 0); | |||
| 1473 | auto *id = reinterpret_cast<char *>(&codeDirectory[1]); | |||
| 1474 | memcpy(id, fileName.begin(), fileName.size()); | |||
| 1475 | memset(id + fileName.size(), 0, fileNamePad); | |||
| 1476 | } | |||
| 1477 | ||||
| 1478 | BitcodeBundleSection::BitcodeBundleSection() | |||
| 1479 | : SyntheticSection(segment_names::llvm, section_names::bitcodeBundle) {} | |||
| 1480 | ||||
| 1481 | class ErrorCodeWrapper { | |||
| 1482 | public: | |||
| 1483 | explicit ErrorCodeWrapper(std::error_code ec) : errorCode(ec.value()) {} | |||
| 1484 | explicit ErrorCodeWrapper(int ec) : errorCode(ec) {} | |||
| 1485 | operator int() const { return errorCode; } | |||
| 1486 | ||||
| 1487 | private: | |||
| 1488 | int errorCode; | |||
| 1489 | }; | |||
| 1490 | ||||
| 1491 | #define CHECK_EC(exp) \ | |||
| 1492 | do { \ | |||
| 1493 | ErrorCodeWrapper ec(exp); \ | |||
| 1494 | if (ec) \ | |||
| 1495 | fatal(Twine("operation failed with error code ") + Twine(ec) + ": " + \ | |||
| 1496 | #exp); \ | |||
| 1497 | } while (0); | |||
| 1498 | ||||
| 1499 | void BitcodeBundleSection::finalize() { | |||
| 1500 | #ifdef LLVM_HAVE_LIBXAR | |||
| 1501 | using namespace llvm::sys::fs; | |||
| 1502 | CHECK_EC(createTemporaryFile("bitcode-bundle", "xar", xarPath)); | |||
| 1503 | ||||
| 1504 | #pragma clang diagnostic push | |||
| 1505 | #pragma clang diagnostic ignored "-Wdeprecated-declarations" | |||
| 1506 | xar_t xar(xar_open(xarPath.data(), O_RDWR)); | |||
| 1507 | #pragma clang diagnostic pop | |||
| 1508 | if (!xar) | |||
| 1509 | fatal("failed to open XAR temporary file at " + xarPath); | |||
| 1510 | CHECK_EC(xar_opt_set(xar, XAR_OPT_COMPRESSION, XAR_OPT_VAL_NONE)); | |||
| 1511 | // FIXME: add more data to XAR | |||
| 1512 | CHECK_EC(xar_close(xar)); | |||
| 1513 | ||||
| 1514 | file_size(xarPath, xarSize); | |||
| 1515 | #endif // defined(LLVM_HAVE_LIBXAR) | |||
| 1516 | } | |||
| 1517 | ||||
| 1518 | void BitcodeBundleSection::writeTo(uint8_t *buf) const { | |||
| 1519 | using namespace llvm::sys::fs; | |||
| 1520 | file_t handle = | |||
| 1521 | CHECK(openNativeFile(xarPath, CD_OpenExisting, FA_Read, OF_None), | |||
| 1522 | "failed to open XAR file"); | |||
| 1523 | std::error_code ec; | |||
| 1524 | mapped_file_region xarMap(handle, mapped_file_region::mapmode::readonly, | |||
| 1525 | xarSize, 0, ec); | |||
| 1526 | if (ec) | |||
| 1527 | fatal("failed to map XAR file"); | |||
| 1528 | memcpy(buf, xarMap.const_data(), xarSize); | |||
| 1529 | ||||
| 1530 | closeFile(handle); | |||
| 1531 | remove(xarPath); | |||
| 1532 | } | |||
| 1533 | ||||
| 1534 | CStringSection::CStringSection(const char *name) | |||
| 1535 | : SyntheticSection(segment_names::text, name) { | |||
| 1536 | flags = S_CSTRING_LITERALS; | |||
| 1537 | } | |||
| 1538 | ||||
| 1539 | void CStringSection::addInput(CStringInputSection *isec) { | |||
| 1540 | isec->parent = this; | |||
| 1541 | inputs.push_back(isec); | |||
| 1542 | if (isec->align > align) | |||
| 1543 | align = isec->align; | |||
| 1544 | } | |||
| 1545 | ||||
| 1546 | void CStringSection::writeTo(uint8_t *buf) const { | |||
| 1547 | for (const CStringInputSection *isec : inputs) { | |||
| 1548 | for (size_t i = 0, e = isec->pieces.size(); i != e; ++i) { | |||
| 1549 | if (!isec->pieces[i].live) | |||
| 1550 | continue; | |||
| 1551 | StringRef string = isec->getStringRef(i); | |||
| 1552 | memcpy(buf + isec->pieces[i].outSecOff, string.data(), string.size()); | |||
| 1553 | } | |||
| 1554 | } | |||
| 1555 | } | |||
| 1556 | ||||
| 1557 | void CStringSection::finalizeContents() { | |||
| 1558 | uint64_t offset = 0; | |||
| 1559 | for (CStringInputSection *isec : inputs) { | |||
| 1560 | for (size_t i = 0, e = isec->pieces.size(); i != e; ++i) { | |||
| 1561 | if (!isec->pieces[i].live) | |||
| 1562 | continue; | |||
| 1563 | // See comment above DeduplicatedCStringSection for how alignment is | |||
| 1564 | // handled. | |||
| 1565 | uint32_t pieceAlign = | |||
| 1566 | 1 << countTrailingZeros(isec->align | isec->pieces[i].inSecOff); | |||
| 1567 | offset = alignTo(offset, pieceAlign); | |||
| 1568 | isec->pieces[i].outSecOff = offset; | |||
| 1569 | isec->isFinal = true; | |||
| 1570 | StringRef string = isec->getStringRef(i); | |||
| 1571 | offset += string.size() + 1; // account for null terminator | |||
| 1572 | } | |||
| 1573 | } | |||
| 1574 | size = offset; | |||
| 1575 | } | |||
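The statement flagged by the analyzer (line 1566, inside the loop above) shifts an `int` by the result of `countTrailingZeros`: if `isec->align | isec->pieces[i].inSecOff` were ever zero, the default ZB_Width behaviour (see MathExtras.h below) returns 32, and `1 << 32` on a 32-bit `int` is undefined. A hedged, standalone sketch of one way to keep the expression defined; this is an illustration, not the upstream fix, and asserting a non-zero alignment (as DeduplicatedCStringSection::finalizeContents() already does) would be another option:

#include "llvm/Support/MathExtras.h"
#include <cstdint>

// Widening the shifted constant to 64 bits keeps the shift defined even when
// the operand is zero and countTrailingZeros returns the full 32-bit width.
uint64_t safePieceAlign(uint32_t sectionAlign, uint32_t inSecOff) {
  return uint64_t(1) << llvm::countTrailingZeros(sectionAlign | inSecOff);
}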
| 1576 | ||||
| 1577 | // Mergeable cstring literals are found under the __TEXT,__cstring section. In | |||
| 1578 | // contrast to ELF, which puts strings that need different alignments into | |||
| 1579 | // different sections, clang's Mach-O backend puts them all in one section. | |||
| 1580 | // Strings that need to be aligned have the .p2align directive emitted before | |||
| 1581 | // them, which simply translates into zero padding in the object file. In other | |||
| 1582 | // words, we have to infer the desired alignment of these cstrings from their | |||
| 1583 | // addresses. | |||
| 1584 | // | |||
| 1585 | // We differ slightly from ld64 in how we've chosen to align these cstrings. | |||
| 1586 | // Both LLD and ld64 preserve the number of trailing zeros in each cstring's | |||
| 1587 | // address in the input object files. When deduplicating identical cstrings, | |||
| 1588 | // both linkers pick the cstring whose address has more trailing zeros, and | |||
| 1589 | // preserve the alignment of that address in the final binary. However, ld64 | |||
| 1590 | // goes a step further and also preserves the offset of the cstring from the | |||
| 1591 | // last section-aligned address. I.e. if a cstring is at offset 18 in the | |||
| 1592 | // input, with a section alignment of 16, then both LLD and ld64 will ensure the | |||
| 1593 | // final address is 2-byte aligned (since 18 == 16 + 2). But ld64 will also | |||
| 1594 | // ensure that the final address is of the form 16 * k + 2 for some k. | |||
| 1595 | // | |||
| 1596 | // Note that ld64's heuristic means that a dedup'ed cstring's final address is | |||
| 1597 | // dependent on the order of the input object files. E.g. if in addition to the | |||
| 1598 | // cstring at offset 18 above, we have a duplicate one in another file with a | |||
| 1599 | // `.cstring` section alignment of 2 and an offset of zero, then ld64 will pick | |||
| 1600 | // the cstring from the object file earlier on the command line (since both have | |||
| 1601 | // the same number of trailing zeros in their address). So the final cstring may | |||
| 1602 | // either be at some address `16 * k + 2` or at some address `2 * k`. | |||
| 1603 | // | |||
| 1604 | // I've opted not to follow this behavior primarily for implementation | |||
| 1605 | // simplicity, and secondarily to save a few more bytes. It's not clear to me | |||
| 1606 | // that preserving the section alignment + offset is ever necessary, and there | |||
| 1607 | // are many cases that are clearly redundant. In particular, if an x86_64 object | |||
| 1608 | // file contains some strings that are accessed via SIMD instructions, then the | |||
| 1609 | // .cstring section in the object file will be 16-byte-aligned (since SIMD | |||
| 1610 | // requires its operand addresses to be 16-byte aligned). However, there will | |||
| 1611 | // typically also be other cstrings in the same file that aren't used via SIMD | |||
| 1612 | // and don't need this alignment. They will be emitted at some arbitrary address | |||
| 1613 | // `A`, but ld64 will treat them as being 16-byte aligned with an offset of | |||
| 1614 | // `A % 16`. | |||
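A small worked example of the rule described above (illustrative, not part of the linker), before the function that implements it: for the cstring at input offset 18 in a 16-byte-aligned section, the preserved alignment comes out to 2 bytes.

#include "llvm/Support/MathExtras.h"
#include <cassert>

void cstringAlignExample() {
  unsigned align = 16, inSecOff = 18;                       // 0b10000, 0b10010
  unsigned tz = llvm::countTrailingZeros(align | inSecOff); // 0b10010 -> 1
  assert((1u << tz) == 2 && "string stays 2-byte aligned in the output");
}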
| 1615 | void DeduplicatedCStringSection::finalizeContents() { | |||
| 1616 | // Find the largest alignment required for each string. | |||
| 1617 | for (const CStringInputSection *isec : inputs) { | |||
| 1618 | for (size_t i = 0, e = isec->pieces.size(); i != e; ++i) { | |||
| 1619 | const StringPiece &piece = isec->pieces[i]; | |||
| 1620 | if (!piece.live) | |||
| 1621 | continue; | |||
| 1622 | auto s = isec->getCachedHashStringRef(i); | |||
| 1623 | assert(isec->align != 0); | |||
| 1624 | uint8_t trailingZeros = countTrailingZeros(isec->align | piece.inSecOff); | |||
| 1625 | auto it = stringOffsetMap.insert( | |||
| 1626 | std::make_pair(s, StringOffset(trailingZeros))); | |||
| 1627 | if (!it.second && it.first->second.trailingZeros < trailingZeros) | |||
| 1628 | it.first->second.trailingZeros = trailingZeros; | |||
| 1629 | } | |||
| 1630 | } | |||
| 1631 | ||||
| 1632 | // Assign an offset for each string and save it to the corresponding | |||
| 1633 | // StringPieces for easy access. | |||
| 1634 | for (CStringInputSection *isec : inputs) { | |||
| 1635 | for (size_t i = 0, e = isec->pieces.size(); i != e; ++i) { | |||
| 1636 | if (!isec->pieces[i].live) | |||
| 1637 | continue; | |||
| 1638 | auto s = isec->getCachedHashStringRef(i); | |||
| 1639 | auto it = stringOffsetMap.find(s); | |||
| 1640 | assert(it != stringOffsetMap.end()); | |||
| 1641 | StringOffset &offsetInfo = it->second; | |||
| 1642 | if (offsetInfo.outSecOff == UINT64_MAX) { | |||
| 1643 | offsetInfo.outSecOff = alignTo(size, 1ULL << offsetInfo.trailingZeros); | |||
| 1644 | size = | |||
| 1645 | offsetInfo.outSecOff + s.size() + 1; // account for null terminator | |||
| 1646 | } | |||
| 1647 | isec->pieces[i].outSecOff = offsetInfo.outSecOff; | |||
| 1648 | } | |||
| 1649 | isec->isFinal = true; | |||
| 1650 | } | |||
| 1651 | } | |||
| 1652 | ||||
| 1653 | void DeduplicatedCStringSection::writeTo(uint8_t *buf) const { | |||
| 1654 | for (const auto &p : stringOffsetMap) { | |||
| 1655 | StringRef data = p.first.val(); | |||
| 1656 | uint64_t off = p.second.outSecOff; | |||
| 1657 | if (!data.empty()) | |||
| 1658 | memcpy(buf + off, data.data(), data.size()); | |||
| 1659 | } | |||
| 1660 | } | |||
| 1661 | ||||
| 1662 | DeduplicatedCStringSection::StringOffset | |||
| 1663 | DeduplicatedCStringSection::getStringOffset(StringRef str) const { | |||
| 1664 | // StringPiece uses 31 bits to store the hashes, so we replicate that | |||
| 1665 | uint32_t hash = xxHash64(str) & 0x7fffffff; | |||
| 1666 | auto offset = stringOffsetMap.find(CachedHashStringRef(str, hash)); | |||
| 1667 | assert(offset != stringOffsetMap.end() && | |||
| 1668 | "Looked-up strings should always exist in section"); | |||
| 1669 | return offset->second; | |||
| 1670 | } | |||
| 1671 | ||||
| 1672 | // This section is actually emitted as __TEXT,__const by ld64, but clang may | |||
| 1673 | // emit input sections of that name, and LLD doesn't currently support mixing | |||
| 1674 | // synthetic and concat-type OutputSections. To work around this, I've given | |||
| 1675 | // our merged-literals section a different name. | |||
| 1676 | WordLiteralSection::WordLiteralSection() | |||
| 1677 | : SyntheticSection(segment_names::text, section_names::literals) { | |||
| 1678 | align = 16; | |||
| 1679 | } | |||
| 1680 | ||||
| 1681 | void WordLiteralSection::addInput(WordLiteralInputSection *isec) { | |||
| 1682 | isec->parent = this; | |||
| 1683 | inputs.push_back(isec); | |||
| 1684 | } | |||
| 1685 | ||||
| 1686 | void WordLiteralSection::finalizeContents() { | |||
| 1687 | for (WordLiteralInputSection *isec : inputs) { | |||
| 1688 | // We do all processing of the InputSection here, so it will be effectively | |||
| 1689 | // finalized. | |||
| 1690 | isec->isFinal = true; | |||
| 1691 | const uint8_t *buf = isec->data.data(); | |||
| 1692 | switch (sectionType(isec->getFlags())) { | |||
| 1693 | case S_4BYTE_LITERALS: { | |||
| 1694 | for (size_t off = 0, e = isec->data.size(); off < e; off += 4) { | |||
| 1695 | if (!isec->isLive(off)) | |||
| 1696 | continue; | |||
| 1697 | uint32_t value = *reinterpret_cast<const uint32_t *>(buf + off); | |||
| 1698 | literal4Map.emplace(value, literal4Map.size()); | |||
| 1699 | } | |||
| 1700 | break; | |||
| 1701 | } | |||
| 1702 | case S_8BYTE_LITERALS: { | |||
| 1703 | for (size_t off = 0, e = isec->data.size(); off < e; off += 8) { | |||
| 1704 | if (!isec->isLive(off)) | |||
| 1705 | continue; | |||
| 1706 | uint64_t value = *reinterpret_cast<const uint64_t *>(buf + off); | |||
| 1707 | literal8Map.emplace(value, literal8Map.size()); | |||
| 1708 | } | |||
| 1709 | break; | |||
| 1710 | } | |||
| 1711 | case S_16BYTE_LITERALS: { | |||
| 1712 | for (size_t off = 0, e = isec->data.size(); off < e; off += 16) { | |||
| 1713 | if (!isec->isLive(off)) | |||
| 1714 | continue; | |||
| 1715 | UInt128 value = *reinterpret_cast<const UInt128 *>(buf + off); | |||
| 1716 | literal16Map.emplace(value, literal16Map.size()); | |||
| 1717 | } | |||
| 1718 | break; | |||
| 1719 | } | |||
| 1720 | default: | |||
| 1721 | llvm_unreachable("invalid literal section type")::llvm::llvm_unreachable_internal("invalid literal section type" , "lld/MachO/SyntheticSections.cpp", 1721); | |||
| 1722 | } | |||
| 1723 | } | |||
| 1724 | } | |||
| 1725 | ||||
| 1726 | void WordLiteralSection::writeTo(uint8_t *buf) const { | |||
| 1727 | // Note that we don't attempt to do any endianness conversion in addInput(), | |||
| 1728 | // so we don't do it here either -- just write out the original value, | |||
| 1729 | // byte-for-byte. | |||
| 1730 | for (const auto &p : literal16Map) | |||
| 1731 | memcpy(buf + p.second * 16, &p.first, 16); | |||
| 1732 | buf += literal16Map.size() * 16; | |||
| 1733 | ||||
| 1734 | for (const auto &p : literal8Map) | |||
| 1735 | memcpy(buf + p.second * 8, &p.first, 8); | |||
| 1736 | buf += literal8Map.size() * 8; | |||
| 1737 | ||||
| 1738 | for (const auto &p : literal4Map) | |||
| 1739 | memcpy(buf + p.second * 4, &p.first, 4); | |||
| 1740 | } | |||
| 1741 | ||||
| 1742 | ObjCImageInfoSection::ObjCImageInfoSection() | |||
| 1743 | : SyntheticSection(segment_names::data, section_names::objCImageInfo) {} | |||
| 1744 | ||||
| 1745 | ObjCImageInfoSection::ImageInfo | |||
| 1746 | ObjCImageInfoSection::parseImageInfo(const InputFile *file) { | |||
| 1747 | ImageInfo info; | |||
| 1748 | ArrayRef<uint8_t> data = file->objCImageInfo; | |||
| 1749 | // The image info struct has the following layout: | |||
| 1750 | // struct { | |||
| 1751 | // uint32_t version; | |||
| 1752 | // uint32_t flags; | |||
| 1753 | // }; | |||
| 1754 | if (data.size() < 8) { | |||
| 1755 | warn(toString(file) + ": invalid __objc_imageinfo size"); | |||
| 1756 | return info; | |||
| 1757 | } | |||
| 1758 | ||||
| 1759 | auto *buf = reinterpret_cast<const uint32_t *>(data.data()); | |||
| 1760 | if (read32le(buf) != 0) { | |||
| 1761 | warn(toString(file) + ": invalid __objc_imageinfo version"); | |||
| 1762 | return info; | |||
| 1763 | } | |||
| 1764 | ||||
| 1765 | uint32_t flags = read32le(buf + 1); | |||
| 1766 | info.swiftVersion = (flags >> 8) & 0xff; | |||
| 1767 | info.hasCategoryClassProperties = flags & 0x40; | |||
| 1768 | return info; | |||
| 1769 | } | |||
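A standalone sketch of the flags layout parsed above and re-emitted in writeTo() further down: the Swift version occupies bits 8-15 and bit 0x40 records category class property support. The concrete values are illustrative.

#include <cassert>
#include <cstdint>

void imageInfoFlagsExample() {
  uint32_t flags = (5u << 8) | 0x40; // hypothetical: Swift 4.0, bit 0x40 set
  uint8_t swiftVersion = (flags >> 8) & 0xff;
  bool hasCategoryClassProperties = (flags & 0x40) != 0;
  assert(swiftVersion == 5 && hasCategoryClassProperties);
}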
| 1770 | ||||
| 1771 | static std::string swiftVersionString(uint8_t version) { | |||
| 1772 | switch (version) { | |||
| 1773 | case 1: | |||
| 1774 | return "1.0"; | |||
| 1775 | case 2: | |||
| 1776 | return "1.1"; | |||
| 1777 | case 3: | |||
| 1778 | return "2.0"; | |||
| 1779 | case 4: | |||
| 1780 | return "3.0"; | |||
| 1781 | case 5: | |||
| 1782 | return "4.0"; | |||
| 1783 | default: | |||
| 1784 | return ("0x" + Twine::utohexstr(version)).str(); | |||
| 1785 | } | |||
| 1786 | } | |||
| 1787 | ||||
| 1788 | // Validate each object file's __objc_imageinfo and use them to generate the | |||
| 1789 | // image info for the output binary. Only two pieces of info are relevant: | |||
| 1790 | // 1. The Swift version (should be identical across inputs) | |||
| 1791 | // 2. `bool hasCategoryClassProperties` (true only if true for all inputs) | |||
| 1792 | void ObjCImageInfoSection::finalizeContents() { | |||
| 1793 | assert(files.size() != 0); // should have already been checked via isNeeded() | |||
| 1794 | ||||
| 1795 | info.hasCategoryClassProperties = true; | |||
| 1796 | const InputFile *firstFile = nullptr; | |||
| 1797 | for (auto file : files) { | |||
| 1798 | ImageInfo inputInfo = parseImageInfo(file); | |||
| 1799 | info.hasCategoryClassProperties &= inputInfo.hasCategoryClassProperties; | |||
| 1800 | ||||
| 1801 | // swiftVersion 0 means no Swift is present, so no version checking required | |||
| 1802 | if (inputInfo.swiftVersion == 0) | |||
| 1803 | continue; | |||
| 1804 | ||||
| 1805 | if (info.swiftVersion != 0 && info.swiftVersion != inputInfo.swiftVersion) { | |||
| 1806 | error("Swift version mismatch: " + toString(firstFile) + " has version " + | |||
| 1807 | swiftVersionString(info.swiftVersion) + " but " + toString(file) + | |||
| 1808 | " has version " + swiftVersionString(inputInfo.swiftVersion)); | |||
| 1809 | } else { | |||
| 1810 | info.swiftVersion = inputInfo.swiftVersion; | |||
| 1811 | firstFile = file; | |||
| 1812 | } | |||
| 1813 | } | |||
| 1814 | } | |||
| 1815 | ||||
| 1816 | void ObjCImageInfoSection::writeTo(uint8_t *buf) const { | |||
| 1817 | uint32_t flags = info.hasCategoryClassProperties ? 0x40 : 0x0; | |||
| 1818 | flags |= info.swiftVersion << 8; | |||
| 1819 | write32le(buf + 4, flags); | |||
| 1820 | } | |||
| 1821 | ||||
| 1822 | InitOffsetsSection::InitOffsetsSection() | |||
| 1823 | : SyntheticSection(segment_names::text, section_names::initOffsets) { | |||
| 1824 | flags = S_INIT_FUNC_OFFSETS; | |||
| 1825 | } | |||
| 1826 | ||||
| 1827 | uint64_t InitOffsetsSection::getSize() const { | |||
| 1828 | size_t count = 0; | |||
| 1829 | for (const ConcatInputSection *isec : sections) | |||
| 1830 | count += isec->relocs.size(); | |||
| 1831 | return count * sizeof(uint32_t); | |||
| 1832 | } | |||
| 1833 | ||||
| 1834 | void InitOffsetsSection::writeTo(uint8_t *buf) const { | |||
| 1835 | // FIXME: Add function specified by -init when that argument is implemented. | |||
| 1836 | for (ConcatInputSection *isec : sections) { | |||
| 1837 | for (const Reloc &rel : isec->relocs) { | |||
| 1838 | const Symbol *referent = rel.referent.dyn_cast<Symbol *>(); | |||
| 1839 | assert(referent && "section relocation should have been rejected"); | |||
| 1840 | uint64_t offset = referent->getVA() - in.header->addr; | |||
| 1841 | // FIXME: Can we handle this gracefully? | |||
| 1842 | if (offset > UINT32_MAX) | |||
| 1843 | fatal(isec->getLocation(rel.offset) + ": offset to initializer " + | |||
| 1844 | referent->getName() + " (" + utohexstr(offset) + | |||
| 1845 | ") does not fit in 32 bits"); | |||
| 1846 | ||||
| 1847 | // Entries need to be added in the order they appear in the section, but | |||
| 1848 | // relocations aren't guaranteed to be sorted. | |||
| 1849 | size_t index = rel.offset >> target->p2WordSize; | |||
| 1850 | write32le(&buf[index * sizeof(uint32_t)], offset); | |||
| 1851 | } | |||
| 1852 | buf += isec->relocs.size() * sizeof(uint32_t); | |||
| 1853 | } | |||
| 1854 | } | |||
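Because relocations are not guaranteed to be sorted, each 32-bit entry is written at the slot derived from the relocation's byte offset. A tiny illustration, assuming a 64-bit target where `target->p2WordSize` is 3:

#include <cassert>
#include <cstddef>

void initOffsetsSlotExample() {
  unsigned p2WordSize = 3;                    // log2(8); assumption: 64-bit target
  std::size_t relOffset = 24;                 // pointer at index 3 in __mod_init_func
  std::size_t slot = relOffset >> p2WordSize; // == 3
  assert(slot == 3 && "offset 24 fills the fourth uint32_t entry");
}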
| 1855 | ||||
| 1856 | // The inputs are __mod_init_func sections, which contain pointers to | |||
| 1857 | // initializer functions, therefore all relocations should be of the UNSIGNED | |||
| 1858 | // type. InitOffsetsSection stores offsets, so if the initializer's address is | |||
| 1859 | // not known at link time, stub-indirection has to be used. | |||
| 1860 | void InitOffsetsSection::setUp() { | |||
| 1861 | for (const ConcatInputSection *isec : sections) { | |||
| 1862 | for (const Reloc &rel : isec->relocs) { | |||
| 1863 | RelocAttrs attrs = target->getRelocAttrs(rel.type); | |||
| 1864 | if (!attrs.hasAttr(RelocAttrBits::UNSIGNED)) | |||
| 1865 | error(isec->getLocation(rel.offset) + | |||
| 1866 | ": unsupported relocation type: " + attrs.name); | |||
| 1867 | if (rel.addend != 0) | |||
| 1868 | error(isec->getLocation(rel.offset) + | |||
| 1869 | ": relocation addend is not representable in __init_offsets"); | |||
| 1870 | if (rel.referent.is<InputSection *>()) | |||
| 1871 | error(isec->getLocation(rel.offset) + | |||
| 1872 | ": unexpected section relocation"); | |||
| 1873 | ||||
| 1874 | Symbol *sym = rel.referent.dyn_cast<Symbol *>(); | |||
| 1875 | if (auto *undefined = dyn_cast<Undefined>(sym)) | |||
| 1876 | treatUndefinedSymbol(*undefined, isec, rel.offset); | |||
| 1877 | if (needsBinding(sym)) | |||
| 1878 | in.stubs->addEntry(sym); | |||
| 1879 | } | |||
| 1880 | } | |||
| 1881 | } | |||
| 1882 | ||||
| 1883 | void macho::createSyntheticSymbols() { | |||
| 1884 | auto addHeaderSymbol = [](const char *name) { | |||
| 1885 | symtab->addSynthetic(name, in.header->isec, /*value=*/0, | |||
| 1886 | /*isPrivateExtern=*/true, /*includeInSymtab=*/false, | |||
| 1887 | /*referencedDynamically=*/false); | |||
| 1888 | }; | |||
| 1889 | ||||
| 1890 | switch (config->outputType) { | |||
| 1891 | // FIXME: Assign the right address value for these symbols | |||
| 1892 | // (rather than 0). But we need to do that after assignAddresses(). | |||
| 1893 | case MH_EXECUTE: | |||
| 1894 | // If linking PIE, __mh_execute_header is a defined symbol in | |||
| 1895 | // __TEXT,__text. | |||
| 1896 | // Otherwise, it's an absolute symbol. | |||
| 1897 | if (config->isPic) | |||
| 1898 | symtab->addSynthetic("__mh_execute_header", in.header->isec, /*value=*/0, | |||
| 1899 | /*isPrivateExtern=*/false, /*includeInSymtab=*/true, | |||
| 1900 | /*referencedDynamically=*/true); | |||
| 1901 | else | |||
| 1902 | symtab->addSynthetic("__mh_execute_header", /*isec=*/nullptr, /*value=*/0, | |||
| 1903 | /*isPrivateExtern=*/false, /*includeInSymtab=*/true, | |||
| 1904 | /*referencedDynamically=*/true); | |||
| 1905 | break; | |||
| 1906 | ||||
| 1907 | // The following symbols are N_SECT symbols, even though the header is not | |||
| 1908 | // part of any section, and they are private to the bundle/dylib/object | |||
| 1909 | // they are part of. | |||
| 1910 | case MH_BUNDLE: | |||
| 1911 | addHeaderSymbol("__mh_bundle_header"); | |||
| 1912 | break; | |||
| 1913 | case MH_DYLIB: | |||
| 1914 | addHeaderSymbol("__mh_dylib_header"); | |||
| 1915 | break; | |||
| 1916 | case MH_DYLINKER: | |||
| 1917 | addHeaderSymbol("__mh_dylinker_header"); | |||
| 1918 | break; | |||
| 1919 | case MH_OBJECT: | |||
| 1920 | addHeaderSymbol("__mh_object_header"); | |||
| 1921 | break; | |||
| 1922 | default: | |||
| 1923 | llvm_unreachable("unexpected outputType")::llvm::llvm_unreachable_internal("unexpected outputType", "lld/MachO/SyntheticSections.cpp" , 1923); | |||
| 1924 | break; | |||
| 1925 | } | |||
| 1926 | ||||
| 1927 | // The Itanium C++ ABI requires dylibs to pass a pointer to __cxa_atexit | |||
| 1928 | // which does e.g. cleanup of static global variables. The ABI document | |||
| 1929 | // says that the pointer can point to any address in one of the dylib's | |||
| 1930 | // segments, but in practice ld64 seems to set it to point to the header, | |||
| 1931 | // so that's what's implemented here. | |||
| 1932 | addHeaderSymbol("___dso_handle"); | |||
| 1933 | } | |||
| 1934 | ||||
| 1935 | template SymtabSection *macho::makeSymtabSection<LP64>(StringTableSection &); | |||
| 1936 | template SymtabSection *macho::makeSymtabSection<ILP32>(StringTableSection &); |
| 1 | //===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===// | ||||
| 2 | // | ||||
| 3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||
| 4 | // See https://llvm.org/LICENSE.txt for license information. | ||||
| 5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||
| 6 | // | ||||
| 7 | //===----------------------------------------------------------------------===// | ||||
| 8 | // | ||||
| 9 | // This file contains some functions that are useful for math stuff. | ||||
| 10 | // | ||||
| 11 | //===----------------------------------------------------------------------===// | ||||
| 12 | |||||
| 13 | #ifndef LLVM_SUPPORT_MATHEXTRAS_H | ||||
| 14 | #define LLVM_SUPPORT_MATHEXTRAS_H | ||||
| 15 | |||||
| 16 | #include "llvm/ADT/bit.h" | ||||
| 17 | #include "llvm/Support/Compiler.h" | ||||
| 18 | #include <cassert> | ||||
| 19 | #include <climits> | ||||
| 20 | #include <cstdint> | ||||
| 21 | #include <cstring> | ||||
| 22 | #include <limits> | ||||
| 23 | #include <type_traits> | ||||
| 24 | |||||
| 25 | #ifdef _MSC_VER | ||||
| 26 | // Declare these intrinsics manually rather including intrin.h. It's very | ||||
| 27 | // expensive, and MathExtras.h is popular. | ||||
| 28 | // #include <intrin.h> | ||||
| 29 | extern "C" { | ||||
| 30 | unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask); | ||||
| 31 | unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask); | ||||
| 32 | unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask); | ||||
| 33 | unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask); | ||||
| 34 | } | ||||
| 35 | #endif | ||||
| 36 | |||||
| 37 | namespace llvm { | ||||
| 38 | |||||
| 39 | /// The behavior an operation has on an input of 0. | ||||
| 40 | enum ZeroBehavior { | ||||
| 41 | /// The returned value is undefined. | ||||
| 42 | ZB_Undefined, | ||||
| 43 | /// The returned value is numeric_limits<T>::max() | ||||
| 44 | ZB_Max, | ||||
| 45 | /// The returned value is numeric_limits<T>::digits | ||||
| 46 | ZB_Width | ||||
| 47 | }; | ||||
| 48 | |||||
| 49 | /// Mathematical constants. | ||||
| 50 | namespace numbers { | ||||
| 51 | // TODO: Track C++20 std::numbers. | ||||
| 52 | // TODO: Favor using the hexadecimal FP constants (requires C++17). | ||||
| 53 | constexpr double e = 2.7182818284590452354, // (0x1.5bf0a8b145749P+1) https://oeis.org/A001113 | ||||
| 54 | egamma = .57721566490153286061, // (0x1.2788cfc6fb619P-1) https://oeis.org/A001620 | ||||
| 55 | ln2 = .69314718055994530942, // (0x1.62e42fefa39efP-1) https://oeis.org/A002162 | ||||
| 56 | ln10 = 2.3025850929940456840, // (0x1.26bb1bbb55516P+1) https://oeis.org/A002392 | ||||
| 57 | log2e = 1.4426950408889634074, // (0x1.71547652b82feP+0) | ||||
| 58 | log10e = .43429448190325182765, // (0x1.bcb7b1526e50eP-2) | ||||
| 59 | pi = 3.1415926535897932385, // (0x1.921fb54442d18P+1) https://oeis.org/A000796 | ||||
| 60 | inv_pi = .31830988618379067154, // (0x1.45f306bc9c883P-2) https://oeis.org/A049541 | ||||
| 61 | sqrtpi = 1.7724538509055160273, // (0x1.c5bf891b4ef6bP+0) https://oeis.org/A002161 | ||||
| 62 | inv_sqrtpi = .56418958354775628695, // (0x1.20dd750429b6dP-1) https://oeis.org/A087197 | ||||
| 63 | sqrt2 = 1.4142135623730950488, // (0x1.6a09e667f3bcdP+0) https://oeis.org/A002193 | ||||
| 64 | inv_sqrt2 = .70710678118654752440, // (0x1.6a09e667f3bcdP-1) | ||||
| 65 | sqrt3 = 1.7320508075688772935, // (0x1.bb67ae8584caaP+0) https://oeis.org/A002194 | ||||
| 66 | inv_sqrt3 = .57735026918962576451, // (0x1.279a74590331cP-1) | ||||
| 67 | phi = 1.6180339887498948482; // (0x1.9e3779b97f4a8P+0) https://oeis.org/A001622 | ||||
| 68 | constexpr float ef = 2.71828183F, // (0x1.5bf0a8P+1) https://oeis.org/A001113 | ||||
| 69 | egammaf = .577215665F, // (0x1.2788d0P-1) https://oeis.org/A001620 | ||||
| 70 | ln2f = .693147181F, // (0x1.62e430P-1) https://oeis.org/A002162 | ||||
| 71 | ln10f = 2.30258509F, // (0x1.26bb1cP+1) https://oeis.org/A002392 | ||||
| 72 | log2ef = 1.44269504F, // (0x1.715476P+0) | ||||
| 73 | log10ef = .434294482F, // (0x1.bcb7b2P-2) | ||||
| 74 | pif = 3.14159265F, // (0x1.921fb6P+1) https://oeis.org/A000796 | ||||
| 75 | inv_pif = .318309886F, // (0x1.45f306P-2) https://oeis.org/A049541 | ||||
| 76 | sqrtpif = 1.77245385F, // (0x1.c5bf8aP+0) https://oeis.org/A002161 | ||||
| 77 | inv_sqrtpif = .564189584F, // (0x1.20dd76P-1) https://oeis.org/A087197 | ||||
| 78 | sqrt2f = 1.41421356F, // (0x1.6a09e6P+0) https://oeis.org/A002193 | ||||
| 79 | inv_sqrt2f = .707106781F, // (0x1.6a09e6P-1) | ||||
| 80 | sqrt3f = 1.73205081F, // (0x1.bb67aeP+0) https://oeis.org/A002194 | ||||
| 81 | inv_sqrt3f = .577350269F, // (0x1.279a74P-1) | ||||
| 82 | phif = 1.61803399F; // (0x1.9e377aP+0) https://oeis.org/A001622 | ||||
| 83 | } // namespace numbers | ||||
| 84 | |||||
| 85 | namespace detail { | ||||
| 86 | template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter { | ||||
| 87 | static unsigned count(T Val, ZeroBehavior) { | ||||
| 88 | if (!Val) | ||||
| 89 | return std::numeric_limits<T>::digits; | ||||
| 90 | if (Val & 0x1) | ||||
| 91 | return 0; | ||||
| 92 | |||||
| 93 | // Bisection method. | ||||
| 94 | unsigned ZeroBits = 0; | ||||
| 95 | T Shift = std::numeric_limits<T>::digits >> 1; | ||||
| 96 | T Mask = std::numeric_limits<T>::max() >> Shift; | ||||
| 97 | while (Shift) { | ||||
| 98 | if ((Val & Mask) == 0) { | ||||
| 99 | Val >>= Shift; | ||||
| 100 | ZeroBits |= Shift; | ||||
| 101 | } | ||||
| 102 | Shift >>= 1; | ||||
| 103 | Mask >>= Shift; | ||||
| 104 | } | ||||
| 105 | return ZeroBits; | ||||
| 106 | } | ||||
| 107 | }; | ||||
| 108 | |||||
| 109 | #if defined(__GNUC__) || defined(_MSC_VER) | ||||
| 110 | template <typename T> struct TrailingZerosCounter<T, 4> { | ||||
| 111 | static unsigned count(T Val, ZeroBehavior ZB) { | ||||
| 112 | if (ZB != ZB_Undefined && Val == 0) | ||||
| 113 | return 32; | ||||
| 114 | |||||
| 115 | #if __has_builtin(__builtin_ctz) || defined(__GNUC__) | ||||
| 116 | return __builtin_ctz(Val); | ||||
| 117 | #elif defined(_MSC_VER) | ||||
| 118 | unsigned long Index; | ||||
| 119 | _BitScanForward(&Index, Val); | ||||
| 120 | return Index; | ||||
| 121 | #endif | ||||
| 122 | } | ||||
| 123 | }; | ||||
| 124 | |||||
| 125 | #if !defined(_MSC_VER) || defined(_M_X64) | ||||
| 126 | template <typename T> struct TrailingZerosCounter<T, 8> { | ||||
| 127 | static unsigned count(T Val, ZeroBehavior ZB) { | ||||
| 128 | if (ZB != ZB_Undefined && Val == 0) | ||||
| 129 | return 64; | ||||
| 130 | |||||
| 131 | #if __has_builtin(__builtin_ctzll) || defined(__GNUC__) | ||||
| 132 | return __builtin_ctzll(Val); | ||||
| 133 | #elif defined(_MSC_VER) | ||||
| 134 | unsigned long Index; | ||||
| 135 | _BitScanForward64(&Index, Val); | ||||
| 136 | return Index; | ||||
| 137 | #endif | ||||
| 138 | } | ||||
| 139 | }; | ||||
| 140 | #endif | ||||
| 141 | #endif | ||||
| 142 | } // namespace detail | ||||
| 143 | |||||
| 144 | /// Count number of 0's from the least significant bit to the most | ||||
| 145 | /// stopping at the first 1. | ||||
| 146 | /// | ||||
| 147 | /// Only unsigned integral types are allowed. | ||||
| 148 | /// | ||||
| 149 | /// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are | ||||
| 150 | /// valid arguments. | ||||
| 151 | template <typename T> | ||||
| 152 | unsigned countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) { | ||||
| 153 | static_assert(std::is_unsigned_v<T>, | ||||
| 154 | "Only unsigned integral types are allowed."); | ||||
| 155 | return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB); | ||||
| 156 | } | ||||
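A short usage sketch: with the default ZB_Width behaviour, a zero input yields the full bit width, which is exactly why `1 << countTrailingZeros(x)` on an `int` is only well-defined for non-zero `x` (the situation reported in SyntheticSections.cpp above).

#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>

void ctzExamples() {
  assert(llvm::countTrailingZeros(uint32_t(40)) == 3u); // 40 == 0b101000
  assert(llvm::countTrailingZeros(uint32_t(0)) == 32u); // ZB_Width default
  assert(llvm::countTrailingZeros(uint64_t(0)) == 64u);
}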
| 157 | |||||
| 158 | namespace detail { | ||||
| 159 | template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter { | ||||
| 160 | static unsigned count(T Val, ZeroBehavior) { | ||||
| 161 | if (!Val) | ||||
| 162 | return std::numeric_limits<T>::digits; | ||||
| 163 | |||||
| 164 | // Bisection method. | ||||
| 165 | unsigned ZeroBits = 0; | ||||
| 166 | for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) { | ||||
| 167 | T Tmp = Val >> Shift; | ||||
| 168 | if (Tmp) | ||||
| 169 | Val = Tmp; | ||||
| 170 | else | ||||
| 171 | ZeroBits |= Shift; | ||||
| 172 | } | ||||
| 173 | return ZeroBits; | ||||
| 174 | } | ||||
| 175 | }; | ||||
| 176 | |||||
| 177 | #if defined(__GNUC__) || defined(_MSC_VER) | ||||
| 178 | template <typename T> struct LeadingZerosCounter<T, 4> { | ||||
| 179 | static unsigned count(T Val, ZeroBehavior ZB) { | ||||
| 180 | if (ZB != ZB_Undefined && Val == 0) | ||||
| 181 | return 32; | ||||
| 182 | |||||
| 183 | #if __has_builtin(__builtin_clz) || defined(__GNUC__) | ||||
| 184 | return __builtin_clz(Val); | ||||
| 185 | #elif defined(_MSC_VER) | ||||
| 186 | unsigned long Index; | ||||
| 187 | _BitScanReverse(&Index, Val); | ||||
| 188 | return Index ^ 31; | ||||
| 189 | #endif | ||||
| 190 | } | ||||
| 191 | }; | ||||
| 192 | |||||
| 193 | #if !defined(_MSC_VER) || defined(_M_X64) | ||||
| 194 | template <typename T> struct LeadingZerosCounter<T, 8> { | ||||
| 195 | static unsigned count(T Val, ZeroBehavior ZB) { | ||||
| 196 | if (ZB != ZB_Undefined && Val == 0) | ||||
| 197 | return 64; | ||||
| 198 | |||||
| 199 | #if __has_builtin(__builtin_clzll) || defined(__GNUC__) | ||||
| 200 | return __builtin_clzll(Val); | ||||
| 201 | #elif defined(_MSC_VER) | ||||
| 202 | unsigned long Index; | ||||
| 203 | _BitScanReverse64(&Index, Val); | ||||
| 204 | return Index ^ 63; | ||||
| 205 | #endif | ||||
| 206 | } | ||||
| 207 | }; | ||||
| 208 | #endif | ||||
| 209 | #endif | ||||
| 210 | } // namespace detail | ||||
| 211 | |||||
| 212 | /// Count number of 0's from the most significant bit to the least | ||||
| 213 | /// stopping at the first 1. | ||||
| 214 | /// | ||||
| 215 | /// Only unsigned integral types are allowed. | ||||
| 216 | /// | ||||
| 217 | /// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are | ||||
| 218 | /// valid arguments. | ||||
| 219 | template <typename T> | ||||
| 220 | unsigned countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) { | ||||
| 221 | static_assert(std::is_unsigned_v<T>, | ||||
| 222 | "Only unsigned integral types are allowed."); | ||||
| 223 | return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB); | ||||
| 224 | } | ||||
| 225 | |||||
| 226 | /// Get the index of the first set bit starting from the least | ||||
| 227 | /// significant bit. | ||||
| 228 | /// | ||||
| 229 | /// Only unsigned integral types are allowed. | ||||
| 230 | /// | ||||
| 231 | /// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are | ||||
| 232 | /// valid arguments. | ||||
| 233 | template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) { | ||||
| 234 | if (ZB == ZB_Max && Val == 0) | ||||
| 235 | return std::numeric_limits<T>::max(); | ||||
| 236 | |||||
| 237 | return countTrailingZeros(Val, ZB_Undefined); | ||||
| 238 | } | ||||
| 239 | |||||
| 240 | /// Create a bitmask with the N right-most bits set to 1, and all other | ||||
| 241 | /// bits set to 0. Only unsigned types are allowed. | ||||
| 242 | template <typename T> T maskTrailingOnes(unsigned N) { | ||||
| 243 | static_assert(std::is_unsigned<T>::value, "Invalid type!"); | ||||
| 244 | const unsigned Bits = CHAR_BIT * sizeof(T); | ||||
| 245 | assert(N <= Bits && "Invalid bit index"); | ||||
| 246 | return N == 0 ? 0 : (T(-1) >> (Bits - N)); | ||||
| 247 | } | ||||
| 248 | |||||
| 249 | /// Create a bitmask with the N left-most bits set to 1, and all other | ||||
| 250 | /// bits set to 0. Only unsigned types are allowed. | ||||
| 251 | template <typename T> T maskLeadingOnes(unsigned N) { | ||||
| 252 | return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N); | ||||
| 253 | } | ||||
| 254 | |||||
| 255 | /// Create a bitmask with the N right-most bits set to 0, and all other | ||||
| 256 | /// bits set to 1. Only unsigned types are allowed. | ||||
| 257 | template <typename T> T maskTrailingZeros(unsigned N) { | ||||
| 258 | return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N); | ||||
| 259 | } | ||||
| 260 | |||||
| 261 | /// Create a bitmask with the N left-most bits set to 0, and all other | ||||
| 262 | /// bits set to 1. Only unsigned types are allowed. | ||||
| 263 | template <typename T> T maskLeadingZeros(unsigned N) { | ||||
| 264 | return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N); | ||||
| 265 | } | ||||
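Worked values for the four mask helpers above, with N == 4 on a 32-bit type:

#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>

void maskExamples() {
  assert(llvm::maskTrailingOnes<uint32_t>(4)  == 0x0000000Fu);
  assert(llvm::maskLeadingOnes<uint32_t>(4)   == 0xF0000000u);
  assert(llvm::maskTrailingZeros<uint32_t>(4) == 0xFFFFFFF0u);
  assert(llvm::maskLeadingZeros<uint32_t>(4)  == 0x0FFFFFFFu);
}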
| 266 | |||||
| 267 | /// Get the index of the last set bit starting from the least | ||||
| 268 | /// significant bit. | ||||
| 269 | /// | ||||
| 270 | /// Only unsigned integral types are allowed. | ||||
| 271 | /// | ||||
| 272 | /// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are | ||||
| 273 | /// valid arguments. | ||||
| 274 | template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) { | ||||
| 275 | if (ZB == ZB_Max && Val == 0) | ||||
| 276 | return std::numeric_limits<T>::max(); | ||||
| 277 | |||||
| 278 | // Use ^ instead of - because both gcc and llvm can remove the associated ^ | ||||
| 279 | // in the __builtin_clz intrinsic on x86. | ||||
| 280 | return countLeadingZeros(Val, ZB_Undefined) ^ | ||||
| 281 | (std::numeric_limits<T>::digits - 1); | ||||
| 282 | } | ||||
| 283 | |||||
| 284 | /// Macro compressed bit reversal table for 256 bits. | ||||
| 285 | /// | ||||
| 286 | /// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable | ||||
| 287 | static const unsigned char BitReverseTable256[256] = { | ||||
| 288 | #define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64 | ||||
| 289 | #define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16) | ||||
| 290 | #define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4) | ||||
| 291 | R6(0), R6(2), R6(1), R6(3) | ||||
| 292 | #undef R2 | ||||
| 293 | #undef R4 | ||||
| 294 | #undef R6 | ||||
| 295 | }; | ||||
| 296 | |||||
| 297 | /// Reverse the bits in \p Val. | ||||
| 298 | template <typename T> T reverseBits(T Val) { | ||||
| 299 | #if __has_builtin(__builtin_bitreverse8) | ||||
| 300 | if constexpr (std::is_same_v<T, uint8_t>) | ||||
| 301 | return __builtin_bitreverse8(Val); | ||||
| 302 | #endif | ||||
| 303 | #if __has_builtin(__builtin_bitreverse16) | ||||
| 304 | if constexpr (std::is_same_v<T, uint16_t>) | ||||
| 305 | return __builtin_bitreverse16(Val); | ||||
| 306 | #endif | ||||
| 307 | #if __has_builtin(__builtin_bitreverse32) | ||||
| 308 | if constexpr (std::is_same_v<T, uint32_t>) | ||||
| 309 | return __builtin_bitreverse32(Val); | ||||
| 310 | #endif | ||||
| 311 | #if __has_builtin(__builtin_bitreverse64) | ||||
| 312 | if constexpr (std::is_same_v<T, uint64_t>) | ||||
| 313 | return __builtin_bitreverse64(Val); | ||||
| 314 | #endif | ||||
| 315 | |||||
| 316 | unsigned char in[sizeof(Val)]; | ||||
| 317 | unsigned char out[sizeof(Val)]; | ||||
| 318 | std::memcpy(in, &Val, sizeof(Val)); | ||||
| 319 | for (unsigned i = 0; i < sizeof(Val); ++i) | ||||
| 320 | out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]]; | ||||
| 321 | std::memcpy(&Val, out, sizeof(Val)); | ||||
| 322 | return Val; | ||||
| 323 | } | ||||
| 324 | |||||
| 325 | // NOTE: The following support functions use the _32/_64 extensions instead of | ||||
| 326 | // type overloading so that signed and unsigned integers can be used without | ||||
| 327 | // ambiguity. | ||||
| 328 | |||||
| 329 | /// Return the high 32 bits of a 64 bit value. | ||||
| 330 | constexpr inline uint32_t Hi_32(uint64_t Value) { | ||||
| 331 | return static_cast<uint32_t>(Value >> 32); | ||||
| 332 | } | ||||
| 333 | |||||
| 334 | /// Return the low 32 bits of a 64 bit value. | ||||
| 335 | constexpr inline uint32_t Lo_32(uint64_t Value) { | ||||
| 336 | return static_cast<uint32_t>(Value); | ||||
| 337 | } | ||||
| 338 | |||||
| 339 | /// Make a 64-bit integer from a high / low pair of 32-bit integers. | ||||
| 340 | constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) { | ||||
| 341 | return ((uint64_t)High << 32) | (uint64_t)Low; | ||||
| 342 | } | ||||
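A small sketch of the _32/_64 split and join helpers, under the same includes as the earlier sketches:

static void splitJoinSketch() {
  const uint64_t V = 0x1122334455667788ULL;
  assert(llvm::Hi_32(V) == 0x11223344u);
  assert(llvm::Lo_32(V) == 0x55667788u);
  assert(llvm::Make_64(0x11223344u, 0x55667788u) == V);
}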
| 343 | |||||
| 344 | /// Checks if an integer fits into the given bit width. | ||||
| 345 | template <unsigned N> constexpr inline bool isInt(int64_t x) { | ||||
| 346 | if constexpr (N == 8) | ||||
| 347 | return static_cast<int8_t>(x) == x; | ||||
| 348 | if constexpr (N == 16) | ||||
| 349 | return static_cast<int16_t>(x) == x; | ||||
| 350 | if constexpr (N == 32) | ||||
| 351 | return static_cast<int32_t>(x) == x; | ||||
| 352 | if constexpr (N < 64) | ||||
| 353 | return -(INT64_C(1) << (N - 1)) <= x && x < (INT64_C(1) << (N - 1)); | ||||
| 354 | (void)x; // MSVC v19.25 warns that x is unused. | ||||
| 355 | return true; | ||||
| 356 | } | ||||
| 357 | |||||
| 358 | /// Checks if a signed integer is an N bit number shifted left by S. | ||||
| 359 | template <unsigned N, unsigned S> | ||||
| 360 | constexpr inline bool isShiftedInt(int64_t x) { | ||||
| 361 | static_assert( | ||||
| 362 | N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number."); | ||||
| 363 | static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide."); | ||||
| 364 | return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0); | ||||
| 365 | } | ||||
| 366 | |||||
| 367 | /// Checks if an unsigned integer fits into the given bit width. | ||||
| 368 | template <unsigned N> constexpr inline bool isUInt(uint64_t x) { | ||||
| 369 | static_assert(N > 0, "isUInt<0> doesn't make sense"); | ||||
| 370 | if constexpr (N == 8) | ||||
| 371 | return static_cast<uint8_t>(x) == x; | ||||
| 372 | if constexpr (N == 16) | ||||
| 373 | return static_cast<uint16_t>(x) == x; | ||||
| 374 | if constexpr (N == 32) | ||||
| 375 | return static_cast<uint32_t>(x) == x; | ||||
| 376 | if constexpr (N < 64) | ||||
| 377 | return x < (UINT64_C(1) << (N)); | ||||
| 378 | (void)x; // MSVC v19.25 warns that x is unused. | ||||
| 379 | return true; | ||||
| 380 | } | ||||
| 381 | |||||
| 382 | /// Checks if an unsigned integer is an N bit number shifted left by S. | ||||
| 383 | template <unsigned N, unsigned S> | ||||
| 384 | constexpr inline bool isShiftedUInt(uint64_t x) { | ||||
| 385 | static_assert( | ||||
| 386 | N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)"); | ||||
| 387 | static_assert(N + S <= 64, | ||||
| 388 | "isShiftedUInt<N, S> with N + S > 64 is too wide."); | ||||
| 389 | // Per the two static_asserts above, S must be strictly less than 64. So | ||||
| 390 | // 1 << S is not undefined behavior. | ||||
| 391 | return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0); | ||||
| 392 | } | ||||
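A sketch of the static bit-width predicates, under the same includes; the inputs sit just inside and just outside the representable ranges.

static void bitWidthPredicateSketch() {
  assert(llvm::isInt<8>(127) && !llvm::isInt<8>(128) && llvm::isInt<8>(-128));
  assert(llvm::isUInt<12>(4095) && !llvm::isUInt<12>(4096));
  // A shifted value must fit in N + S bits and be a multiple of 1 << S.
  assert(llvm::isShiftedInt<8, 2>(-512) && !llvm::isShiftedInt<8, 2>(-511));
  assert(llvm::isShiftedUInt<8, 4>(0xFF0) && !llvm::isShiftedUInt<8, 4>(0xFF1));
}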
| 393 | |||||
| 394 | /// Gets the maximum value for a N-bit unsigned integer. | ||||
| 395 | inline uint64_t maxUIntN(uint64_t N) { | ||||
| 396 | assert(N > 0 && N <= 64 && "integer width out of range"); | ||||
| 397 | |||||
| 398 | // uint64_t(1) << 64 is undefined behavior, so we can't do | ||||
| 399 | // (uint64_t(1) << N) - 1 | ||||
| 400 | // without checking first that N != 64. But this works and doesn't have a | ||||
| 401 | // branch. | ||||
| 402 | return UINT64_MAX >> (64 - N); | ||||
| 403 | } | ||||
| 404 | |||||
| 405 | /// Gets the minimum value for a N-bit signed integer. | ||||
| 406 | inline int64_t minIntN(int64_t N) { | ||||
| 407 | assert(N > 0 && N <= 64 && "integer width out of range"); | ||||
| 408 | |||||
| 409 | return UINT64_C(1) + ~(UINT64_C(1) << (N - 1)); | ||||
| 410 | } | ||||
| 411 | |||||
| 412 | /// Gets the maximum value for a N-bit signed integer. | ||||
| 413 | inline int64_t maxIntN(int64_t N) { | ||||
| 414 | assert(N > 0 && N <= 64 && "integer width out of range"); | ||||
| 415 | |||||
| 416 | // This relies on two's complement wraparound when N == 64, so we convert to | ||||
| 417 | // int64_t only at the very end to avoid UB. | ||||
| 418 | return (UINT64_C(1) << (N - 1)) - 1; | ||||
| 419 | } | ||||
| 420 | |||||
| 421 | /// Checks if an unsigned integer fits into the given (dynamic) bit width. | ||||
| 422 | inline bool isUIntN(unsigned N, uint64_t x) { | ||||
| 423 | return N >= 64 || x <= maxUIntN(N); | ||||
| 424 | } | ||||
| 425 | |||||
| 426 | /// Checks if a signed integer fits into the given (dynamic) bit width. | ||||
| 427 | inline bool isIntN(unsigned N, int64_t x) { | ||||
| 428 | return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N)); | ||||
| 429 | } | ||||
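A sketch of the dynamic-width limits and range checks, under the same includes:

static void dynamicWidthSketch() {
  assert(llvm::maxUIntN(8) == 255 && llvm::maxUIntN(64) == UINT64_MAX);
  assert(llvm::minIntN(8) == -128 && llvm::maxIntN(8) == 127);
  assert(llvm::isUIntN(12, 4095) && !llvm::isUIntN(12, 4096));
  assert(llvm::isIntN(8, -128) && !llvm::isIntN(8, -129));
}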
| 430 | |||||
| 431 | /// Return true if the argument is a non-empty sequence of ones starting at the | ||||
| 432 | /// least significant bit with the remainder zero (32 bit version). | ||||
| 433 | /// Ex. isMask_32(0x0000FFFFU) == true. | ||||
| 434 | constexpr inline bool isMask_32(uint32_t Value) { | ||||
| 435 | return Value && ((Value + 1) & Value) == 0; | ||||
| 436 | } | ||||
| 437 | |||||
| 438 | /// Return true if the argument is a non-empty sequence of ones starting at the | ||||
| 439 | /// least significant bit with the remainder zero (64 bit version). | ||||
| 440 | constexpr inline bool isMask_64(uint64_t Value) { | ||||
| 441 | return Value && ((Value + 1) & Value) == 0; | ||||
| 442 | } | ||||
| 443 | |||||
| 444 | /// Return true if the argument contains a non-empty sequence of ones with the | ||||
| 445 | /// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true. | ||||
| 446 | constexpr inline bool isShiftedMask_32(uint32_t Value) { | ||||
| 447 | return Value && isMask_32((Value - 1) | Value); | ||||
| 448 | } | ||||
| 449 | |||||
| 450 | /// Return true if the argument contains a non-empty sequence of ones with the | ||||
| 451 | /// remainder zero (64 bit version.) | ||||
| 452 | constexpr inline bool isShiftedMask_64(uint64_t Value) { | ||||
| 453 | return Value && isMask_64((Value - 1) | Value); | ||||
| 454 | } | ||||
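A sketch contrasting masks anchored at bit 0 with shifted masks, under the same includes:

static void maskShapeSketch() {
  assert(llvm::isMask_32(0x0000FFFFu) && !llvm::isMask_32(0x0000FF00u));
  assert(llvm::isShiftedMask_32(0x0000FF00u) && !llvm::isShiftedMask_32(0x00FF00FFu));
  assert(llvm::isMask_64(0x1FFFull) && llvm::isShiftedMask_64(0x3C00ull));
}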
| 455 | |||||
| 456 | /// Return true if the argument is a power of two > 0. | ||||
| 457 | /// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.) | ||||
| 458 | constexpr inline bool isPowerOf2_32(uint32_t Value) { | ||||
| 459 | return llvm::has_single_bit(Value); | ||||
| 460 | } | ||||
| 461 | |||||
| 462 | /// Return true if the argument is a power of two > 0 (64 bit edition.) | ||||
| 463 | constexpr inline bool isPowerOf2_64(uint64_t Value) { | ||||
| 464 | return llvm::has_single_bit(Value); | ||||
| 465 | } | ||||
| 466 | |||||
| 467 | /// Count the number of ones from the most significant bit to the first | ||||
| 468 | /// zero bit. | ||||
| 469 | /// | ||||
| 470 | /// Ex. countLeadingOnes(0xFF0FFF00) == 8. | ||||
| 471 | /// Only unsigned integral types are allowed. | ||||
| 472 | /// | ||||
| 473 | /// \param ZB the behavior on an input of all ones. Only ZB_Width and | ||||
| 474 | /// ZB_Undefined are valid arguments. | ||||
| 475 | template <typename T> | ||||
| 476 | unsigned countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) { | ||||
| 477 | static_assert(std::is_unsigned_v<T>, | ||||
| 478 | "Only unsigned integral types are allowed."); | ||||
| 479 | return countLeadingZeros<T>(~Value, ZB); | ||||
| 480 | } | ||||
| 481 | |||||
| 482 | /// Count the number of ones from the least significant bit to the first | ||||
| 483 | /// zero bit. | ||||
| 484 | /// | ||||
| 485 | /// Ex. countTrailingOnes(0x00FF00FF) == 8. | ||||
| 486 | /// Only unsigned integral types are allowed. | ||||
| 487 | /// | ||||
| 488 | /// \param ZB the behavior on an input of all ones. Only ZB_Width and | ||||
| 489 | /// ZB_Undefined are valid arguments. | ||||
| 490 | template <typename T> | ||||
| 491 | unsigned countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) { | ||||
| 492 | static_assert(std::is_unsigned_v<T>, | ||||
| 493 | "Only unsigned integral types are allowed."); | ||||
| 494 | return countTrailingZeros<T>(~Value, ZB); | ||||
| 495 | } | ||||
| 496 | |||||
| 497 | /// Count the number of set bits in a value. | ||||
| 498 | /// Ex. countPopulation(0xF000F000) = 8 | ||||
| 499 | /// Returns 0 if the word is zero. | ||||
| 500 | template <typename T> | ||||
| 501 | inline unsigned countPopulation(T Value) { | ||||
| 502 | static_assert(std::is_unsigned_v<T>, | ||||
| 503 | "Only unsigned integral types are allowed."); | ||||
| 504 | return (unsigned)llvm::popcount(Value); | ||||
| 505 | } | ||||
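A sketch of the bit-counting helpers, under the same includes; the inputs mirror the examples in the doc comments above.

static void bitCountSketch() {
  assert(llvm::countLeadingOnes(0xFF0FFF00u) == 8);
  assert(llvm::countTrailingOnes(0x00FF00FFu) == 8);
  assert(llvm::countPopulation(0xF000F000u) == 8);
}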
| 506 | |||||
| 507 | /// Return true if the argument contains a non-empty sequence of ones with the | ||||
| 508 | /// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true. | ||||
| 509 | /// If true, \p MaskIdx will specify the index of the lowest set bit and \p | ||||
| 510 | /// MaskLen is updated to specify the length of the mask; otherwise neither is | ||||
| 511 | /// updated. | ||||
| 512 | inline bool isShiftedMask_32(uint32_t Value, unsigned &MaskIdx, | ||||
| 513 | unsigned &MaskLen) { | ||||
| 514 | if (!isShiftedMask_32(Value)) | ||||
| 515 | return false; | ||||
| 516 | MaskIdx = countTrailingZeros(Value); | ||||
| 517 | MaskLen = countPopulation(Value); | ||||
| 518 | return true; | ||||
| 519 | } | ||||
| 520 | |||||
| 521 | /// Return true if the argument contains a non-empty sequence of ones with the | ||||
| 522 | /// remainder zero (64 bit version.) If true, \p MaskIdx will specify the index | ||||
| 523 | /// of the lowest set bit and \p MaskLen is updated to specify the length of the | ||||
| 524 | /// mask; otherwise neither is updated. | ||||
| 525 | inline bool isShiftedMask_64(uint64_t Value, unsigned &MaskIdx, | ||||
| 526 | unsigned &MaskLen) { | ||||
| 527 | if (!isShiftedMask_64(Value)) | ||||
| 528 | return false; | ||||
| 529 | MaskIdx = countTrailingZeros(Value); | ||||
| 530 | MaskLen = countPopulation(Value); | ||||
| 531 | return true; | ||||
| 532 | } | ||||
| 533 | |||||
| 534 | /// Compile time Log2. | ||||
| 535 | /// Valid only for positive powers of two. | ||||
| 536 | template <size_t kValue> constexpr inline size_t CTLog2() { | ||||
| 537 | static_assert(kValue > 0 && llvm::isPowerOf2_64(kValue), | ||||
| 538 | "Value is not a valid power of 2"); | ||||
| 539 | return 1 + CTLog2<kValue / 2>(); | ||||
| 540 | } | ||||
| 541 | |||||
| 542 | template <> constexpr inline size_t CTLog2<1>() { return 0; } | ||||
| 543 | |||||
| 544 | /// Return the floor log base 2 of the specified value, -1 if the value is zero. | ||||
| 545 | /// (32 bit edition.) | ||||
| 546 | /// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2 | ||||
| 547 | inline unsigned Log2_32(uint32_t Value) { | ||||
| 548 | return 31 - countLeadingZeros(Value); | ||||
| 549 | } | ||||
| 550 | |||||
| 551 | /// Return the floor log base 2 of the specified value, -1 if the value is zero. | ||||
| 552 | /// (64 bit edition.) | ||||
| 553 | inline unsigned Log2_64(uint64_t Value) { | ||||
| 554 | return 63 - countLeadingZeros(Value); | ||||
| 555 | } | ||||
| 556 | |||||
| 557 | /// Return the ceil log base 2 of the specified value, 32 if the value is zero. | ||||
| 558 | /// (32 bit edition). | ||||
| 559 | /// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3 | ||||
| 560 | inline unsigned Log2_32_Ceil(uint32_t Value) { | ||||
| 561 | return 32 - countLeadingZeros(Value - 1); | ||||
| 562 | } | ||||
| 563 | |||||
| 564 | /// Return the ceil log base 2 of the specified value, 64 if the value is zero. | ||||
| 565 | /// (64 bit edition.) | ||||
| 566 | inline unsigned Log2_64_Ceil(uint64_t Value) { | ||||
| 567 | return 64 - countLeadingZeros(Value - 1); | ||||
| 568 | } | ||||
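A sketch of the logarithm helpers, under the same includes, showing the floor vs. ceiling behavior on a non-power of two:

static void log2Sketch() {
  static_assert(llvm::CTLog2<16>() == 4, "compile-time log2");
  assert(llvm::Log2_32(32) == 5 && llvm::Log2_32(6) == 2);           // floor
  assert(llvm::Log2_32_Ceil(6) == 3 && llvm::Log2_32_Ceil(32) == 5); // ceiling
  assert(llvm::Log2_64(uint64_t(1) << 40) == 40);
}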
| 569 | |||||
| 570 | /// This function takes a 64-bit integer and returns the bit equivalent double. | ||||
| 571 | inline double BitsToDouble(uint64_t Bits) { | ||||
| 572 | static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes"); | ||||
| 573 | return llvm::bit_cast<double>(Bits); | ||||
| 574 | } | ||||
| 575 | |||||
| 576 | /// This function takes a 32-bit integer and returns the bit equivalent float. | ||||
| 577 | inline float BitsToFloat(uint32_t Bits) { | ||||
| 578 | static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes"); | ||||
| 579 | return llvm::bit_cast<float>(Bits); | ||||
| 580 | } | ||||
| 581 | |||||
| 582 | /// This function takes a double and returns the bit equivalent 64-bit integer. | ||||
| 583 | /// Note that copying doubles around changes the bits of NaNs on some hosts, | ||||
| 584 | /// notably x86, so this routine cannot be used if these bits are needed. | ||||
| 585 | inline uint64_t DoubleToBits(double Double) { | ||||
| 586 | static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes"); | ||||
| 587 | return llvm::bit_cast<uint64_t>(Double); | ||||
| 588 | } | ||||
| 589 | |||||
| 590 | /// This function takes a float and returns the bit equivalent 32-bit integer. | ||||
| 591 | /// Note that copying floats around changes the bits of NaNs on some hosts, | ||||
| 592 | /// notably x86, so this routine cannot be used if these bits are needed. | ||||
| 593 | inline uint32_t FloatToBits(float Float) { | ||||
| 594 | static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes"); | ||||
| 595 | return llvm::bit_cast<uint32_t>(Float); | ||||
| 596 | } | ||||
| 597 | |||||
| 598 | /// A and B are either alignments or offsets. Return the minimum alignment that | ||||
| 599 | /// may be assumed after adding the two together. | ||||
| 600 | constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) { | ||||
| 601 | // The largest power of 2 that divides both A and B. | ||||
| 602 | // | ||||
| 603 | // Replace "-Value" by "1+~Value" in the following commented code to avoid | ||||
| 604 | // MSVC warning C4146 | ||||
| 605 | // return (A | B) & -(A | B); | ||||
| 606 | return (A | B) & (1 + ~(A | B)); | ||||
| 607 | } | ||||
| 608 | |||||
| 609 | /// Returns the next power of two (in 64-bits) that is strictly greater than A. | ||||
| 610 | /// Returns zero on overflow. | ||||
| 611 | constexpr inline uint64_t NextPowerOf2(uint64_t A) { | ||||
| 612 | A |= (A >> 1); | ||||
| 613 | A |= (A >> 2); | ||||
| 614 | A |= (A >> 4); | ||||
| 615 | A |= (A >> 8); | ||||
| 616 | A |= (A >> 16); | ||||
| 617 | A |= (A >> 32); | ||||
| 618 | return A + 1; | ||||
| 619 | } | ||||
| 620 | |||||
| 621 | /// Returns the power of two which is less than or equal to the given value. | ||||
| 622 | /// Essentially, it is a floor operation across the domain of powers of two. | ||||
| 623 | inline uint64_t PowerOf2Floor(uint64_t A) { | ||||
| 624 | if (!A) return 0; | ||||
| 625 | return 1ull << (63 - countLeadingZeros(A, ZB_Undefined)); | ||||
| 626 | } | ||||
| 627 | |||||
| 628 | /// Returns the power of two which is greater than or equal to the given value. | ||||
| 629 | /// Essentially, it is a ceil operation across the domain of powers of two. | ||||
| 630 | inline uint64_t PowerOf2Ceil(uint64_t A) { | ||||
| 631 | if (!A) | ||||
| 632 | return 0; | ||||
| 633 | return NextPowerOf2(A - 1); | ||||
| 634 | } | ||||
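A sketch of the power-of-two helpers, under the same includes; note that NextPowerOf2 is strictly greater than its argument, while PowerOf2Ceil is greater than or equal.

static void powerOfTwoSketch() {
  assert(llvm::isPowerOf2_32(0x00100000u) && !llvm::isPowerOf2_32(0x00100001u));
  assert(llvm::MinAlign(8, 12) == 4);  // largest power of 2 dividing both
  assert(llvm::NextPowerOf2(8) == 16); // strictly greater
  assert(llvm::PowerOf2Floor(20) == 16);
  assert(llvm::PowerOf2Ceil(20) == 32 && llvm::PowerOf2Ceil(16) == 16);
}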
| 635 | |||||
| 636 | /// Returns the next integer (mod 2**64) that is greater than or equal to | ||||
| 637 | /// \p Value and is a multiple of \p Align. \p Align must be non-zero. | ||||
| 638 | /// | ||||
| 639 | /// Examples: | ||||
| 640 | /// \code | ||||
| 641 | /// alignTo(5, 8) = 8 | ||||
| 642 | /// alignTo(17, 8) = 24 | ||||
| 643 | /// alignTo(~0LL, 8) = 0 | ||||
| 644 | /// alignTo(321, 255) = 510 | ||||
| 645 | /// \endcode | ||||
| 646 | inline uint64_t alignTo(uint64_t Value, uint64_t Align) { | ||||
| 647 | assert(Align != 0u && "Align can't be 0."); | ||||
| 648 | return (Value + Align - 1) / Align * Align; | ||||
| 649 | } | ||||
| 650 | |||||
| 651 | inline uint64_t alignToPowerOf2(uint64_t Value, uint64_t Align) { | ||||
| 652 | assert(Align != 0 && (Align & (Align - 1)) == 0 && | ||||
| 653 | "Align must be a power of 2"); | ||||
| 654 | return (Value + Align - 1) & -Align; | ||||
| 655 | } | ||||
| 656 | |||||
| 657 | /// If non-zero \p Skew is specified, the return value will be a minimal integer | ||||
| 658 | /// that is greater than or equal to \p Value and equal to \p Align * N + \p Skew | ||||
| 659 | /// for some integer N. If \p Skew is larger than \p Align, its value is adjusted | ||||
| 660 | /// to '\p Skew mod \p Align'. \p Align must be non-zero. | ||||
| 661 | /// | ||||
| 662 | /// Examples: | ||||
| 663 | /// \code | ||||
| 664 | /// alignTo(5, 8, 7) = 7 | ||||
| 665 | /// alignTo(17, 8, 1) = 17 | ||||
| 666 | /// alignTo(~0LL, 8, 3) = 3 | ||||
| 667 | /// alignTo(321, 255, 42) = 552 | ||||
| 668 | /// \endcode | ||||
| 669 | inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew) { | ||||
| 670 | assert(Align != 0u && "Align can't be 0."); | ||||
| 671 | Skew %= Align; | ||||
| 672 | return alignTo(Value - Skew, Align) + Skew; | ||||
| 673 | } | ||||
| 674 | |||||
| 675 | /// Returns the next integer (mod 2**64) that is greater than or equal to | ||||
| 676 | /// \p Value and is a multiple of \c Align. \c Align must be non-zero. | ||||
| 677 | template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) { | ||||
| 678 | static_assert(Align != 0u, "Align must be non-zero"); | ||||
| 679 | return (Value + Align - 1) / Align * Align; | ||||
| 680 | } | ||||
| 681 | |||||
| 682 | /// Returns the integer ceil(Numerator / Denominator). | ||||
| 683 | inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) { | ||||
| 684 | return alignTo(Numerator, Denominator) / Denominator; | ||||
| 685 | } | ||||
| 686 | |||||
| 687 | /// Returns the integer nearest(Numerator / Denominator). | ||||
| 688 | inline uint64_t divideNearest(uint64_t Numerator, uint64_t Denominator) { | ||||
| 689 | return (Numerator + (Denominator / 2)) / Denominator; | ||||
| 690 | } | ||||
| 691 | |||||
| 692 | /// Returns the largest uint64_t less than or equal to \p Value that is | ||||
| 693 | /// \p Skew mod \p Align. \p Align must be non-zero. | ||||
| 694 | inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) { | ||||
| 695 | assert(Align != 0u && "Align can't be 0."); | ||||
| 696 | Skew %= Align; | ||||
| 697 | return (Value - Skew) / Align * Align + Skew; | ||||
| 698 | } | ||||
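A sketch of the alignment and integer-division helpers, under the same includes; the values mirror the worked examples in the comments above.

static void alignmentSketch() {
  assert(llvm::alignTo(5, 8) == 8 && llvm::alignTo(17, 8) == 24);
  assert(llvm::alignTo(17, 8, 1) == 17); // skewed alignment
  assert(llvm::alignDown(17, 8) == 16);
  assert(llvm::divideCeil(7, 2) == 4 && llvm::divideNearest(7, 2) == 4);
}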
| 699 | |||||
| 700 | /// Sign-extend the number in the bottom B bits of X to a 32-bit integer. | ||||
| 701 | /// Requires 0 < B <= 32. | ||||
| 702 | template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) { | ||||
| 703 | static_assert(B > 0, "Bit width can't be 0."); | ||||
| 704 | static_assert(B <= 32, "Bit width out of range."); | ||||
| 705 | return int32_t(X << (32 - B)) >> (32 - B); | ||||
| 706 | } | ||||
| 707 | |||||
| 708 | /// Sign-extend the number in the bottom B bits of X to a 32-bit integer. | ||||
| 709 | /// Requires 0 < B <= 32. | ||||
| 710 | inline int32_t SignExtend32(uint32_t X, unsigned B) { | ||||
| 711 | assert(B > 0 && "Bit width can't be 0."); | ||||
| 712 | assert(B <= 32 && "Bit width out of range."); | ||||
| 713 | return int32_t(X << (32 - B)) >> (32 - B); | ||||
| 714 | } | ||||
| 715 | |||||
| 716 | /// Sign-extend the number in the bottom B bits of X to a 64-bit integer. | ||||
| 717 | /// Requires 0 < B <= 64. | ||||
| 718 | template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) { | ||||
| 719 | static_assert(B > 0, "Bit width can't be 0."); | ||||
| 720 | static_assert(B <= 64, "Bit width out of range."); | ||||
| 721 | return int64_t(x << (64 - B)) >> (64 - B); | ||||
| 722 | } | ||||
| 723 | |||||
| 724 | /// Sign-extend the number in the bottom B bits of X to a 64-bit integer. | ||||
| 725 | /// Requires 0 < B <= 64. | ||||
| 726 | inline int64_t SignExtend64(uint64_t X, unsigned B) { | ||||
| 727 | assert(B > 0 && "Bit width can't be 0."); | ||||
| 728 | assert(B <= 64 && "Bit width out of range."); | ||||
| 729 | return int64_t(X << (64 - B)) >> (64 - B); | ||||
| 730 | } | ||||
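A sketch of sign extension from a narrow bit-field, under the same includes; bit B-1 of the input is treated as the sign bit.

static void signExtendSketch() {
  assert(llvm::SignExtend32<8>(0xFFu) == -1);
  assert(llvm::SignExtend32<8>(0x7Fu) == 127);
  assert(llvm::SignExtend64(0x8000u, 16) == -32768);
}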
| 731 | |||||
| 732 | /// Subtract two unsigned integers, X and Y, of type T and return the absolute | ||||
| 733 | /// value of the result. | ||||
| 734 | template <typename T> | ||||
| 735 | std::enable_if_t<std::is_unsigned<T>::value, T> AbsoluteDifference(T X, T Y) { | ||||
| 736 | return X > Y ? (X - Y) : (Y - X); | ||||
| 737 | } | ||||
| 738 | |||||
| 739 | /// Add two unsigned integers, X and Y, of type T. Clamp the result to the | ||||
| 740 | /// maximum representable value of T on overflow. ResultOverflowed indicates if | ||||
| 741 | /// the result is larger than the maximum representable value of type T. | ||||
| 742 | template <typename T> | ||||
| 743 | std::enable_if_t<std::is_unsigned<T>::value, T> | ||||
| 744 | SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) { | ||||
| 745 | bool Dummy; | ||||
| 746 | bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy; | ||||
| 747 | // Hacker's Delight, p. 29 | ||||
| 748 | T Z = X + Y; | ||||
| 749 | Overflowed = (Z < X || Z < Y); | ||||
| 750 | if (Overflowed) | ||||
| 751 | return std::numeric_limits<T>::max(); | ||||
| 752 | else | ||||
| 753 | return Z; | ||||
| 754 | } | ||||
| 755 | |||||
| 756 | /// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the | ||||
| 757 | /// maximum representable value of T on overflow. ResultOverflowed indicates if | ||||
| 758 | /// the result is larger than the maximum representable value of type T. | ||||
| 759 | template <typename T> | ||||
| 760 | std::enable_if_t<std::is_unsigned<T>::value, T> | ||||
| 761 | SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) { | ||||
| 762 | bool Dummy; | ||||
| 763 | bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy; | ||||
| 764 | |||||
| 765 | // Hacker's Delight, p. 30 has a different algorithm, but we don't use that | ||||
| 766 | // because it fails for uint16_t (where multiplication can have undefined | ||||
| 767 | // behavior due to promotion to int), and requires a division in addition | ||||
| 768 | // to the multiplication. | ||||
| 769 | |||||
| 770 | Overflowed = false; | ||||
| 771 | |||||
| 772 | // Log2(Z) would be either Log2Z or Log2Z + 1. | ||||
| 773 | // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z | ||||
| 774 | // will necessarily be less than Log2Max as desired. | ||||
| 775 | int Log2Z = Log2_64(X) + Log2_64(Y); | ||||
| 776 | const T Max = std::numeric_limits<T>::max(); | ||||
| 777 | int Log2Max = Log2_64(Max); | ||||
| 778 | if (Log2Z < Log2Max) { | ||||
| 779 | return X * Y; | ||||
| 780 | } | ||||
| 781 | if (Log2Z > Log2Max) { | ||||
| 782 | Overflowed = true; | ||||
| 783 | return Max; | ||||
| 784 | } | ||||
| 785 | |||||
| 786 | // We're going to use the top bit, and maybe overflow one | ||||
| 787 | // bit past it. Multiply all but the bottom bit then add | ||||
| 788 | // that on at the end. | ||||
| 789 | T Z = (X >> 1) * Y; | ||||
| 790 | if (Z & ~(Max >> 1)) { | ||||
| 791 | Overflowed = true; | ||||
| 792 | return Max; | ||||
| 793 | } | ||||
| 794 | Z <<= 1; | ||||
| 795 | if (X & 1) | ||||
| 796 | return SaturatingAdd(Z, Y, ResultOverflowed); | ||||
| 797 | |||||
| 798 | return Z; | ||||
| 799 | } | ||||
| 800 | |||||
| 801 | /// Multiply two unsigned integers, X and Y, and add the unsigned integer A to | ||||
| 802 | /// the product. Clamp the result to the maximum representable value of T on | ||||
| 803 | /// overflow. ResultOverflowed indicates if the result is larger than the | ||||
| 804 | /// maximum representable value of type T. | ||||
| 805 | template <typename T> | ||||
| 806 | std::enable_if_t<std::is_unsigned<T>::value, T> | ||||
| 807 | SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) { | ||||
| 808 | bool Dummy; | ||||
| 809 | bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy; | ||||
| 810 | |||||
| 811 | T Product = SaturatingMultiply(X, Y, &Overflowed); | ||||
| 812 | if (Overflowed) | ||||
| 813 | return Product; | ||||
| 814 | |||||
| 815 | return SaturatingAdd(A, Product, &Overflowed); | ||||
| 816 | } | ||||
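A sketch of the saturating helpers, under the same includes; with uint8_t the clamp value is 255, and the optional out-parameter reports whether clamping happened.

static void saturatingSketch() {
  bool Overflowed = false;
  assert(llvm::SaturatingAdd<uint8_t>(200, 100, &Overflowed) == 255 && Overflowed);
  assert(llvm::SaturatingMultiply<uint8_t>(20, 20, &Overflowed) == 255 && Overflowed);
  assert(llvm::SaturatingMultiplyAdd<uint8_t>(10, 10, 50, &Overflowed) == 150 &&
         !Overflowed);
}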
| 817 | |||||
| 818 | /// Use this rather than HUGE_VALF; the latter causes warnings on MSVC. | ||||
| 819 | extern const float huge_valf; | ||||
| 820 | |||||
| 821 | |||||
| 822 | /// Add two signed integers, computing the two's complement truncated result, | ||||
| 823 | /// returning true if overflow occurred. | ||||
| 824 | template <typename T> | ||||
| 825 | std::enable_if_t<std::is_signed<T>::value, T> AddOverflow(T X, T Y, T &Result) { | ||||
| 826 | #if __has_builtin(__builtin_add_overflow) | ||||
| 827 | return __builtin_add_overflow(X, Y, &Result); | ||||
| 828 | #else | ||||
| 829 | // Perform the unsigned addition. | ||||
| 830 | using U = std::make_unsigned_t<T>; | ||||
| 831 | const U UX = static_cast<U>(X); | ||||
| 832 | const U UY = static_cast<U>(Y); | ||||
| 833 | const U UResult = UX + UY; | ||||
| 834 | |||||
| 835 | // Convert to signed. | ||||
| 836 | Result = static_cast<T>(UResult); | ||||
| 837 | |||||
| 838 | // Adding two positive numbers should result in a positive number. | ||||
| 839 | if (X > 0 && Y > 0) | ||||
| 840 | return Result <= 0; | ||||
| 841 | // Adding two negatives should result in a negative number. | ||||
| 842 | if (X < 0 && Y < 0) | ||||
| 843 | return Result >= 0; | ||||
| 844 | return false; | ||||
| 845 | #endif | ||||
| 846 | } | ||||
| 847 | |||||
| 848 | /// Subtract two signed integers, computing the two's complement truncated | ||||
| 849 | /// result, returning true if an overflow occurred. | ||||
| 850 | template <typename T> | ||||
| 851 | std::enable_if_t<std::is_signed<T>::value, T> SubOverflow(T X, T Y, T &Result) { | ||||
| 852 | #if __has_builtin(__builtin_sub_overflow) | ||||
| 853 | return __builtin_sub_overflow(X, Y, &Result); | ||||
| 854 | #else | ||||
| 855 | // Perform the unsigned addition. | ||||
| 856 | using U = std::make_unsigned_t<T>; | ||||
| 857 | const U UX = static_cast<U>(X); | ||||
| 858 | const U UY = static_cast<U>(Y); | ||||
| 859 | const U UResult = UX - UY; | ||||
| 860 | |||||
| 861 | // Convert to signed. | ||||
| 862 | Result = static_cast<T>(UResult); | ||||
| 863 | |||||
| 864 | // Subtracting a positive number from a negative results in a negative number. | ||||
| 865 | if (X <= 0 && Y > 0) | ||||
| 866 | return Result >= 0; | ||||
| 867 | // Subtracting a negative number from a positive results in a positive number. | ||||
| 868 | if (X >= 0 && Y < 0) | ||||
| 869 | return Result <= 0; | ||||
| 870 | return false; | ||||
| 871 | #endif | ||||
| 872 | } | ||||
| 873 | |||||
| 874 | /// Multiply two signed integers, computing the two's complement truncated | ||||
| 875 | /// result, returning true if an overflow occurred. | ||||
| 876 | template <typename T> | ||||
| 877 | std::enable_if_t<std::is_signed<T>::value, T> MulOverflow(T X, T Y, T &Result) { | ||||
| 878 | // Perform the unsigned multiplication on absolute values. | ||||
| 879 | using U = std::make_unsigned_t<T>; | ||||
| 880 | const U UX = X < 0 ? (0 - static_cast<U>(X)) : static_cast<U>(X); | ||||
| 881 | const U UY = Y < 0 ? (0 - static_cast<U>(Y)) : static_cast<U>(Y); | ||||
| 882 | const U UResult = UX * UY; | ||||
| 883 | |||||
| 884 | // Convert to signed. | ||||
| 885 | const bool IsNegative = (X < 0) ^ (Y < 0); | ||||
| 886 | Result = IsNegative ? (0 - UResult) : UResult; | ||||
| 887 | |||||
| 888 | // If any of the args was 0, result is 0 and no overflow occurs. | ||||
| 889 | if (UX == 0 || UY == 0) | ||||
| 890 | return false; | ||||
| 891 | |||||
| 892 | // UX and UY are in [1, 2^n], where n is the number of digits. | ||||
| 893 | // Check how the max allowed absolute value (2^n for negative, 2^(n-1) for | ||||
| 894 | // positive) divided by an argument compares to the other. | ||||
| 895 | if (IsNegative) | ||||
| 896 | return UX > (static_cast<U>(std::numeric_limits<T>::max()) + U(1)) / UY; | ||||
| 897 | else | ||||
| 898 | return UX > (static_cast<U>(std::numeric_limits<T>::max())) / UY; | ||||
| 899 | } | ||||
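Finally, a sketch of the overflow-detecting signed helpers, under the same includes; each stores the two's complement truncated result and returns a nonzero value exactly when the exact result does not fit in T.

static void checkedArithmeticSketch() {
  int32_t Result;
  assert(llvm::AddOverflow<int32_t>(INT32_MAX, 1, Result) && Result == INT32_MIN);
  assert(llvm::SubOverflow<int32_t>(INT32_MIN, 1, Result) && Result == INT32_MAX);
  assert(llvm::MulOverflow<int32_t>(100000, 100000, Result)); // 1e10 does not fit
  assert(!llvm::AddOverflow<int32_t>(2, 2, Result) && Result == 4);
}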
| 900 | |||||
| 901 | } // End llvm namespace | ||||
| 902 | |||||
| 903 | #endif |