File: | lib/Bitcode/Reader/BitcodeReader.cpp |
Warning: | line 2878, column 3 1st function call argument is an uninitialized value |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
//===- BitcodeReader.cpp - Internal BitcodeReader implementation ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

10 | #include "llvm/Bitcode/BitcodeReader.h" | |||
11 | #include "MetadataLoader.h" | |||
12 | #include "ValueList.h" | |||
13 | #include "llvm/ADT/APFloat.h" | |||
14 | #include "llvm/ADT/APInt.h" | |||
15 | #include "llvm/ADT/ArrayRef.h" | |||
16 | #include "llvm/ADT/DenseMap.h" | |||
17 | #include "llvm/ADT/Optional.h" | |||
18 | #include "llvm/ADT/STLExtras.h" | |||
19 | #include "llvm/ADT/SmallString.h" | |||
20 | #include "llvm/ADT/SmallVector.h" | |||
21 | #include "llvm/ADT/StringRef.h" | |||
22 | #include "llvm/ADT/Triple.h" | |||
23 | #include "llvm/ADT/Twine.h" | |||
24 | #include "llvm/Bitcode/BitstreamReader.h" | |||
25 | #include "llvm/Bitcode/LLVMBitCodes.h" | |||
26 | #include "llvm/IR/Argument.h" | |||
27 | #include "llvm/IR/Attributes.h" | |||
28 | #include "llvm/IR/AutoUpgrade.h" | |||
29 | #include "llvm/IR/BasicBlock.h" | |||
30 | #include "llvm/IR/CallSite.h" | |||
31 | #include "llvm/IR/CallingConv.h" | |||
32 | #include "llvm/IR/Comdat.h" | |||
33 | #include "llvm/IR/Constant.h" | |||
34 | #include "llvm/IR/Constants.h" | |||
35 | #include "llvm/IR/DataLayout.h" | |||
36 | #include "llvm/IR/DebugInfo.h" | |||
37 | #include "llvm/IR/DebugInfoMetadata.h" | |||
38 | #include "llvm/IR/DebugLoc.h" | |||
39 | #include "llvm/IR/DerivedTypes.h" | |||
40 | #include "llvm/IR/Function.h" | |||
41 | #include "llvm/IR/GVMaterializer.h" | |||
42 | #include "llvm/IR/GlobalAlias.h" | |||
43 | #include "llvm/IR/GlobalIFunc.h" | |||
44 | #include "llvm/IR/GlobalIndirectSymbol.h" | |||
45 | #include "llvm/IR/GlobalObject.h" | |||
46 | #include "llvm/IR/GlobalValue.h" | |||
47 | #include "llvm/IR/GlobalVariable.h" | |||
48 | #include "llvm/IR/InlineAsm.h" | |||
49 | #include "llvm/IR/InstIterator.h" | |||
50 | #include "llvm/IR/InstrTypes.h" | |||
51 | #include "llvm/IR/Instruction.h" | |||
52 | #include "llvm/IR/Instructions.h" | |||
53 | #include "llvm/IR/Intrinsics.h" | |||
54 | #include "llvm/IR/LLVMContext.h" | |||
55 | #include "llvm/IR/Metadata.h" | |||
56 | #include "llvm/IR/Module.h" | |||
57 | #include "llvm/IR/ModuleSummaryIndex.h" | |||
58 | #include "llvm/IR/Operator.h" | |||
59 | #include "llvm/IR/Type.h" | |||
60 | #include "llvm/IR/Value.h" | |||
61 | #include "llvm/IR/Verifier.h" | |||
62 | #include "llvm/Support/AtomicOrdering.h" | |||
63 | #include "llvm/Support/Casting.h" | |||
64 | #include "llvm/Support/CommandLine.h" | |||
65 | #include "llvm/Support/Compiler.h" | |||
66 | #include "llvm/Support/Debug.h" | |||
67 | #include "llvm/Support/Error.h" | |||
68 | #include "llvm/Support/ErrorHandling.h" | |||
69 | #include "llvm/Support/ErrorOr.h" | |||
70 | #include "llvm/Support/ManagedStatic.h" | |||
71 | #include "llvm/Support/MathExtras.h" | |||
72 | #include "llvm/Support/MemoryBuffer.h" | |||
73 | #include "llvm/Support/raw_ostream.h" | |||
74 | #include <algorithm> | |||
75 | #include <cassert> | |||
76 | #include <cstddef> | |||
77 | #include <cstdint> | |||
78 | #include <deque> | |||
79 | #include <map> | |||
80 | #include <memory> | |||
81 | #include <set> | |||
82 | #include <string> | |||
83 | #include <system_error> | |||
84 | #include <tuple> | |||
85 | #include <utility> | |||
86 | #include <vector> | |||
87 | ||||
using namespace llvm;

// Debugging/triage aid: hidden command-line flag, off by default, that dumps
// the computed global id for each value while the module summary is read.
static cl::opt<bool> PrintSummaryGUIDs(
    "print-summary-global-ids", cl::init(false), cl::Hidden,
    cl::desc(
        "Print the global id for each value when reading the module summary"));
namespace {

// Magic constant associated with the switch-instruction record encoding; the
// value encodes its introduction date (name suggests SwitchInst records —
// confirm at the use sites later in the file).
enum {
  SWITCH_INST_MAGIC = 0x4B5 // May 2012 => 1205 => Hex
};

} // end anonymous namespace
102 | ||||
103 | static Error error(const Twine &Message) { | |||
104 | return make_error<StringError>( | |||
105 | Message, make_error_code(BitcodeError::CorruptedBitcode)); | |||
106 | } | |||
107 | ||||
108 | /// Helper to read the header common to all bitcode files. | |||
109 | static bool hasValidBitcodeHeader(BitstreamCursor &Stream) { | |||
110 | // Sniff for the signature. | |||
111 | if (!Stream.canSkipToPos(4) || | |||
112 | Stream.Read(8) != 'B' || | |||
113 | Stream.Read(8) != 'C' || | |||
114 | Stream.Read(4) != 0x0 || | |||
115 | Stream.Read(4) != 0xC || | |||
116 | Stream.Read(4) != 0xE || | |||
117 | Stream.Read(4) != 0xD) | |||
118 | return false; | |||
119 | return true; | |||
120 | } | |||
121 | ||||
122 | static Expected<BitstreamCursor> initStream(MemoryBufferRef Buffer) { | |||
123 | const unsigned char *BufPtr = (const unsigned char *)Buffer.getBufferStart(); | |||
124 | const unsigned char *BufEnd = BufPtr + Buffer.getBufferSize(); | |||
125 | ||||
126 | if (Buffer.getBufferSize() & 3) | |||
127 | return error("Invalid bitcode signature"); | |||
128 | ||||
129 | // If we have a wrapper header, parse it and ignore the non-bc file contents. | |||
130 | // The magic number is 0x0B17C0DE stored in little endian. | |||
131 | if (isBitcodeWrapper(BufPtr, BufEnd)) | |||
132 | if (SkipBitcodeWrapperHeader(BufPtr, BufEnd, true)) | |||
133 | return error("Invalid bitcode wrapper header"); | |||
134 | ||||
135 | BitstreamCursor Stream(ArrayRef<uint8_t>(BufPtr, BufEnd)); | |||
136 | if (!hasValidBitcodeHeader(Stream)) | |||
137 | return error("Invalid bitcode signature"); | |||
138 | ||||
139 | return std::move(Stream); | |||
140 | } | |||
141 | ||||
142 | /// Convert a string from a record into an std::string, return true on failure. | |||
143 | template <typename StrTy> | |||
144 | static bool convertToString(ArrayRef<uint64_t> Record, unsigned Idx, | |||
145 | StrTy &Result) { | |||
146 | if (Idx > Record.size()) | |||
147 | return true; | |||
148 | ||||
149 | for (unsigned i = Idx, e = Record.size(); i != e; ++i) | |||
150 | Result += (char)Record[i]; | |||
151 | return false; | |||
152 | } | |||
153 | ||||
154 | // Strip all the TBAA attachment for the module. | |||
155 | static void stripTBAA(Module *M) { | |||
156 | for (auto &F : *M) { | |||
157 | if (F.isMaterializable()) | |||
158 | continue; | |||
159 | for (auto &I : instructions(F)) | |||
160 | I.setMetadata(LLVMContext::MD_tbaa, nullptr); | |||
161 | } | |||
162 | } | |||
163 | ||||
164 | /// Read the "IDENTIFICATION_BLOCK_ID" block, do some basic enforcement on the | |||
165 | /// "epoch" encoded in the bitcode, and return the producer name if any. | |||
166 | static Expected<std::string> readIdentificationBlock(BitstreamCursor &Stream) { | |||
167 | if (Stream.EnterSubBlock(bitc::IDENTIFICATION_BLOCK_ID)) | |||
168 | return error("Invalid record"); | |||
169 | ||||
170 | // Read all the records. | |||
171 | SmallVector<uint64_t, 64> Record; | |||
172 | ||||
173 | std::string ProducerIdentification; | |||
174 | ||||
175 | while (true) { | |||
176 | BitstreamEntry Entry = Stream.advance(); | |||
177 | ||||
178 | switch (Entry.Kind) { | |||
179 | default: | |||
180 | case BitstreamEntry::Error: | |||
181 | return error("Malformed block"); | |||
182 | case BitstreamEntry::EndBlock: | |||
183 | return ProducerIdentification; | |||
184 | case BitstreamEntry::Record: | |||
185 | // The interesting case. | |||
186 | break; | |||
187 | } | |||
188 | ||||
189 | // Read a record. | |||
190 | Record.clear(); | |||
191 | unsigned BitCode = Stream.readRecord(Entry.ID, Record); | |||
192 | switch (BitCode) { | |||
193 | default: // Default behavior: reject | |||
194 | return error("Invalid value"); | |||
195 | case bitc::IDENTIFICATION_CODE_STRING: // IDENTIFICATION: [strchr x N] | |||
196 | convertToString(Record, 0, ProducerIdentification); | |||
197 | break; | |||
198 | case bitc::IDENTIFICATION_CODE_EPOCH: { // EPOCH: [epoch#] | |||
199 | unsigned epoch = (unsigned)Record[0]; | |||
200 | if (epoch != bitc::BITCODE_CURRENT_EPOCH) { | |||
201 | return error( | |||
202 | Twine("Incompatible epoch: Bitcode '") + Twine(epoch) + | |||
203 | "' vs current: '" + Twine(bitc::BITCODE_CURRENT_EPOCH) + "'"); | |||
204 | } | |||
205 | } | |||
206 | } | |||
207 | } | |||
208 | } | |||
209 | ||||
210 | static Expected<std::string> readIdentificationCode(BitstreamCursor &Stream) { | |||
211 | // We expect a number of well-defined blocks, though we don't necessarily | |||
212 | // need to understand them all. | |||
213 | while (true) { | |||
214 | if (Stream.AtEndOfStream()) | |||
215 | return ""; | |||
216 | ||||
217 | BitstreamEntry Entry = Stream.advance(); | |||
218 | switch (Entry.Kind) { | |||
219 | case BitstreamEntry::EndBlock: | |||
220 | case BitstreamEntry::Error: | |||
221 | return error("Malformed block"); | |||
222 | ||||
223 | case BitstreamEntry::SubBlock: | |||
224 | if (Entry.ID == bitc::IDENTIFICATION_BLOCK_ID) | |||
225 | return readIdentificationBlock(Stream); | |||
226 | ||||
227 | // Ignore other sub-blocks. | |||
228 | if (Stream.SkipBlock()) | |||
229 | return error("Malformed block"); | |||
230 | continue; | |||
231 | case BitstreamEntry::Record: | |||
232 | Stream.skipRecord(Entry.ID); | |||
233 | continue; | |||
234 | } | |||
235 | } | |||
236 | } | |||
237 | ||||
238 | static Expected<bool> hasObjCCategoryInModule(BitstreamCursor &Stream) { | |||
239 | if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID)) | |||
240 | return error("Invalid record"); | |||
241 | ||||
242 | SmallVector<uint64_t, 64> Record; | |||
243 | // Read all the records for this module. | |||
244 | ||||
245 | while (true) { | |||
246 | BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); | |||
247 | ||||
248 | switch (Entry.Kind) { | |||
249 | case BitstreamEntry::SubBlock: // Handled for us already. | |||
250 | case BitstreamEntry::Error: | |||
251 | return error("Malformed block"); | |||
252 | case BitstreamEntry::EndBlock: | |||
253 | return false; | |||
254 | case BitstreamEntry::Record: | |||
255 | // The interesting case. | |||
256 | break; | |||
257 | } | |||
258 | ||||
259 | // Read a record. | |||
260 | switch (Stream.readRecord(Entry.ID, Record)) { | |||
261 | default: | |||
262 | break; // Default behavior, ignore unknown content. | |||
263 | case bitc::MODULE_CODE_SECTIONNAME: { // SECTIONNAME: [strchr x N] | |||
264 | std::string S; | |||
265 | if (convertToString(Record, 0, S)) | |||
266 | return error("Invalid record"); | |||
267 | // Check for the i386 and other (x86_64, ARM) conventions | |||
268 | if (S.find("__DATA,__objc_catlist") != std::string::npos || | |||
269 | S.find("__OBJC,__category") != std::string::npos) | |||
270 | return true; | |||
271 | break; | |||
272 | } | |||
273 | } | |||
274 | Record.clear(); | |||
275 | } | |||
276 | llvm_unreachable("Exit infinite loop")::llvm::llvm_unreachable_internal("Exit infinite loop", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 276); | |||
277 | } | |||
278 | ||||
279 | static Expected<bool> hasObjCCategory(BitstreamCursor &Stream) { | |||
280 | // We expect a number of well-defined blocks, though we don't necessarily | |||
281 | // need to understand them all. | |||
282 | while (true) { | |||
283 | BitstreamEntry Entry = Stream.advance(); | |||
284 | ||||
285 | switch (Entry.Kind) { | |||
286 | case BitstreamEntry::Error: | |||
287 | return error("Malformed block"); | |||
288 | case BitstreamEntry::EndBlock: | |||
289 | return false; | |||
290 | ||||
291 | case BitstreamEntry::SubBlock: | |||
292 | if (Entry.ID == bitc::MODULE_BLOCK_ID) | |||
293 | return hasObjCCategoryInModule(Stream); | |||
294 | ||||
295 | // Ignore other sub-blocks. | |||
296 | if (Stream.SkipBlock()) | |||
297 | return error("Malformed block"); | |||
298 | continue; | |||
299 | ||||
300 | case BitstreamEntry::Record: | |||
301 | Stream.skipRecord(Entry.ID); | |||
302 | continue; | |||
303 | } | |||
304 | } | |||
305 | } | |||
306 | ||||
307 | static Expected<std::string> readModuleTriple(BitstreamCursor &Stream) { | |||
308 | if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID)) | |||
309 | return error("Invalid record"); | |||
310 | ||||
311 | SmallVector<uint64_t, 64> Record; | |||
312 | ||||
313 | std::string Triple; | |||
314 | ||||
315 | // Read all the records for this module. | |||
316 | while (true) { | |||
317 | BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); | |||
318 | ||||
319 | switch (Entry.Kind) { | |||
320 | case BitstreamEntry::SubBlock: // Handled for us already. | |||
321 | case BitstreamEntry::Error: | |||
322 | return error("Malformed block"); | |||
323 | case BitstreamEntry::EndBlock: | |||
324 | return Triple; | |||
325 | case BitstreamEntry::Record: | |||
326 | // The interesting case. | |||
327 | break; | |||
328 | } | |||
329 | ||||
330 | // Read a record. | |||
331 | switch (Stream.readRecord(Entry.ID, Record)) { | |||
332 | default: break; // Default behavior, ignore unknown content. | |||
333 | case bitc::MODULE_CODE_TRIPLE: { // TRIPLE: [strchr x N] | |||
334 | std::string S; | |||
335 | if (convertToString(Record, 0, S)) | |||
336 | return error("Invalid record"); | |||
337 | Triple = S; | |||
338 | break; | |||
339 | } | |||
340 | } | |||
341 | Record.clear(); | |||
342 | } | |||
343 | llvm_unreachable("Exit infinite loop")::llvm::llvm_unreachable_internal("Exit infinite loop", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 343); | |||
344 | } | |||
345 | ||||
346 | static Expected<std::string> readTriple(BitstreamCursor &Stream) { | |||
347 | // We expect a number of well-defined blocks, though we don't necessarily | |||
348 | // need to understand them all. | |||
349 | while (true) { | |||
350 | BitstreamEntry Entry = Stream.advance(); | |||
351 | ||||
352 | switch (Entry.Kind) { | |||
353 | case BitstreamEntry::Error: | |||
354 | return error("Malformed block"); | |||
355 | case BitstreamEntry::EndBlock: | |||
356 | return ""; | |||
357 | ||||
358 | case BitstreamEntry::SubBlock: | |||
359 | if (Entry.ID == bitc::MODULE_BLOCK_ID) | |||
360 | return readModuleTriple(Stream); | |||
361 | ||||
362 | // Ignore other sub-blocks. | |||
363 | if (Stream.SkipBlock()) | |||
364 | return error("Malformed block"); | |||
365 | continue; | |||
366 | ||||
367 | case BitstreamEntry::Record: | |||
368 | Stream.skipRecord(Entry.ID); | |||
369 | continue; | |||
370 | } | |||
371 | } | |||
372 | } | |||
373 | ||||
374 | namespace { | |||
375 | ||||
/// Shared state and helpers for the two bitcode readers below: owns the
/// bitstream cursor, the module string table, and the producer string used
/// to decorate error messages.
class BitcodeReaderBase {
protected:
  // Takes ownership of the cursor and wires it up to this reader's
  // block-info map.
  BitcodeReaderBase(BitstreamCursor Stream, StringRef Strtab)
      : Stream(std::move(Stream)), Strtab(Strtab) {
    this->Stream.setBlockInfo(&BlockInfo);
  }

  // Block/abbreviation info referenced by Stream (see constructor).
  BitstreamBlockInfo BlockInfo;
  BitstreamCursor Stream;
  // Contents of the module's string table; names are read out of it by
  // readNameFromStrtab() when UseStrtab is set.
  StringRef Strtab;

  /// In version 2 of the bitcode we store names of global values and comdats in
  /// a string table rather than in the VST.
  bool UseStrtab = false;

  // Parses a VERSION record; sets UseStrtab as a side effect and returns the
  // module version (error on empty record or unknown version).
  Expected<unsigned> parseVersionRecord(ArrayRef<uint64_t> Record);

  /// If this module uses a string table, pop the reference to the string table
  /// and return the referenced string and the rest of the record. Otherwise
  /// just return the record itself.
  std::pair<StringRef, ArrayRef<uint64_t>>
  readNameFromStrtab(ArrayRef<uint64_t> Record);

  bool readBlockInfo();

  // Contains an arbitrary and optional string identifying the bitcode producer
  std::string ProducerIdentification;

  // Builds a CorruptedBitcode error, appending producer/reader version info
  // when ProducerIdentification is non-empty.
  Error error(const Twine &Message);
};
406 | ||||
407 | } // end anonymous namespace | |||
408 | ||||
409 | Error BitcodeReaderBase::error(const Twine &Message) { | |||
410 | std::string FullMsg = Message.str(); | |||
411 | if (!ProducerIdentification.empty()) | |||
412 | FullMsg += " (Producer: '" + ProducerIdentification + "' Reader: 'LLVM " + | |||
413 | LLVM_VERSION_STRING"7.0.0" "')"; | |||
414 | return ::error(FullMsg); | |||
415 | } | |||
416 | ||||
417 | Expected<unsigned> | |||
418 | BitcodeReaderBase::parseVersionRecord(ArrayRef<uint64_t> Record) { | |||
419 | if (Record.empty()) | |||
420 | return error("Invalid record"); | |||
421 | unsigned ModuleVersion = Record[0]; | |||
422 | if (ModuleVersion > 2) | |||
423 | return error("Invalid value"); | |||
424 | UseStrtab = ModuleVersion >= 2; | |||
425 | return ModuleVersion; | |||
426 | } | |||
427 | ||||
428 | std::pair<StringRef, ArrayRef<uint64_t>> | |||
429 | BitcodeReaderBase::readNameFromStrtab(ArrayRef<uint64_t> Record) { | |||
430 | if (!UseStrtab) | |||
431 | return {"", Record}; | |||
432 | // Invalid reference. Let the caller complain about the record being empty. | |||
433 | if (Record[0] + Record[1] > Strtab.size()) | |||
434 | return {"", {}}; | |||
435 | return {StringRef(Strtab.data() + Record[0], Record[1]), Record.slice(2)}; | |||
436 | } | |||
437 | ||||
438 | namespace { | |||
439 | ||||
/// GVMaterializer that parses an LLVM bitcode stream into a Module,
/// supporting deferred (lazy) parsing of function bodies and metadata
/// (see DeferredFunctionInfo / DeferredMetadataInfo below).
class BitcodeReader : public BitcodeReaderBase, public GVMaterializer {
  LLVMContext &Context;
  Module *TheModule = nullptr;
  // Next offset to start scanning for lazy parsing of function bodies.
  uint64_t NextUnreadBit = 0;
  // Last function offset found in the VST.
  uint64_t LastFunctionBlockBit = 0;
  // Whether a value symbol table has been parsed yet.
  bool SeenValueSymbolTable = false;
  // Bit offset of the VST; presumably from MODULE_CODE_VSTOFFSET, mirroring
  // the identically-named field of ModuleSummaryIndexBitcodeReader — confirm.
  uint64_t VSTOffset = 0;

  // String tables populated while reading the module block; entries are
  // presumably referenced by index from later records — confirm at use sites.
  std::vector<std::string> SectionTable;
  std::vector<std::string> GCTable;

  std::vector<Type*> TypeList;
  BitcodeReaderValueList ValueList;
  // Lazily-constructed loader for metadata blocks (see materializeMetadata).
  Optional<MetadataLoader> MDLoader;
  std::vector<Comdat *> ComdatList;
  SmallVector<Instruction *, 64> InstructionList;

  // Pending (object, record-index) pairs noted during the module header;
  // NOTE(review): presumably resolved later (e.g. by
  // resolveGlobalAndIndirectSymbolInits below) once the referenced constants
  // exist — confirm against the definitions.
  std::vector<std::pair<GlobalVariable *, unsigned>> GlobalInits;
  std::vector<std::pair<GlobalIndirectSymbol *, unsigned>> IndirectSymbolInits;
  std::vector<std::pair<Function *, unsigned>> FunctionPrefixes;
  std::vector<std::pair<Function *, unsigned>> FunctionPrologues;
  std::vector<std::pair<Function *, unsigned>> FunctionPersonalityFns;

  /// The set of attributes by index. Index zero in the file is for null, and
  /// is thus not represented here. As such all indices are off by one.
  std::vector<AttributeList> MAttributes;

  /// The set of attribute groups.
  std::map<unsigned, AttributeList> MAttributeGroups;

  /// While parsing a function body, this is a list of the basic blocks for the
  /// function.
  std::vector<BasicBlock*> FunctionBBs;

  // When reading the module header, this list is populated with functions that
  // have bodies later in the file.
  std::vector<Function*> FunctionsWithBodies;

  // When intrinsic functions are encountered which require upgrading they are
  // stored here with their replacement function.
  using UpdatedIntrinsicMap = DenseMap<Function *, Function *>;
  UpdatedIntrinsicMap UpgradedIntrinsics;
  // Intrinsics which were remangled because of types rename
  UpdatedIntrinsicMap RemangledIntrinsics;

  // Several operations happen after the module header has been read, but
  // before function bodies are processed. This keeps track of whether
  // we've done this yet.
  bool SeenFirstFunctionBody = false;

  /// When function bodies are initially scanned, this map contains info about
  /// where to find deferred function body in the stream.
  DenseMap<Function*, uint64_t> DeferredFunctionInfo;

  /// When Metadata block is initially scanned when parsing the module, we may
  /// choose to defer parsing of the metadata. This vector contains info about
  /// which Metadata blocks are deferred.
  std::vector<uint64_t> DeferredMetadataInfo;

  /// These are basic blocks forward-referenced by block addresses. They are
  /// inserted lazily into functions when they're loaded. The basic block ID is
  /// its index into the vector.
  DenseMap<Function *, std::vector<BasicBlock *>> BasicBlockFwdRefs;
  std::deque<Function *> BasicBlockFwdRefQueue;

  /// Indicates that we are using a new encoding for instruction operands where
  /// most operands in the current FUNCTION_BLOCK are encoded relative to the
  /// instruction number, for a more compact encoding. Some instruction
  /// operands are not relative to the instruction ID: basic block numbers, and
  /// types. Once the old style function blocks have been phased out, we would
  /// not need this flag.
  bool UseRelativeIDs = false;

  /// True if all functions will be materialized, negating the need to process
  /// (e.g.) blockaddress forward references.
  bool WillMaterializeAllForwardRefs = false;

  // Set via setStripDebugInfo(); helper used to verify/strip TBAA metadata.
  bool StripDebugInfo = false;
  TBAAVerifier TBAAVerifyHelper;

  // Presumably filled in by parseOperandBundleTags() / parseSyncScopeNames()
  // below — confirm against their definitions.
  std::vector<std::string> BundleTags;
  SmallVector<SyncScope::ID, 8> SSIDs;

public:
  BitcodeReader(BitstreamCursor Stream, StringRef Strtab,
                StringRef ProducerIdentification, LLVMContext &Context);

  Error materializeForwardReferencedFunctions();

  Error materialize(GlobalValue *GV) override;
  Error materializeModule() override;
  std::vector<StructType *> getIdentifiedStructTypes() const override;

  /// \brief Main interface to parsing a bitcode buffer.
  /// \returns true if an error occurred.
  Error parseBitcodeInto(Module *M, bool ShouldLazyLoadMetadata = false,
                         bool IsImporting = false);

  static uint64_t decodeSignRotatedValue(uint64_t V);

  /// Materialize any deferred Metadata block.
  Error materializeMetadata() override;

  void setStripDebugInfo() override;

private:
  std::vector<StructType *> IdentifiedStructTypes;
  StructType *createIdentifiedStructType(LLVMContext &Context, StringRef Name);
  StructType *createIdentifiedStructType(LLVMContext &Context);

  Type *getTypeByID(unsigned ID);

  // Resolve a function-local value ID: metadata-typed operands are wrapped
  // as MetadataAsValue; everything else goes through the value list (which
  // may create a forward reference).
  Value *getFnValueByID(unsigned ID, Type *Ty) {
    if (Ty && Ty->isMetadataTy())
      return MetadataAsValue::get(Ty->getContext(), getFnMetadataByID(ID));
    return ValueList.getValueFwdRef(ID, Ty);
  }

  Metadata *getFnMetadataByID(unsigned ID) {
    return MDLoader->getMetadataFwdRefOrLoad(ID);
  }

  BasicBlock *getBasicBlock(unsigned ID) const {
    if (ID >= FunctionBBs.size()) return nullptr; // Invalid ID
    return FunctionBBs[ID];
  }

  AttributeList getAttributes(unsigned i) const {
    // Relies on unsigned wrap-around: i == 0 (the null attribute set) makes
    // i-1 a huge value that fails the bound check, yielding the empty list.
    if (i-1 < MAttributes.size())
      return MAttributes[i-1];
    return AttributeList();
  }

  /// Read a value/type pair out of the specified record from slot 'Slot'.
  /// Increment Slot past the number of slots used in the record. Return true on
  /// failure.
  bool getValueTypePair(SmallVectorImpl<uint64_t> &Record, unsigned &Slot,
                        unsigned InstNum, Value *&ResVal) {
    if (Slot == Record.size()) return true;
    unsigned ValNo = (unsigned)Record[Slot++];
    // Adjust the ValNo, if it was encoded relative to the InstNum.
    if (UseRelativeIDs)
      ValNo = InstNum - ValNo;
    if (ValNo < InstNum) {
      // If this is not a forward reference, just return the value we already
      // have.
      ResVal = getFnValueByID(ValNo, nullptr);
      return ResVal == nullptr;
    }
    // Forward references carry an explicit type in the next slot.
    if (Slot == Record.size())
      return true;

    unsigned TypeNo = (unsigned)Record[Slot++];
    ResVal = getFnValueByID(ValNo, getTypeByID(TypeNo));
    return ResVal == nullptr;
  }

  /// Read a value out of the specified record from slot 'Slot'. Increment Slot
  /// past the number of slots used by the value in the record. Return true if
  /// there is an error.
  bool popValue(SmallVectorImpl<uint64_t> &Record, unsigned &Slot,
                unsigned InstNum, Type *Ty, Value *&ResVal) {
    if (getValue(Record, Slot, InstNum, Ty, ResVal))
      return true;
    // All values currently take a single record slot.
    ++Slot;
    return false;
  }

  /// Like popValue, but does not increment the Slot number.
  bool getValue(SmallVectorImpl<uint64_t> &Record, unsigned Slot,
                unsigned InstNum, Type *Ty, Value *&ResVal) {
    ResVal = getValue(Record, Slot, InstNum, Ty);
    return ResVal == nullptr;
  }

  /// Version of getValue that returns ResVal directly, or 0 if there is an
  /// error.
  Value *getValue(SmallVectorImpl<uint64_t> &Record, unsigned Slot,
                  unsigned InstNum, Type *Ty) {
    if (Slot == Record.size()) return nullptr;
    unsigned ValNo = (unsigned)Record[Slot];
    // Adjust the ValNo, if it was encoded relative to the InstNum.
    if (UseRelativeIDs)
      ValNo = InstNum - ValNo;
    return getFnValueByID(ValNo, Ty);
  }

  /// Like getValue, but decodes signed VBRs.
  Value *getValueSigned(SmallVectorImpl<uint64_t> &Record, unsigned Slot,
                        unsigned InstNum, Type *Ty) {
    if (Slot == Record.size()) return nullptr;
    unsigned ValNo = (unsigned)decodeSignRotatedValue(Record[Slot]);
    // Adjust the ValNo, if it was encoded relative to the InstNum.
    if (UseRelativeIDs)
      ValNo = InstNum - ValNo;
    return getFnValueByID(ValNo, Ty);
  }

  /// Converts alignment exponent (i.e. power of two (or zero)) to the
  /// corresponding alignment to use. If alignment is too large, returns
  /// a corresponding error code.
  Error parseAlignmentValue(uint64_t Exponent, unsigned &Alignment);
  Error parseAttrKind(uint64_t Code, Attribute::AttrKind *Kind);
  Error parseModule(uint64_t ResumeBit, bool ShouldLazyLoadMetadata = false);

  // Per-record parsers for module-level entities.
  Error parseComdatRecord(ArrayRef<uint64_t> Record);
  Error parseGlobalVarRecord(ArrayRef<uint64_t> Record);
  Error parseFunctionRecord(ArrayRef<uint64_t> Record);
  Error parseGlobalIndirectSymbolRecord(unsigned BitCode,
                                        ArrayRef<uint64_t> Record);

  // Per-block parsers for module sub-blocks.
  Error parseAttributeBlock();
  Error parseAttributeGroupBlock();
  Error parseTypeTable();
  Error parseTypeTableBody();
  Error parseOperandBundleTags();
  Error parseSyncScopeNames();

  Expected<Value *> recordValue(SmallVectorImpl<uint64_t> &Record,
                                unsigned NameIndex, Triple &TT);
  void setDeferredFunctionInfo(unsigned FuncBitcodeOffsetDelta, Function *F,
                               ArrayRef<uint64_t> Record);
  Error parseValueSymbolTable(uint64_t Offset = 0);
  Error parseGlobalValueSymbolTable();
  Error parseConstants();
  Error rememberAndSkipFunctionBodies();
  Error rememberAndSkipFunctionBody();
  /// Save the positions of the Metadata blocks and skip parsing the blocks.
  Error rememberAndSkipMetadata();
  Error typeCheckLoadStoreInst(Type *ValType, Type *PtrType);
  Error parseFunctionBody(Function *F);
  Error globalCleanup();
  Error resolveGlobalAndIndirectSymbolInits();
  Error parseUseLists();
  Error findFunctionInStream(
      Function *F,
      DenseMap<Function *, uint64_t>::iterator DeferredFunctionInfoIterator);

  SyncScope::ID getDecodedSyncScopeID(unsigned Val);
};
683 | ||||
/// Class to manage reading and parsing function summary index bitcode
/// files/sections.
class ModuleSummaryIndexBitcodeReader : public BitcodeReaderBase {
  /// The module index built during parsing.
  ModuleSummaryIndex &TheIndex;

  /// Indicates whether we have encountered a global value summary section
  /// yet during parsing.
  bool SeenGlobalValSummary = false;

  /// Indicates whether we have already parsed the VST, used for error checking.
  bool SeenValueSymbolTable = false;

  /// Set to the offset of the VST recorded in the MODULE_CODE_VSTOFFSET record.
  /// Used to enable on-demand parsing of the VST.
  uint64_t VSTOffset = 0;

  // Map to save ValueId to ValueInfo association that was recorded in the
  // ValueSymbolTable. It is used after the VST is parsed to convert
  // call graph edges read from the function summary from referencing
  // callees by their ValueId to using the ValueInfo instead, which is how
  // they are recorded in the summary index being built.
  // We save a GUID which refers to the same global as the ValueInfo, but
  // ignoring the linkage, i.e. for values other than local linkage they are
  // identical.
  DenseMap<unsigned, std::pair<ValueInfo, GlobalValue::GUID>>
      ValueIdToValueInfoMap;

  /// Map populated during module path string table parsing, from the
  /// module ID to a string reference owned by the index's module
  /// path string table, used to correlate with combined index
  /// summary records.
  DenseMap<uint64_t, StringRef> ModuleIdMap;

  /// Original source file name recorded in a bitcode record.
  std::string SourceFileName;

  /// The string identifier given to this module by the client, normally the
  /// path to the bitcode file.
  StringRef ModulePath;

  /// For per-module summary indexes, the unique numerical identifier given to
  /// this module by the client.
  unsigned ModuleId;

public:
  ModuleSummaryIndexBitcodeReader(BitstreamCursor Stream, StringRef Strtab,
                                  ModuleSummaryIndex &TheIndex,
                                  StringRef ModulePath, unsigned ModuleId);

  /// Parse the module block and populate TheIndex.
  Error parseModule();

private:
  // Record the GUID association for a named value (definitions are
  // out-of-line; see them for the exact GUID derivation).
  void setValueGUID(uint64_t ValueID, StringRef ValueName,
                    GlobalValue::LinkageTypes Linkage,
                    StringRef SourceFileName);
  Error parseValueSymbolTable(
      uint64_t Offset,
      DenseMap<unsigned, GlobalValue::LinkageTypes> &ValueIdToLinkageMap);
  // Convert raw record operands into reference / call-edge lists for a
  // summary entry. The flags select among the supported profile encodings.
  std::vector<ValueInfo> makeRefList(ArrayRef<uint64_t> Record);
  std::vector<FunctionSummary::EdgeTy> makeCallList(ArrayRef<uint64_t> Record,
                                                    bool IsOldProfileFormat,
                                                    bool HasProfile,
                                                    bool HasRelBF);
  Error parseEntireSummary(unsigned ID);
  Error parseModuleStringTable();

  // Look up the (ValueInfo, GUID) pair recorded for \p ValueId in
  // ValueIdToValueInfoMap.
  std::pair<ValueInfo, GlobalValue::GUID>
  getValueInfoFromValueId(unsigned ValueId);

  ModuleSummaryIndex::ModuleInfo *addThisModule();
};
756 | ||||
757 | } // end anonymous namespace | |||
758 | ||||
759 | std::error_code llvm::errorToErrorCodeAndEmitErrors(LLVMContext &Ctx, | |||
760 | Error Err) { | |||
761 | if (Err) { | |||
762 | std::error_code EC; | |||
763 | handleAllErrors(std::move(Err), [&](ErrorInfoBase &EIB) { | |||
764 | EC = EIB.convertToErrorCode(); | |||
765 | Ctx.emitError(EIB.message()); | |||
766 | }); | |||
767 | return EC; | |||
768 | } | |||
769 | return std::error_code(); | |||
770 | } | |||
771 | ||||
// Construct a BitcodeReader over an already-positioned bitstream cursor.
// The string table (Strtab) and producer identification string are kept
// for use while parsing; Context owns all IR created during materialization.
BitcodeReader::BitcodeReader(BitstreamCursor Stream, StringRef Strtab,
                             StringRef ProducerIdentification,
                             LLVMContext &Context)
    : BitcodeReaderBase(std::move(Stream), Strtab), Context(Context),
      ValueList(Context) {
  // Assigned in the body (with an explicit this->) because the parameter
  // shares the member's name.
  this->ProducerIdentification = ProducerIdentification;
}
779 | ||||
/// Materialize every function that is the target of a blockaddress forward
/// reference queued in BasicBlockFwdRefQueue, so those references can be
/// resolved.  Uses WillMaterializeAllForwardRefs as a re-entrancy guard.
Error BitcodeReader::materializeForwardReferencedFunctions() {
  if (WillMaterializeAllForwardRefs)
    return Error::success();

  // Prevent recursion.
  WillMaterializeAllForwardRefs = true;

  while (!BasicBlockFwdRefQueue.empty()) {
    Function *F = BasicBlockFwdRefQueue.front();
    BasicBlockFwdRefQueue.pop_front();
    assert(F && "Expected valid function")(static_cast <bool> (F && "Expected valid function" ) ? void (0) : __assert_fail ("F && \"Expected valid function\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 790, __extension__ __PRETTY_FUNCTION__));
    if (!BasicBlockFwdRefs.count(F))
      // Already materialized.
      continue;

    // Check for a function that isn't materializable to prevent an infinite
    // loop. When parsing a blockaddress stored in a global variable, there
    // isn't a trivial way to check if a function will have a body without a
    // linear search through FunctionsWithBodies, so just check it here.
    if (!F->isMaterializable())
      return error("Never resolved function from blockaddress");

    // Try to materialize F.
    if (Error Err = materialize(F))
      return Err;
  }
  assert(BasicBlockFwdRefs.empty() && "Function missing from queue")(static_cast <bool> (BasicBlockFwdRefs.empty() && "Function missing from queue") ? void (0) : __assert_fail ("BasicBlockFwdRefs.empty() && \"Function missing from queue\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 806, __extension__ __PRETTY_FUNCTION__));

  // Reset state.
  WillMaterializeAllForwardRefs = false;
  return Error::success();
}
812 | ||||
813 | //===----------------------------------------------------------------------===// | |||
814 | // Helper functions to implement forward reference resolution, etc. | |||
815 | //===----------------------------------------------------------------------===// | |||
816 | ||||
// Old bitcode linkage values 1 (WeakAny), 4 (LinkOnceAny), 10 (WeakODR)
// and 11 (LinkOnceODR) implied a comdat; every other value did not.
static bool hasImplicitComdat(size_t Val) {
  return Val == 1 || Val == 4 || Val == 10 || Val == 11;
}
828 | ||||
829 | static GlobalValue::LinkageTypes getDecodedLinkage(unsigned Val) { | |||
830 | switch (Val) { | |||
831 | default: // Map unknown/new linkages to external | |||
832 | case 0: | |||
833 | return GlobalValue::ExternalLinkage; | |||
834 | case 2: | |||
835 | return GlobalValue::AppendingLinkage; | |||
836 | case 3: | |||
837 | return GlobalValue::InternalLinkage; | |||
838 | case 5: | |||
839 | return GlobalValue::ExternalLinkage; // Obsolete DLLImportLinkage | |||
840 | case 6: | |||
841 | return GlobalValue::ExternalLinkage; // Obsolete DLLExportLinkage | |||
842 | case 7: | |||
843 | return GlobalValue::ExternalWeakLinkage; | |||
844 | case 8: | |||
845 | return GlobalValue::CommonLinkage; | |||
846 | case 9: | |||
847 | return GlobalValue::PrivateLinkage; | |||
848 | case 12: | |||
849 | return GlobalValue::AvailableExternallyLinkage; | |||
850 | case 13: | |||
851 | return GlobalValue::PrivateLinkage; // Obsolete LinkerPrivateLinkage | |||
852 | case 14: | |||
853 | return GlobalValue::PrivateLinkage; // Obsolete LinkerPrivateWeakLinkage | |||
854 | case 15: | |||
855 | return GlobalValue::ExternalLinkage; // Obsolete LinkOnceODRAutoHideLinkage | |||
856 | case 1: // Old value with implicit comdat. | |||
857 | case 16: | |||
858 | return GlobalValue::WeakAnyLinkage; | |||
859 | case 10: // Old value with implicit comdat. | |||
860 | case 17: | |||
861 | return GlobalValue::WeakODRLinkage; | |||
862 | case 4: // Old value with implicit comdat. | |||
863 | case 18: | |||
864 | return GlobalValue::LinkOnceAnyLinkage; | |||
865 | case 11: // Old value with implicit comdat. | |||
866 | case 19: | |||
867 | return GlobalValue::LinkOnceODRLinkage; | |||
868 | } | |||
869 | } | |||
870 | ||||
871 | static FunctionSummary::FFlags getDecodedFFlags(uint64_t RawFlags) { | |||
872 | FunctionSummary::FFlags Flags; | |||
873 | Flags.ReadNone = RawFlags & 0x1; | |||
874 | Flags.ReadOnly = (RawFlags >> 1) & 0x1; | |||
875 | Flags.NoRecurse = (RawFlags >> 2) & 0x1; | |||
876 | Flags.ReturnDoesNotAlias = (RawFlags >> 3) & 0x1; | |||
877 | return Flags; | |||
878 | } | |||
879 | ||||
880 | /// Decode the flags for GlobalValue in the summary. | |||
881 | static GlobalValueSummary::GVFlags getDecodedGVSummaryFlags(uint64_t RawFlags, | |||
882 | uint64_t Version) { | |||
883 | // Summary were not emitted before LLVM 3.9, we don't need to upgrade Linkage | |||
884 | // like getDecodedLinkage() above. Any future change to the linkage enum and | |||
885 | // to getDecodedLinkage() will need to be taken into account here as above. | |||
886 | auto Linkage = GlobalValue::LinkageTypes(RawFlags & 0xF); // 4 bits | |||
887 | RawFlags = RawFlags >> 4; | |||
888 | bool NotEligibleToImport = (RawFlags & 0x1) || Version < 3; | |||
889 | // The Live flag wasn't introduced until version 3. For dead stripping | |||
890 | // to work correctly on earlier versions, we must conservatively treat all | |||
891 | // values as live. | |||
892 | bool Live = (RawFlags & 0x2) || Version < 3; | |||
893 | bool Local = (RawFlags & 0x4); | |||
894 | ||||
895 | return GlobalValueSummary::GVFlags(Linkage, NotEligibleToImport, Live, Local); | |||
896 | } | |||
897 | ||||
898 | static GlobalValue::VisibilityTypes getDecodedVisibility(unsigned Val) { | |||
899 | switch (Val) { | |||
900 | default: // Map unknown visibilities to default. | |||
901 | case 0: return GlobalValue::DefaultVisibility; | |||
902 | case 1: return GlobalValue::HiddenVisibility; | |||
903 | case 2: return GlobalValue::ProtectedVisibility; | |||
904 | } | |||
905 | } | |||
906 | ||||
907 | static GlobalValue::DLLStorageClassTypes | |||
908 | getDecodedDLLStorageClass(unsigned Val) { | |||
909 | switch (Val) { | |||
910 | default: // Map unknown values to default. | |||
911 | case 0: return GlobalValue::DefaultStorageClass; | |||
912 | case 1: return GlobalValue::DLLImportStorageClass; | |||
913 | case 2: return GlobalValue::DLLExportStorageClass; | |||
914 | } | |||
915 | } | |||
916 | ||||
// Decode the dso_local bit: 1 means dso_local; 0 and any unknown value
// map to preemptable (false).
static bool getDecodedDSOLocal(unsigned Val) {
  return Val == 1;
}
924 | ||||
925 | static GlobalVariable::ThreadLocalMode getDecodedThreadLocalMode(unsigned Val) { | |||
926 | switch (Val) { | |||
927 | case 0: return GlobalVariable::NotThreadLocal; | |||
928 | default: // Map unknown non-zero value to general dynamic. | |||
929 | case 1: return GlobalVariable::GeneralDynamicTLSModel; | |||
930 | case 2: return GlobalVariable::LocalDynamicTLSModel; | |||
931 | case 3: return GlobalVariable::InitialExecTLSModel; | |||
932 | case 4: return GlobalVariable::LocalExecTLSModel; | |||
933 | } | |||
934 | } | |||
935 | ||||
936 | static GlobalVariable::UnnamedAddr getDecodedUnnamedAddrType(unsigned Val) { | |||
937 | switch (Val) { | |||
938 | default: // Map unknown to UnnamedAddr::None. | |||
939 | case 0: return GlobalVariable::UnnamedAddr::None; | |||
940 | case 1: return GlobalVariable::UnnamedAddr::Global; | |||
941 | case 2: return GlobalVariable::UnnamedAddr::Local; | |||
942 | } | |||
943 | } | |||
944 | ||||
945 | static int getDecodedCastOpcode(unsigned Val) { | |||
946 | switch (Val) { | |||
947 | default: return -1; | |||
948 | case bitc::CAST_TRUNC : return Instruction::Trunc; | |||
949 | case bitc::CAST_ZEXT : return Instruction::ZExt; | |||
950 | case bitc::CAST_SEXT : return Instruction::SExt; | |||
951 | case bitc::CAST_FPTOUI : return Instruction::FPToUI; | |||
952 | case bitc::CAST_FPTOSI : return Instruction::FPToSI; | |||
953 | case bitc::CAST_UITOFP : return Instruction::UIToFP; | |||
954 | case bitc::CAST_SITOFP : return Instruction::SIToFP; | |||
955 | case bitc::CAST_FPTRUNC : return Instruction::FPTrunc; | |||
956 | case bitc::CAST_FPEXT : return Instruction::FPExt; | |||
957 | case bitc::CAST_PTRTOINT: return Instruction::PtrToInt; | |||
958 | case bitc::CAST_INTTOPTR: return Instruction::IntToPtr; | |||
959 | case bitc::CAST_BITCAST : return Instruction::BitCast; | |||
960 | case bitc::CAST_ADDRSPACECAST: return Instruction::AddrSpaceCast; | |||
961 | } | |||
962 | } | |||
963 | ||||
964 | static int getDecodedBinaryOpcode(unsigned Val, Type *Ty) { | |||
965 | bool IsFP = Ty->isFPOrFPVectorTy(); | |||
966 | // BinOps are only valid for int/fp or vector of int/fp types | |||
967 | if (!IsFP && !Ty->isIntOrIntVectorTy()) | |||
968 | return -1; | |||
969 | ||||
970 | switch (Val) { | |||
971 | default: | |||
972 | return -1; | |||
973 | case bitc::BINOP_ADD: | |||
974 | return IsFP ? Instruction::FAdd : Instruction::Add; | |||
975 | case bitc::BINOP_SUB: | |||
976 | return IsFP ? Instruction::FSub : Instruction::Sub; | |||
977 | case bitc::BINOP_MUL: | |||
978 | return IsFP ? Instruction::FMul : Instruction::Mul; | |||
979 | case bitc::BINOP_UDIV: | |||
980 | return IsFP ? -1 : Instruction::UDiv; | |||
981 | case bitc::BINOP_SDIV: | |||
982 | return IsFP ? Instruction::FDiv : Instruction::SDiv; | |||
983 | case bitc::BINOP_UREM: | |||
984 | return IsFP ? -1 : Instruction::URem; | |||
985 | case bitc::BINOP_SREM: | |||
986 | return IsFP ? Instruction::FRem : Instruction::SRem; | |||
987 | case bitc::BINOP_SHL: | |||
988 | return IsFP ? -1 : Instruction::Shl; | |||
989 | case bitc::BINOP_LSHR: | |||
990 | return IsFP ? -1 : Instruction::LShr; | |||
991 | case bitc::BINOP_ASHR: | |||
992 | return IsFP ? -1 : Instruction::AShr; | |||
993 | case bitc::BINOP_AND: | |||
994 | return IsFP ? -1 : Instruction::And; | |||
995 | case bitc::BINOP_OR: | |||
996 | return IsFP ? -1 : Instruction::Or; | |||
997 | case bitc::BINOP_XOR: | |||
998 | return IsFP ? -1 : Instruction::Xor; | |||
999 | } | |||
1000 | } | |||
1001 | ||||
1002 | static AtomicRMWInst::BinOp getDecodedRMWOperation(unsigned Val) { | |||
1003 | switch (Val) { | |||
1004 | default: return AtomicRMWInst::BAD_BINOP; | |||
1005 | case bitc::RMW_XCHG: return AtomicRMWInst::Xchg; | |||
1006 | case bitc::RMW_ADD: return AtomicRMWInst::Add; | |||
1007 | case bitc::RMW_SUB: return AtomicRMWInst::Sub; | |||
1008 | case bitc::RMW_AND: return AtomicRMWInst::And; | |||
1009 | case bitc::RMW_NAND: return AtomicRMWInst::Nand; | |||
1010 | case bitc::RMW_OR: return AtomicRMWInst::Or; | |||
1011 | case bitc::RMW_XOR: return AtomicRMWInst::Xor; | |||
1012 | case bitc::RMW_MAX: return AtomicRMWInst::Max; | |||
1013 | case bitc::RMW_MIN: return AtomicRMWInst::Min; | |||
1014 | case bitc::RMW_UMAX: return AtomicRMWInst::UMax; | |||
1015 | case bitc::RMW_UMIN: return AtomicRMWInst::UMin; | |||
1016 | } | |||
1017 | } | |||
1018 | ||||
1019 | static AtomicOrdering getDecodedOrdering(unsigned Val) { | |||
1020 | switch (Val) { | |||
1021 | case bitc::ORDERING_NOTATOMIC: return AtomicOrdering::NotAtomic; | |||
1022 | case bitc::ORDERING_UNORDERED: return AtomicOrdering::Unordered; | |||
1023 | case bitc::ORDERING_MONOTONIC: return AtomicOrdering::Monotonic; | |||
1024 | case bitc::ORDERING_ACQUIRE: return AtomicOrdering::Acquire; | |||
1025 | case bitc::ORDERING_RELEASE: return AtomicOrdering::Release; | |||
1026 | case bitc::ORDERING_ACQREL: return AtomicOrdering::AcquireRelease; | |||
1027 | default: // Map unknown orderings to sequentially-consistent. | |||
1028 | case bitc::ORDERING_SEQCST: return AtomicOrdering::SequentiallyConsistent; | |||
1029 | } | |||
1030 | } | |||
1031 | ||||
1032 | static Comdat::SelectionKind getDecodedComdatSelectionKind(unsigned Val) { | |||
1033 | switch (Val) { | |||
1034 | default: // Map unknown selection kinds to any. | |||
1035 | case bitc::COMDAT_SELECTION_KIND_ANY: | |||
1036 | return Comdat::Any; | |||
1037 | case bitc::COMDAT_SELECTION_KIND_EXACT_MATCH: | |||
1038 | return Comdat::ExactMatch; | |||
1039 | case bitc::COMDAT_SELECTION_KIND_LARGEST: | |||
1040 | return Comdat::Largest; | |||
1041 | case bitc::COMDAT_SELECTION_KIND_NO_DUPLICATES: | |||
1042 | return Comdat::NoDuplicates; | |||
1043 | case bitc::COMDAT_SELECTION_KIND_SAME_SIZE: | |||
1044 | return Comdat::SameSize; | |||
1045 | } | |||
1046 | } | |||
1047 | ||||
1048 | static FastMathFlags getDecodedFastMathFlags(unsigned Val) { | |||
1049 | FastMathFlags FMF; | |||
1050 | if (0 != (Val & bitc::UnsafeAlgebra)) | |||
1051 | FMF.setFast(); | |||
1052 | if (0 != (Val & bitc::AllowReassoc)) | |||
1053 | FMF.setAllowReassoc(); | |||
1054 | if (0 != (Val & bitc::NoNaNs)) | |||
1055 | FMF.setNoNaNs(); | |||
1056 | if (0 != (Val & bitc::NoInfs)) | |||
1057 | FMF.setNoInfs(); | |||
1058 | if (0 != (Val & bitc::NoSignedZeros)) | |||
1059 | FMF.setNoSignedZeros(); | |||
1060 | if (0 != (Val & bitc::AllowReciprocal)) | |||
1061 | FMF.setAllowReciprocal(); | |||
1062 | if (0 != (Val & bitc::AllowContract)) | |||
1063 | FMF.setAllowContract(true); | |||
1064 | if (0 != (Val & bitc::ApproxFunc)) | |||
1065 | FMF.setApproxFunc(); | |||
1066 | return FMF; | |||
1067 | } | |||
1068 | ||||
1069 | static void upgradeDLLImportExportLinkage(GlobalValue *GV, unsigned Val) { | |||
1070 | switch (Val) { | |||
1071 | case 5: GV->setDLLStorageClass(GlobalValue::DLLImportStorageClass); break; | |||
1072 | case 6: GV->setDLLStorageClass(GlobalValue::DLLExportStorageClass); break; | |||
1073 | } | |||
1074 | } | |||
1075 | ||||
1076 | Type *BitcodeReader::getTypeByID(unsigned ID) { | |||
1077 | // The type table size is always specified correctly. | |||
1078 | if (ID >= TypeList.size()) | |||
1079 | return nullptr; | |||
1080 | ||||
1081 | if (Type *Ty = TypeList[ID]) | |||
1082 | return Ty; | |||
1083 | ||||
1084 | // If we have a forward reference, the only possible case is when it is to a | |||
1085 | // named struct. Just create a placeholder for now. | |||
1086 | return TypeList[ID] = createIdentifiedStructType(Context); | |||
1087 | } | |||
1088 | ||||
1089 | StructType *BitcodeReader::createIdentifiedStructType(LLVMContext &Context, | |||
1090 | StringRef Name) { | |||
1091 | auto *Ret = StructType::create(Context, Name); | |||
1092 | IdentifiedStructTypes.push_back(Ret); | |||
1093 | return Ret; | |||
1094 | } | |||
1095 | ||||
1096 | StructType *BitcodeReader::createIdentifiedStructType(LLVMContext &Context) { | |||
1097 | auto *Ret = StructType::create(Context); | |||
1098 | IdentifiedStructTypes.push_back(Ret); | |||
1099 | return Ret; | |||
1100 | } | |||
1101 | ||||
1102 | //===----------------------------------------------------------------------===// | |||
1103 | // Functions for parsing blocks from the bitcode file | |||
1104 | //===----------------------------------------------------------------------===// | |||
1105 | ||||
/// Return the bit (or, for Alignment/StackAlignment, the multi-bit field
/// mask) that encodes \p Val in the legacy "raw" attribute bitfield.
/// Attributes added after the raw format was frozen have no raw encoding
/// and reach llvm_unreachable below.
static uint64_t getRawAttributeMask(Attribute::AttrKind Val) {
  switch (Val) {
  case Attribute::EndAttrKinds:
    llvm_unreachable("Synthetic enumerators which should never get here")::llvm::llvm_unreachable_internal("Synthetic enumerators which should never get here" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 1109);

  case Attribute::None: return 0;
  case Attribute::ZExt: return 1 << 0;
  case Attribute::SExt: return 1 << 1;
  case Attribute::NoReturn: return 1 << 2;
  case Attribute::InReg: return 1 << 3;
  case Attribute::StructRet: return 1 << 4;
  case Attribute::NoUnwind: return 1 << 5;
  case Attribute::NoAlias: return 1 << 6;
  case Attribute::ByVal: return 1 << 7;
  case Attribute::Nest: return 1 << 8;
  case Attribute::ReadNone: return 1 << 9;
  case Attribute::ReadOnly: return 1 << 10;
  case Attribute::NoInline: return 1 << 11;
  case Attribute::AlwaysInline: return 1 << 12;
  case Attribute::OptimizeForSize: return 1 << 13;
  case Attribute::StackProtect: return 1 << 14;
  case Attribute::StackProtectReq: return 1 << 15;
  // Alignment occupies the 5-bit field at bits 16-20 (see
  // decodeLLVMAttributesForBitcode for how the value is interpreted).
  case Attribute::Alignment: return 31 << 16;
  case Attribute::NoCapture: return 1 << 21;
  case Attribute::NoRedZone: return 1 << 22;
  case Attribute::NoImplicitFloat: return 1 << 23;
  case Attribute::Naked: return 1 << 24;
  case Attribute::InlineHint: return 1 << 25;
  // StackAlignment occupies the 3-bit field at bits 26-28.
  case Attribute::StackAlignment: return 7 << 26;
  case Attribute::ReturnsTwice: return 1 << 29;
  case Attribute::UWTable: return 1 << 30;
  case Attribute::NonLazyBind: return 1U << 31;
  case Attribute::SanitizeAddress: return 1ULL << 32;
  case Attribute::MinSize: return 1ULL << 33;
  case Attribute::NoDuplicate: return 1ULL << 34;
  case Attribute::StackProtectStrong: return 1ULL << 35;
  case Attribute::SanitizeThread: return 1ULL << 36;
  case Attribute::SanitizeMemory: return 1ULL << 37;
  case Attribute::NoBuiltin: return 1ULL << 38;
  case Attribute::Returned: return 1ULL << 39;
  case Attribute::Cold: return 1ULL << 40;
  case Attribute::Builtin: return 1ULL << 41;
  case Attribute::OptimizeNone: return 1ULL << 42;
  case Attribute::InAlloca: return 1ULL << 43;
  case Attribute::NonNull: return 1ULL << 44;
  case Attribute::JumpTable: return 1ULL << 45;
  case Attribute::Convergent: return 1ULL << 46;
  case Attribute::SafeStack: return 1ULL << 47;
  case Attribute::NoRecurse: return 1ULL << 48;
  case Attribute::InaccessibleMemOnly: return 1ULL << 49;
  case Attribute::InaccessibleMemOrArgMemOnly: return 1ULL << 50;
  case Attribute::SwiftSelf: return 1ULL << 51;
  case Attribute::SwiftError: return 1ULL << 52;
  case Attribute::WriteOnly: return 1ULL << 53;
  case Attribute::Speculatable: return 1ULL << 54;
  case Attribute::StrictFP: return 1ULL << 55;
  case Attribute::SanitizeHWAddress: return 1ULL << 56;
  case Attribute::Dereferenceable:
    llvm_unreachable("dereferenceable attribute not supported in raw format")::llvm::llvm_unreachable_internal("dereferenceable attribute not supported in raw format" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 1164);
    break;
  case Attribute::DereferenceableOrNull:
    llvm_unreachable("dereferenceable_or_null attribute not supported in raw "::llvm::llvm_unreachable_internal("dereferenceable_or_null attribute not supported in raw " "format", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 1168)
                     "format")::llvm::llvm_unreachable_internal("dereferenceable_or_null attribute not supported in raw " "format", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 1168);
    break;
  case Attribute::ArgMemOnly:
    llvm_unreachable("argmemonly attribute not supported in raw format")::llvm::llvm_unreachable_internal("argmemonly attribute not supported in raw format" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 1171);
    break;
  case Attribute::AllocSize:
    llvm_unreachable("allocsize not supported in raw format")::llvm::llvm_unreachable_internal("allocsize not supported in raw format" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 1174);
    break;
  }
  llvm_unreachable("Unsupported attribute type")::llvm::llvm_unreachable_internal("Unsupported attribute type" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 1177);
}
1179 | ||||
1180 | static void addRawAttributeValue(AttrBuilder &B, uint64_t Val) { | |||
1181 | if (!Val) return; | |||
1182 | ||||
1183 | for (Attribute::AttrKind I = Attribute::None; I != Attribute::EndAttrKinds; | |||
1184 | I = Attribute::AttrKind(I + 1)) { | |||
1185 | if (I == Attribute::Dereferenceable || | |||
1186 | I == Attribute::DereferenceableOrNull || | |||
1187 | I == Attribute::ArgMemOnly || | |||
1188 | I == Attribute::AllocSize) | |||
1189 | continue; | |||
1190 | if (uint64_t A = (Val & getRawAttributeMask(I))) { | |||
1191 | if (I == Attribute::Alignment) | |||
1192 | B.addAlignmentAttr(1ULL << ((A >> 16) - 1)); | |||
1193 | else if (I == Attribute::StackAlignment) | |||
1194 | B.addStackAlignmentAttr(1ULL << ((A >> 26)-1)); | |||
1195 | else | |||
1196 | B.addAttribute(I); | |||
1197 | } | |||
1198 | } | |||
1199 | } | |||
1200 | ||||
/// \brief This fills an AttrBuilder object with the LLVM attributes that have
/// been decoded from the given integer. This function must stay in sync with
/// 'encodeLLVMAttributesForBitcode'.
///
/// Layout of \p EncodedAttrs: attribute bits in 0-15 and 32-51, alignment
/// (as a raw byte count, not an exponent) in bits 16-31.
static void decodeLLVMAttributesForBitcode(AttrBuilder &B,
                                           uint64_t EncodedAttrs) {
  // FIXME: Remove in 4.0.

  // The alignment is stored as a 16-bit raw value from bits 31--16. We shift
  // the bits above 31 down by 11 bits.
  unsigned Alignment = (EncodedAttrs & (0xffffULL << 16)) >> 16;
  assert((!Alignment || isPowerOf2_32(Alignment)) &&(static_cast <bool> ((!Alignment || isPowerOf2_32(Alignment )) && "Alignment must be a power of two.") ? void (0) : __assert_fail ("(!Alignment || isPowerOf2_32(Alignment)) && \"Alignment must be a power of two.\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 1212, __extension__ __PRETTY_FUNCTION__))
         "Alignment must be a power of two.")(static_cast <bool> ((!Alignment || isPowerOf2_32(Alignment )) && "Alignment must be a power of two.") ? void (0) : __assert_fail ("(!Alignment || isPowerOf2_32(Alignment)) && \"Alignment must be a power of two.\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 1212, __extension__ __PRETTY_FUNCTION__));

  if (Alignment)
    B.addAlignmentAttr(Alignment);
  // Reassemble the raw attribute word: low 16 bits stay in place, bits
  // 32-51 shift down by 11 to sit above them (mirrors the encoder).
  addRawAttributeValue(B, ((EncodedAttrs & (0xfffffULL << 32)) >> 11) |
                          (EncodedAttrs & 0xffff));
}
1219 | ||||
/// Parse the PARAMATTR block, populating MAttributes with one AttributeList
/// per record for later lookup by attribute index.  Only one such block is
/// permitted per module.
Error BitcodeReader::parseAttributeBlock() {
  if (Stream.EnterSubBlock(bitc::PARAMATTR_BLOCK_ID))
    return error("Invalid record");

  if (!MAttributes.empty())
    return error("Invalid multiple blocks");

  SmallVector<uint64_t, 64> Record;

  SmallVector<AttributeList, 8> Attrs;

  // Read all the records.
  while (true) {
    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();

    switch (Entry.Kind) {
    case BitstreamEntry::SubBlock: // Handled for us already.
    case BitstreamEntry::Error:
      return error("Malformed block");
    case BitstreamEntry::EndBlock:
      return Error::success();
    case BitstreamEntry::Record:
      // The interesting case.
      break;
    }

    // Read a record.
    Record.clear();
    switch (Stream.readRecord(Entry.ID, Record)) {
    default: // Default behavior: ignore.
      break;
    case bitc::PARAMATTR_CODE_ENTRY_OLD: // ENTRY: [paramidx0, attr0, ...]
      // FIXME: Remove in 4.0.
      // Legacy encoding: (index, packed-attribute) pairs, so the record
      // length must be even.
      if (Record.size() & 1)
        return error("Invalid record");

      for (unsigned i = 0, e = Record.size(); i != e; i += 2) {
        AttrBuilder B;
        decodeLLVMAttributesForBitcode(B, Record[i+1]);
        Attrs.push_back(AttributeList::get(Context, Record[i], B));
      }

      MAttributes.push_back(AttributeList::get(Context, Attrs));
      Attrs.clear();
      break;
    case bitc::PARAMATTR_CODE_ENTRY: // ENTRY: [attrgrp0, attrgrp1, ...]
      // Modern encoding: each element indexes a previously parsed
      // attribute group.
      for (unsigned i = 0, e = Record.size(); i != e; ++i)
        Attrs.push_back(MAttributeGroups[Record[i]]);

      MAttributes.push_back(AttributeList::get(Context, Attrs));
      Attrs.clear();
      break;
    }
  }
}
1275 | ||||
/// Translate an attribute-kind record code (bitc::ATTR_KIND_*) into the
/// in-memory Attribute::AttrKind enumerator.
// Returns Attribute::None on unrecognized codes.
static Attribute::AttrKind getAttrFromCode(uint64_t Code) {
  switch (Code) {
  default:
    return Attribute::None;
  case bitc::ATTR_KIND_ALIGNMENT:
    return Attribute::Alignment;
  case bitc::ATTR_KIND_ALWAYS_INLINE:
    return Attribute::AlwaysInline;
  case bitc::ATTR_KIND_ARGMEMONLY:
    return Attribute::ArgMemOnly;
  case bitc::ATTR_KIND_BUILTIN:
    return Attribute::Builtin;
  case bitc::ATTR_KIND_BY_VAL:
    return Attribute::ByVal;
  case bitc::ATTR_KIND_IN_ALLOCA:
    return Attribute::InAlloca;
  case bitc::ATTR_KIND_COLD:
    return Attribute::Cold;
  case bitc::ATTR_KIND_CONVERGENT:
    return Attribute::Convergent;
  case bitc::ATTR_KIND_INACCESSIBLEMEM_ONLY:
    return Attribute::InaccessibleMemOnly;
  case bitc::ATTR_KIND_INACCESSIBLEMEM_OR_ARGMEMONLY:
    return Attribute::InaccessibleMemOrArgMemOnly;
  case bitc::ATTR_KIND_INLINE_HINT:
    return Attribute::InlineHint;
  case bitc::ATTR_KIND_IN_REG:
    return Attribute::InReg;
  case bitc::ATTR_KIND_JUMP_TABLE:
    return Attribute::JumpTable;
  case bitc::ATTR_KIND_MIN_SIZE:
    return Attribute::MinSize;
  case bitc::ATTR_KIND_NAKED:
    return Attribute::Naked;
  case bitc::ATTR_KIND_NEST:
    return Attribute::Nest;
  case bitc::ATTR_KIND_NO_ALIAS:
    return Attribute::NoAlias;
  case bitc::ATTR_KIND_NO_BUILTIN:
    return Attribute::NoBuiltin;
  case bitc::ATTR_KIND_NO_CAPTURE:
    return Attribute::NoCapture;
  case bitc::ATTR_KIND_NO_DUPLICATE:
    return Attribute::NoDuplicate;
  case bitc::ATTR_KIND_NO_IMPLICIT_FLOAT:
    return Attribute::NoImplicitFloat;
  case bitc::ATTR_KIND_NO_INLINE:
    return Attribute::NoInline;
  case bitc::ATTR_KIND_NO_RECURSE:
    return Attribute::NoRecurse;
  case bitc::ATTR_KIND_NON_LAZY_BIND:
    return Attribute::NonLazyBind;
  case bitc::ATTR_KIND_NON_NULL:
    return Attribute::NonNull;
  case bitc::ATTR_KIND_DEREFERENCEABLE:
    return Attribute::Dereferenceable;
  case bitc::ATTR_KIND_DEREFERENCEABLE_OR_NULL:
    return Attribute::DereferenceableOrNull;
  case bitc::ATTR_KIND_ALLOC_SIZE:
    return Attribute::AllocSize;
  case bitc::ATTR_KIND_NO_RED_ZONE:
    return Attribute::NoRedZone;
  case bitc::ATTR_KIND_NO_RETURN:
    return Attribute::NoReturn;
  case bitc::ATTR_KIND_NO_UNWIND:
    return Attribute::NoUnwind;
  case bitc::ATTR_KIND_OPTIMIZE_FOR_SIZE:
    return Attribute::OptimizeForSize;
  case bitc::ATTR_KIND_OPTIMIZE_NONE:
    return Attribute::OptimizeNone;
  case bitc::ATTR_KIND_READ_NONE:
    return Attribute::ReadNone;
  case bitc::ATTR_KIND_READ_ONLY:
    return Attribute::ReadOnly;
  case bitc::ATTR_KIND_RETURNED:
    return Attribute::Returned;
  case bitc::ATTR_KIND_RETURNS_TWICE:
    return Attribute::ReturnsTwice;
  case bitc::ATTR_KIND_S_EXT:
    return Attribute::SExt;
  case bitc::ATTR_KIND_SPECULATABLE:
    return Attribute::Speculatable;
  case bitc::ATTR_KIND_STACK_ALIGNMENT:
    return Attribute::StackAlignment;
  case bitc::ATTR_KIND_STACK_PROTECT:
    return Attribute::StackProtect;
  case bitc::ATTR_KIND_STACK_PROTECT_REQ:
    return Attribute::StackProtectReq;
  case bitc::ATTR_KIND_STACK_PROTECT_STRONG:
    return Attribute::StackProtectStrong;
  case bitc::ATTR_KIND_SAFESTACK:
    return Attribute::SafeStack;
  case bitc::ATTR_KIND_STRICT_FP:
    return Attribute::StrictFP;
  case bitc::ATTR_KIND_STRUCT_RET:
    return Attribute::StructRet;
  case bitc::ATTR_KIND_SANITIZE_ADDRESS:
    return Attribute::SanitizeAddress;
  case bitc::ATTR_KIND_SANITIZE_HWADDRESS:
    return Attribute::SanitizeHWAddress;
  case bitc::ATTR_KIND_SANITIZE_THREAD:
    return Attribute::SanitizeThread;
  case bitc::ATTR_KIND_SANITIZE_MEMORY:
    return Attribute::SanitizeMemory;
  case bitc::ATTR_KIND_SWIFT_ERROR:
    return Attribute::SwiftError;
  case bitc::ATTR_KIND_SWIFT_SELF:
    return Attribute::SwiftSelf;
  case bitc::ATTR_KIND_UW_TABLE:
    return Attribute::UWTable;
  case bitc::ATTR_KIND_WRITEONLY:
    return Attribute::WriteOnly;
  case bitc::ATTR_KIND_Z_EXT:
    return Attribute::ZExt;
  }
}
1393 | ||||
1394 | Error BitcodeReader::parseAlignmentValue(uint64_t Exponent, | |||
1395 | unsigned &Alignment) { | |||
1396 | // Note: Alignment in bitcode files is incremented by 1, so that zero | |||
1397 | // can be used for default alignment. | |||
1398 | if (Exponent > Value::MaxAlignmentExponent + 1) | |||
1399 | return error("Invalid alignment value"); | |||
1400 | Alignment = (1 << static_cast<unsigned>(Exponent)) >> 1; | |||
1401 | return Error::success(); | |||
1402 | } | |||
1403 | ||||
1404 | Error BitcodeReader::parseAttrKind(uint64_t Code, Attribute::AttrKind *Kind) { | |||
1405 | *Kind = getAttrFromCode(Code); | |||
1406 | if (*Kind == Attribute::None) | |||
1407 | return error("Unknown attribute kind (" + Twine(Code) + ")"); | |||
1408 | return Error::success(); | |||
1409 | } | |||
1410 | ||||
1411 | Error BitcodeReader::parseAttributeGroupBlock() { | |||
1412 | if (Stream.EnterSubBlock(bitc::PARAMATTR_GROUP_BLOCK_ID)) | |||
1413 | return error("Invalid record"); | |||
1414 | ||||
1415 | if (!MAttributeGroups.empty()) | |||
1416 | return error("Invalid multiple blocks"); | |||
1417 | ||||
1418 | SmallVector<uint64_t, 64> Record; | |||
1419 | ||||
1420 | // Read all the records. | |||
1421 | while (true) { | |||
1422 | BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); | |||
1423 | ||||
1424 | switch (Entry.Kind) { | |||
1425 | case BitstreamEntry::SubBlock: // Handled for us already. | |||
1426 | case BitstreamEntry::Error: | |||
1427 | return error("Malformed block"); | |||
1428 | case BitstreamEntry::EndBlock: | |||
1429 | return Error::success(); | |||
1430 | case BitstreamEntry::Record: | |||
1431 | // The interesting case. | |||
1432 | break; | |||
1433 | } | |||
1434 | ||||
1435 | // Read a record. | |||
1436 | Record.clear(); | |||
1437 | switch (Stream.readRecord(Entry.ID, Record)) { | |||
1438 | default: // Default behavior: ignore. | |||
1439 | break; | |||
1440 | case bitc::PARAMATTR_GRP_CODE_ENTRY: { // ENTRY: [grpid, idx, a0, a1, ...] | |||
1441 | if (Record.size() < 3) | |||
1442 | return error("Invalid record"); | |||
1443 | ||||
1444 | uint64_t GrpID = Record[0]; | |||
1445 | uint64_t Idx = Record[1]; // Index of the object this attribute refers to. | |||
1446 | ||||
1447 | AttrBuilder B; | |||
1448 | for (unsigned i = 2, e = Record.size(); i != e; ++i) { | |||
1449 | if (Record[i] == 0) { // Enum attribute | |||
1450 | Attribute::AttrKind Kind; | |||
1451 | if (Error Err = parseAttrKind(Record[++i], &Kind)) | |||
1452 | return Err; | |||
1453 | ||||
1454 | B.addAttribute(Kind); | |||
1455 | } else if (Record[i] == 1) { // Integer attribute | |||
1456 | Attribute::AttrKind Kind; | |||
1457 | if (Error Err = parseAttrKind(Record[++i], &Kind)) | |||
1458 | return Err; | |||
1459 | if (Kind == Attribute::Alignment) | |||
1460 | B.addAlignmentAttr(Record[++i]); | |||
1461 | else if (Kind == Attribute::StackAlignment) | |||
1462 | B.addStackAlignmentAttr(Record[++i]); | |||
1463 | else if (Kind == Attribute::Dereferenceable) | |||
1464 | B.addDereferenceableAttr(Record[++i]); | |||
1465 | else if (Kind == Attribute::DereferenceableOrNull) | |||
1466 | B.addDereferenceableOrNullAttr(Record[++i]); | |||
1467 | else if (Kind == Attribute::AllocSize) | |||
1468 | B.addAllocSizeAttrFromRawRepr(Record[++i]); | |||
1469 | } else { // String attribute | |||
1470 | assert((Record[i] == 3 || Record[i] == 4) &&(static_cast <bool> ((Record[i] == 3 || Record[i] == 4) && "Invalid attribute group entry") ? void (0) : __assert_fail ("(Record[i] == 3 || Record[i] == 4) && \"Invalid attribute group entry\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 1471, __extension__ __PRETTY_FUNCTION__)) | |||
1471 | "Invalid attribute group entry")(static_cast <bool> ((Record[i] == 3 || Record[i] == 4) && "Invalid attribute group entry") ? void (0) : __assert_fail ("(Record[i] == 3 || Record[i] == 4) && \"Invalid attribute group entry\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 1471, __extension__ __PRETTY_FUNCTION__)); | |||
1472 | bool HasValue = (Record[i++] == 4); | |||
1473 | SmallString<64> KindStr; | |||
1474 | SmallString<64> ValStr; | |||
1475 | ||||
1476 | while (Record[i] != 0 && i != e) | |||
1477 | KindStr += Record[i++]; | |||
1478 | assert(Record[i] == 0 && "Kind string not null terminated")(static_cast <bool> (Record[i] == 0 && "Kind string not null terminated" ) ? void (0) : __assert_fail ("Record[i] == 0 && \"Kind string not null terminated\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 1478, __extension__ __PRETTY_FUNCTION__)); | |||
1479 | ||||
1480 | if (HasValue) { | |||
1481 | // Has a value associated with it. | |||
1482 | ++i; // Skip the '0' that terminates the "kind" string. | |||
1483 | while (Record[i] != 0 && i != e) | |||
1484 | ValStr += Record[i++]; | |||
1485 | assert(Record[i] == 0 && "Value string not null terminated")(static_cast <bool> (Record[i] == 0 && "Value string not null terminated" ) ? void (0) : __assert_fail ("Record[i] == 0 && \"Value string not null terminated\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 1485, __extension__ __PRETTY_FUNCTION__)); | |||
1486 | } | |||
1487 | ||||
1488 | B.addAttribute(KindStr.str(), ValStr.str()); | |||
1489 | } | |||
1490 | } | |||
1491 | ||||
1492 | MAttributeGroups[GrpID] = AttributeList::get(Context, Idx, B); | |||
1493 | break; | |||
1494 | } | |||
1495 | } | |||
1496 | } | |||
1497 | } | |||
1498 | ||||
/// Enter the new-style type table block and parse its contents into TypeList.
Error BitcodeReader::parseTypeTable() {
  if (Stream.EnterSubBlock(bitc::TYPE_BLOCK_ID_NEW))
    return error("Invalid record");

  return parseTypeTableBody();
}
1505 | ||||
/// Parse the body of the type table block, filling TypeList with one Type per
/// record.  Named and opaque structs may have been forward-referenced earlier
/// (leaving a placeholder StructType in TypeList); those placeholders are
/// completed here.
Error BitcodeReader::parseTypeTableBody() {
  if (!TypeList.empty())
    return error("Invalid multiple blocks");

  SmallVector<uint64_t, 64> Record;
  // Index of the next TypeList slot to fill; must equal TypeList.size() by
  // the time the block ends.
  unsigned NumRecords = 0;

  // Name carried over from a TYPE_CODE_STRUCT_NAME record, consumed by the
  // next STRUCT_NAMED or OPAQUE record.
  SmallString<64> TypeName;

  // Read all the records for this type table.
  while (true) {
    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();

    switch (Entry.Kind) {
    case BitstreamEntry::SubBlock: // Handled for us already.
    case BitstreamEntry::Error:
      return error("Malformed block");
    case BitstreamEntry::EndBlock:
      // Every slot reserved by TYPE_CODE_NUMENTRY must have been filled.
      if (NumRecords != TypeList.size())
        return error("Malformed block");
      return Error::success();
    case BitstreamEntry::Record:
      // The interesting case.
      break;
    }

    // Read a record.
    Record.clear();
    Type *ResultTy = nullptr;
    switch (Stream.readRecord(Entry.ID, Record)) {
    default:
      return error("Invalid value");
    case bitc::TYPE_CODE_NUMENTRY: // TYPE_CODE_NUMENTRY: [numentries]
      // TYPE_CODE_NUMENTRY contains a count of the number of types in the
      // type list. This allows us to reserve space.
      if (Record.size() < 1)
        return error("Invalid record");
      TypeList.resize(Record[0]);
      continue;
    case bitc::TYPE_CODE_VOID:      // VOID
      ResultTy = Type::getVoidTy(Context);
      break;
    case bitc::TYPE_CODE_HALF:     // HALF
      ResultTy = Type::getHalfTy(Context);
      break;
    case bitc::TYPE_CODE_FLOAT:     // FLOAT
      ResultTy = Type::getFloatTy(Context);
      break;
    case bitc::TYPE_CODE_DOUBLE:    // DOUBLE
      ResultTy = Type::getDoubleTy(Context);
      break;
    case bitc::TYPE_CODE_X86_FP80:  // X86_FP80
      ResultTy = Type::getX86_FP80Ty(Context);
      break;
    case bitc::TYPE_CODE_FP128:     // FP128
      ResultTy = Type::getFP128Ty(Context);
      break;
    case bitc::TYPE_CODE_PPC_FP128: // PPC_FP128
      ResultTy = Type::getPPC_FP128Ty(Context);
      break;
    case bitc::TYPE_CODE_LABEL:     // LABEL
      ResultTy = Type::getLabelTy(Context);
      break;
    case bitc::TYPE_CODE_METADATA:  // METADATA
      ResultTy = Type::getMetadataTy(Context);
      break;
    case bitc::TYPE_CODE_X86_MMX:   // X86_MMX
      ResultTy = Type::getX86_MMXTy(Context);
      break;
    case bitc::TYPE_CODE_TOKEN:     // TOKEN
      ResultTy = Type::getTokenTy(Context);
      break;
    case bitc::TYPE_CODE_INTEGER: { // INTEGER: [width]
      if (Record.size() < 1)
        return error("Invalid record");

      uint64_t NumBits = Record[0];
      if (NumBits < IntegerType::MIN_INT_BITS ||
          NumBits > IntegerType::MAX_INT_BITS)
        return error("Bitwidth for integer type out of range");
      ResultTy = IntegerType::get(Context, NumBits);
      break;
    }
    case bitc::TYPE_CODE_POINTER: { // POINTER: [pointee type] or
                                    //          [pointee type, address space]
      if (Record.size() < 1)
        return error("Invalid record");
      unsigned AddressSpace = 0;
      if (Record.size() == 2)
        AddressSpace = Record[1];
      ResultTy = getTypeByID(Record[0]);
      if (!ResultTy ||
          !PointerType::isValidElementType(ResultTy))
        return error("Invalid type");
      ResultTy = PointerType::get(ResultTy, AddressSpace);
      break;
    }
    case bitc::TYPE_CODE_FUNCTION_OLD: {
      // FIXME: attrid is dead, remove it in LLVM 4.0
      // FUNCTION: [vararg, attrid, retty, paramty x N]
      if (Record.size() < 3)
        return error("Invalid record");
      SmallVector<Type*, 8> ArgTys;
      for (unsigned i = 3, e = Record.size(); i != e; ++i) {
        if (Type *T = getTypeByID(Record[i]))
          ArgTys.push_back(T);
        else
          break;
      }

      ResultTy = getTypeByID(Record[2]);
      // A short ArgTys means some parameter type above failed to resolve.
      if (!ResultTy || ArgTys.size() < Record.size()-3)
        return error("Invalid type");

      ResultTy = FunctionType::get(ResultTy, ArgTys, Record[0]);
      break;
    }
    case bitc::TYPE_CODE_FUNCTION: {
      // FUNCTION: [vararg, retty, paramty x N]
      if (Record.size() < 2)
        return error("Invalid record");
      SmallVector<Type*, 8> ArgTys;
      for (unsigned i = 2, e = Record.size(); i != e; ++i) {
        if (Type *T = getTypeByID(Record[i])) {
          if (!FunctionType::isValidArgumentType(T))
            return error("Invalid function argument type");
          ArgTys.push_back(T);
        }
        else
          break;
      }

      ResultTy = getTypeByID(Record[1]);
      // A short ArgTys means some parameter type above failed to resolve.
      if (!ResultTy || ArgTys.size() < Record.size()-2)
        return error("Invalid type");

      ResultTy = FunctionType::get(ResultTy, ArgTys, Record[0]);
      break;
    }
    case bitc::TYPE_CODE_STRUCT_ANON: {  // STRUCT: [ispacked, eltty x N]
      if (Record.size() < 1)
        return error("Invalid record");
      SmallVector<Type*, 8> EltTys;
      for (unsigned i = 1, e = Record.size(); i != e; ++i) {
        if (Type *T = getTypeByID(Record[i]))
          EltTys.push_back(T);
        else
          break;
      }
      if (EltTys.size() != Record.size()-1)
        return error("Invalid type");
      ResultTy = StructType::get(Context, EltTys, Record[0]);
      break;
    }
    case bitc::TYPE_CODE_STRUCT_NAME:   // STRUCT_NAME: [strchr x N]
      // Stash the name; the following STRUCT_NAMED/OPAQUE record consumes it.
      if (convertToString(Record, 0, TypeName))
        return error("Invalid record");
      continue;

    case bitc::TYPE_CODE_STRUCT_NAMED: { // STRUCT: [ispacked, eltty x N]
      if (Record.size() < 1)
        return error("Invalid record");

      if (NumRecords >= TypeList.size())
        return error("Invalid TYPE table");

      // Check to see if this was forward referenced, if so fill in the temp.
      StructType *Res = cast_or_null<StructType>(TypeList[NumRecords]);
      if (Res) {
        Res->setName(TypeName);
        TypeList[NumRecords] = nullptr;
      } else  // Otherwise, create a new struct.
        Res = createIdentifiedStructType(Context, TypeName);
      TypeName.clear();

      SmallVector<Type*, 8> EltTys;
      for (unsigned i = 1, e = Record.size(); i != e; ++i) {
        if (Type *T = getTypeByID(Record[i]))
          EltTys.push_back(T);
        else
          break;
      }
      if (EltTys.size() != Record.size()-1)
        return error("Invalid record");
      Res->setBody(EltTys, Record[0]);
      ResultTy = Res;
      break;
    }
    case bitc::TYPE_CODE_OPAQUE: {       // OPAQUE: []
      if (Record.size() != 1)
        return error("Invalid record");

      if (NumRecords >= TypeList.size())
        return error("Invalid TYPE table");

      // Check to see if this was forward referenced, if so fill in the temp.
      StructType *Res = cast_or_null<StructType>(TypeList[NumRecords]);
      if (Res) {
        Res->setName(TypeName);
        TypeList[NumRecords] = nullptr;
      } else  // Otherwise, create a new struct with no body.
        Res = createIdentifiedStructType(Context, TypeName);
      TypeName.clear();
      ResultTy = Res;
      break;
    }
    case bitc::TYPE_CODE_ARRAY:     // ARRAY: [numelts, eltty]
      if (Record.size() < 2)
        return error("Invalid record");
      ResultTy = getTypeByID(Record[1]);
      if (!ResultTy || !ArrayType::isValidElementType(ResultTy))
        return error("Invalid type");
      ResultTy = ArrayType::get(ResultTy, Record[0]);
      break;
    case bitc::TYPE_CODE_VECTOR:    // VECTOR: [numelts, eltty]
      if (Record.size() < 2)
        return error("Invalid record");
      if (Record[0] == 0)
        return error("Invalid vector length");
      ResultTy = getTypeByID(Record[1]);
      if (!ResultTy || !StructType::isValidElementType(ResultTy))
        return error("Invalid type");
      ResultTy = VectorType::get(ResultTy, Record[0]);
      break;
    }

    // Only a placeholder for a forward-referenced named struct may already
    // occupy this slot, and those were cleared above when completed.
    if (NumRecords >= TypeList.size())
      return error("Invalid TYPE table");
    if (TypeList[NumRecords])
      return error(
          "Invalid TYPE table: Only named structs can be forward referenced");
    assert(ResultTy && "Didn't read a type?");
    TypeList[NumRecords++] = ResultTy;
  }
}
1741 | ||||
1742 | Error BitcodeReader::parseOperandBundleTags() { | |||
1743 | if (Stream.EnterSubBlock(bitc::OPERAND_BUNDLE_TAGS_BLOCK_ID)) | |||
1744 | return error("Invalid record"); | |||
1745 | ||||
1746 | if (!BundleTags.empty()) | |||
1747 | return error("Invalid multiple blocks"); | |||
1748 | ||||
1749 | SmallVector<uint64_t, 64> Record; | |||
1750 | ||||
1751 | while (true) { | |||
1752 | BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); | |||
1753 | ||||
1754 | switch (Entry.Kind) { | |||
1755 | case BitstreamEntry::SubBlock: // Handled for us already. | |||
1756 | case BitstreamEntry::Error: | |||
1757 | return error("Malformed block"); | |||
1758 | case BitstreamEntry::EndBlock: | |||
1759 | return Error::success(); | |||
1760 | case BitstreamEntry::Record: | |||
1761 | // The interesting case. | |||
1762 | break; | |||
1763 | } | |||
1764 | ||||
1765 | // Tags are implicitly mapped to integers by their order. | |||
1766 | ||||
1767 | if (Stream.readRecord(Entry.ID, Record) != bitc::OPERAND_BUNDLE_TAG) | |||
1768 | return error("Invalid record"); | |||
1769 | ||||
1770 | // OPERAND_BUNDLE_TAG: [strchr x N] | |||
1771 | BundleTags.emplace_back(); | |||
1772 | if (convertToString(Record, 0, BundleTags.back())) | |||
1773 | return error("Invalid record"); | |||
1774 | Record.clear(); | |||
1775 | } | |||
1776 | } | |||
1777 | ||||
1778 | Error BitcodeReader::parseSyncScopeNames() { | |||
1779 | if (Stream.EnterSubBlock(bitc::SYNC_SCOPE_NAMES_BLOCK_ID)) | |||
1780 | return error("Invalid record"); | |||
1781 | ||||
1782 | if (!SSIDs.empty()) | |||
1783 | return error("Invalid multiple synchronization scope names blocks"); | |||
1784 | ||||
1785 | SmallVector<uint64_t, 64> Record; | |||
1786 | while (true) { | |||
1787 | BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); | |||
1788 | switch (Entry.Kind) { | |||
1789 | case BitstreamEntry::SubBlock: // Handled for us already. | |||
1790 | case BitstreamEntry::Error: | |||
1791 | return error("Malformed block"); | |||
1792 | case BitstreamEntry::EndBlock: | |||
1793 | if (SSIDs.empty()) | |||
1794 | return error("Invalid empty synchronization scope names block"); | |||
1795 | return Error::success(); | |||
1796 | case BitstreamEntry::Record: | |||
1797 | // The interesting case. | |||
1798 | break; | |||
1799 | } | |||
1800 | ||||
1801 | // Synchronization scope names are implicitly mapped to synchronization | |||
1802 | // scope IDs by their order. | |||
1803 | ||||
1804 | if (Stream.readRecord(Entry.ID, Record) != bitc::SYNC_SCOPE_NAME) | |||
1805 | return error("Invalid record"); | |||
1806 | ||||
1807 | SmallString<16> SSN; | |||
1808 | if (convertToString(Record, 0, SSN)) | |||
1809 | return error("Invalid record"); | |||
1810 | ||||
1811 | SSIDs.push_back(Context.getOrInsertSyncScopeID(SSN)); | |||
1812 | Record.clear(); | |||
1813 | } | |||
1814 | } | |||
1815 | ||||
1816 | /// Associate a value with its name from the given index in the provided record. | |||
1817 | Expected<Value *> BitcodeReader::recordValue(SmallVectorImpl<uint64_t> &Record, | |||
1818 | unsigned NameIndex, Triple &TT) { | |||
1819 | SmallString<128> ValueName; | |||
1820 | if (convertToString(Record, NameIndex, ValueName)) | |||
1821 | return error("Invalid record"); | |||
1822 | unsigned ValueID = Record[0]; | |||
1823 | if (ValueID >= ValueList.size() || !ValueList[ValueID]) | |||
1824 | return error("Invalid record"); | |||
1825 | Value *V = ValueList[ValueID]; | |||
1826 | ||||
1827 | StringRef NameStr(ValueName.data(), ValueName.size()); | |||
1828 | if (NameStr.find_first_of(0) != StringRef::npos) | |||
1829 | return error("Invalid value name"); | |||
1830 | V->setName(NameStr); | |||
1831 | auto *GO = dyn_cast<GlobalObject>(V); | |||
1832 | if (GO) { | |||
1833 | if (GO->getComdat() == reinterpret_cast<Comdat *>(1)) { | |||
1834 | if (TT.supportsCOMDAT()) | |||
1835 | GO->setComdat(TheModule->getOrInsertComdat(V->getName())); | |||
1836 | else | |||
1837 | GO->setComdat(nullptr); | |||
1838 | } | |||
1839 | } | |||
1840 | return V; | |||
1841 | } | |||
1842 | ||||
1843 | /// Helper to note and return the current location, and jump to the given | |||
1844 | /// offset. | |||
1845 | static uint64_t jumpToValueSymbolTable(uint64_t Offset, | |||
1846 | BitstreamCursor &Stream) { | |||
1847 | // Save the current parsing location so we can jump back at the end | |||
1848 | // of the VST read. | |||
1849 | uint64_t CurrentBit = Stream.GetCurrentBitNo(); | |||
1850 | Stream.JumpToBit(Offset * 32); | |||
1851 | #ifndef NDEBUG | |||
1852 | // Do some checking if we are in debug mode. | |||
1853 | BitstreamEntry Entry = Stream.advance(); | |||
1854 | assert(Entry.Kind == BitstreamEntry::SubBlock)(static_cast <bool> (Entry.Kind == BitstreamEntry::SubBlock ) ? void (0) : __assert_fail ("Entry.Kind == BitstreamEntry::SubBlock" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 1854, __extension__ __PRETTY_FUNCTION__)); | |||
1855 | assert(Entry.ID == bitc::VALUE_SYMTAB_BLOCK_ID)(static_cast <bool> (Entry.ID == bitc::VALUE_SYMTAB_BLOCK_ID ) ? void (0) : __assert_fail ("Entry.ID == bitc::VALUE_SYMTAB_BLOCK_ID" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 1855, __extension__ __PRETTY_FUNCTION__)); | |||
1856 | #else | |||
1857 | // In NDEBUG mode ignore the output so we don't get an unused variable | |||
1858 | // warning. | |||
1859 | Stream.advance(); | |||
1860 | #endif | |||
1861 | return CurrentBit; | |||
1862 | } | |||
1863 | ||||
1864 | void BitcodeReader::setDeferredFunctionInfo(unsigned FuncBitcodeOffsetDelta, | |||
1865 | Function *F, | |||
1866 | ArrayRef<uint64_t> Record) { | |||
1867 | // Note that we subtract 1 here because the offset is relative to one word | |||
1868 | // before the start of the identification or module block, which was | |||
1869 | // historically always the start of the regular bitcode header. | |||
1870 | uint64_t FuncWordOffset = Record[1] - 1; | |||
1871 | uint64_t FuncBitOffset = FuncWordOffset * 32; | |||
1872 | DeferredFunctionInfo[F] = FuncBitOffset + FuncBitcodeOffsetDelta; | |||
1873 | // Set the LastFunctionBlockBit to point to the last function block. | |||
1874 | // Later when parsing is resumed after function materialization, | |||
1875 | // we can simply skip that last function block. | |||
1876 | if (FuncBitOffset > LastFunctionBlockBit) | |||
1877 | LastFunctionBlockBit = FuncBitOffset; | |||
1878 | } | |||
1879 | ||||
1880 | /// Read a new-style GlobalValue symbol table. | |||
1881 | Error BitcodeReader::parseGlobalValueSymbolTable() { | |||
1882 | unsigned FuncBitcodeOffsetDelta = | |||
1883 | Stream.getAbbrevIDWidth() + bitc::BlockIDWidth; | |||
1884 | ||||
1885 | if (Stream.EnterSubBlock(bitc::VALUE_SYMTAB_BLOCK_ID)) | |||
1886 | return error("Invalid record"); | |||
1887 | ||||
1888 | SmallVector<uint64_t, 64> Record; | |||
1889 | while (true) { | |||
1890 | BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); | |||
1891 | ||||
1892 | switch (Entry.Kind) { | |||
1893 | case BitstreamEntry::SubBlock: | |||
1894 | case BitstreamEntry::Error: | |||
1895 | return error("Malformed block"); | |||
1896 | case BitstreamEntry::EndBlock: | |||
1897 | return Error::success(); | |||
1898 | case BitstreamEntry::Record: | |||
1899 | break; | |||
1900 | } | |||
1901 | ||||
1902 | Record.clear(); | |||
1903 | switch (Stream.readRecord(Entry.ID, Record)) { | |||
1904 | case bitc::VST_CODE_FNENTRY: // [valueid, offset] | |||
1905 | setDeferredFunctionInfo(FuncBitcodeOffsetDelta, | |||
1906 | cast<Function>(ValueList[Record[0]]), Record); | |||
1907 | break; | |||
1908 | } | |||
1909 | } | |||
1910 | } | |||
1911 | ||||
1912 | /// Parse the value symbol table at either the current parsing location or | |||
1913 | /// at the given bit offset if provided. | |||
1914 | Error BitcodeReader::parseValueSymbolTable(uint64_t Offset) { | |||
1915 | uint64_t CurrentBit; | |||
1916 | // Pass in the Offset to distinguish between calling for the module-level | |||
1917 | // VST (where we want to jump to the VST offset) and the function-level | |||
1918 | // VST (where we don't). | |||
1919 | if (Offset > 0) { | |||
1920 | CurrentBit = jumpToValueSymbolTable(Offset, Stream); | |||
1921 | // If this module uses a string table, read this as a module-level VST. | |||
1922 | if (UseStrtab) { | |||
1923 | if (Error Err = parseGlobalValueSymbolTable()) | |||
1924 | return Err; | |||
1925 | Stream.JumpToBit(CurrentBit); | |||
1926 | return Error::success(); | |||
1927 | } | |||
1928 | // Otherwise, the VST will be in a similar format to a function-level VST, | |||
1929 | // and will contain symbol names. | |||
1930 | } | |||
1931 | ||||
1932 | // Compute the delta between the bitcode indices in the VST (the word offset | |||
1933 | // to the word-aligned ENTER_SUBBLOCK for the function block, and that | |||
1934 | // expected by the lazy reader. The reader's EnterSubBlock expects to have | |||
1935 | // already read the ENTER_SUBBLOCK code (size getAbbrevIDWidth) and BlockID | |||
1936 | // (size BlockIDWidth). Note that we access the stream's AbbrevID width here | |||
1937 | // just before entering the VST subblock because: 1) the EnterSubBlock | |||
1938 | // changes the AbbrevID width; 2) the VST block is nested within the same | |||
1939 | // outer MODULE_BLOCK as the FUNCTION_BLOCKs and therefore have the same | |||
1940 | // AbbrevID width before calling EnterSubBlock; and 3) when we want to | |||
1941 | // jump to the FUNCTION_BLOCK using this offset later, we don't want | |||
1942 | // to rely on the stream's AbbrevID width being that of the MODULE_BLOCK. | |||
1943 | unsigned FuncBitcodeOffsetDelta = | |||
1944 | Stream.getAbbrevIDWidth() + bitc::BlockIDWidth; | |||
1945 | ||||
1946 | if (Stream.EnterSubBlock(bitc::VALUE_SYMTAB_BLOCK_ID)) | |||
1947 | return error("Invalid record"); | |||
1948 | ||||
1949 | SmallVector<uint64_t, 64> Record; | |||
1950 | ||||
1951 | Triple TT(TheModule->getTargetTriple()); | |||
1952 | ||||
1953 | // Read all the records for this value table. | |||
1954 | SmallString<128> ValueName; | |||
1955 | ||||
1956 | while (true) { | |||
1957 | BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); | |||
1958 | ||||
1959 | switch (Entry.Kind) { | |||
1960 | case BitstreamEntry::SubBlock: // Handled for us already. | |||
1961 | case BitstreamEntry::Error: | |||
1962 | return error("Malformed block"); | |||
1963 | case BitstreamEntry::EndBlock: | |||
1964 | if (Offset > 0) | |||
1965 | Stream.JumpToBit(CurrentBit); | |||
1966 | return Error::success(); | |||
1967 | case BitstreamEntry::Record: | |||
1968 | // The interesting case. | |||
1969 | break; | |||
1970 | } | |||
1971 | ||||
1972 | // Read a record. | |||
1973 | Record.clear(); | |||
1974 | switch (Stream.readRecord(Entry.ID, Record)) { | |||
1975 | default: // Default behavior: unknown type. | |||
1976 | break; | |||
1977 | case bitc::VST_CODE_ENTRY: { // VST_CODE_ENTRY: [valueid, namechar x N] | |||
1978 | Expected<Value *> ValOrErr = recordValue(Record, 1, TT); | |||
1979 | if (Error Err = ValOrErr.takeError()) | |||
1980 | return Err; | |||
1981 | ValOrErr.get(); | |||
1982 | break; | |||
1983 | } | |||
1984 | case bitc::VST_CODE_FNENTRY: { | |||
1985 | // VST_CODE_FNENTRY: [valueid, offset, namechar x N] | |||
1986 | Expected<Value *> ValOrErr = recordValue(Record, 2, TT); | |||
1987 | if (Error Err = ValOrErr.takeError()) | |||
1988 | return Err; | |||
1989 | Value *V = ValOrErr.get(); | |||
1990 | ||||
1991 | // Ignore function offsets emitted for aliases of functions in older | |||
1992 | // versions of LLVM. | |||
1993 | if (auto *F = dyn_cast<Function>(V)) | |||
1994 | setDeferredFunctionInfo(FuncBitcodeOffsetDelta, F, Record); | |||
1995 | break; | |||
1996 | } | |||
1997 | case bitc::VST_CODE_BBENTRY: { | |||
1998 | if (convertToString(Record, 1, ValueName)) | |||
1999 | return error("Invalid record"); | |||
2000 | BasicBlock *BB = getBasicBlock(Record[0]); | |||
2001 | if (!BB) | |||
2002 | return error("Invalid record"); | |||
2003 | ||||
2004 | BB->setName(StringRef(ValueName.data(), ValueName.size())); | |||
2005 | ValueName.clear(); | |||
2006 | break; | |||
2007 | } | |||
2008 | } | |||
2009 | } | |||
2010 | } | |||
2011 | ||||
2012 | /// Decode a signed value stored with the sign bit in the LSB for dense VBR | |||
2013 | /// encoding. | |||
2014 | uint64_t BitcodeReader::decodeSignRotatedValue(uint64_t V) { | |||
2015 | if ((V & 1) == 0) | |||
2016 | return V >> 1; | |||
2017 | if (V != 1) | |||
2018 | return -(V >> 1); | |||
2019 | // There is no such thing as -0 with integers. "-0" really means MININT. | |||
2020 | return 1ULL << 63; | |||
2021 | } | |||
2022 | ||||
/// Resolve all of the initializers for global values and aliases that we can.
/// Entries whose value ID refers past the end of ValueList are not resolvable
/// yet (they depend on something later in the file) and are pushed back onto
/// the pending member lists for a later pass.
Error BitcodeReader::resolveGlobalAndIndirectSymbolInits() {
  std::vector<std::pair<GlobalVariable *, unsigned>> GlobalInitWorklist;
  std::vector<std::pair<GlobalIndirectSymbol *, unsigned>>
      IndirectSymbolInitWorklist;
  std::vector<std::pair<Function *, unsigned>> FunctionPrefixWorklist;
  std::vector<std::pair<Function *, unsigned>> FunctionPrologueWorklist;
  std::vector<std::pair<Function *, unsigned>> FunctionPersonalityFnWorklist;

  // Swap the pending lists into locals so that unresolved entries can be
  // re-queued on the (now empty) members while we iterate.
  GlobalInitWorklist.swap(GlobalInits);
  IndirectSymbolInitWorklist.swap(IndirectSymbolInits);
  FunctionPrefixWorklist.swap(FunctionPrefixes);
  FunctionPrologueWorklist.swap(FunctionPrologues);
  FunctionPersonalityFnWorklist.swap(FunctionPersonalityFns);

  while (!GlobalInitWorklist.empty()) {
    unsigned ValID = GlobalInitWorklist.back().second;
    if (ValID >= ValueList.size()) {
      // Not ready to resolve this yet, it requires something later in the file.
      GlobalInits.push_back(GlobalInitWorklist.back());
    } else {
      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
        GlobalInitWorklist.back().first->setInitializer(C);
      else
        return error("Expected a constant");
    }
    GlobalInitWorklist.pop_back();
  }

  while (!IndirectSymbolInitWorklist.empty()) {
    unsigned ValID = IndirectSymbolInitWorklist.back().second;
    if (ValID >= ValueList.size()) {
      IndirectSymbolInits.push_back(IndirectSymbolInitWorklist.back());
    } else {
      Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]);
      if (!C)
        return error("Expected a constant");
      GlobalIndirectSymbol *GIS = IndirectSymbolInitWorklist.back().first;
      // An alias must have exactly the type of its aliasee.
      if (isa<GlobalAlias>(GIS) && C->getType() != GIS->getType())
        return error("Alias and aliasee types don't match");
      GIS->setIndirectSymbol(C);
    }
    IndirectSymbolInitWorklist.pop_back();
  }

  while (!FunctionPrefixWorklist.empty()) {
    unsigned ValID = FunctionPrefixWorklist.back().second;
    if (ValID >= ValueList.size()) {
      FunctionPrefixes.push_back(FunctionPrefixWorklist.back());
    } else {
      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
        FunctionPrefixWorklist.back().first->setPrefixData(C);
      else
        return error("Expected a constant");
    }
    FunctionPrefixWorklist.pop_back();
  }

  while (!FunctionPrologueWorklist.empty()) {
    unsigned ValID = FunctionPrologueWorklist.back().second;
    if (ValID >= ValueList.size()) {
      FunctionPrologues.push_back(FunctionPrologueWorklist.back());
    } else {
      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
        FunctionPrologueWorklist.back().first->setPrologueData(C);
      else
        return error("Expected a constant");
    }
    FunctionPrologueWorklist.pop_back();
  }

  while (!FunctionPersonalityFnWorklist.empty()) {
    unsigned ValID = FunctionPersonalityFnWorklist.back().second;
    if (ValID >= ValueList.size()) {
      FunctionPersonalityFns.push_back(FunctionPersonalityFnWorklist.back());
    } else {
      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
        FunctionPersonalityFnWorklist.back().first->setPersonalityFn(C);
      else
        return error("Expected a constant");
    }
    FunctionPersonalityFnWorklist.pop_back();
  }

  return Error::success();
}
2109 | ||||
2110 | static APInt readWideAPInt(ArrayRef<uint64_t> Vals, unsigned TypeBits) { | |||
2111 | SmallVector<uint64_t, 8> Words(Vals.size()); | |||
2112 | transform(Vals, Words.begin(), | |||
2113 | BitcodeReader::decodeSignRotatedValue); | |||
2114 | ||||
2115 | return APInt(TypeBits, Words); | |||
2116 | } | |||
2117 | ||||
2118 | Error BitcodeReader::parseConstants() { | |||
2119 | if (Stream.EnterSubBlock(bitc::CONSTANTS_BLOCK_ID)) | |||
2120 | return error("Invalid record"); | |||
2121 | ||||
2122 | SmallVector<uint64_t, 64> Record; | |||
2123 | ||||
2124 | // Read all the records for this value table. | |||
2125 | Type *CurTy = Type::getInt32Ty(Context); | |||
2126 | unsigned NextCstNo = ValueList.size(); | |||
2127 | ||||
2128 | while (true) { | |||
2129 | BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); | |||
2130 | ||||
2131 | switch (Entry.Kind) { | |||
2132 | case BitstreamEntry::SubBlock: // Handled for us already. | |||
2133 | case BitstreamEntry::Error: | |||
2134 | return error("Malformed block"); | |||
2135 | case BitstreamEntry::EndBlock: | |||
2136 | if (NextCstNo != ValueList.size()) | |||
2137 | return error("Invalid constant reference"); | |||
2138 | ||||
2139 | // Once all the constants have been read, go through and resolve forward | |||
2140 | // references. | |||
2141 | ValueList.resolveConstantForwardRefs(); | |||
2142 | return Error::success(); | |||
2143 | case BitstreamEntry::Record: | |||
2144 | // The interesting case. | |||
2145 | break; | |||
2146 | } | |||
2147 | ||||
2148 | // Read a record. | |||
2149 | Record.clear(); | |||
2150 | Type *VoidType = Type::getVoidTy(Context); | |||
2151 | Value *V = nullptr; | |||
2152 | unsigned BitCode = Stream.readRecord(Entry.ID, Record); | |||
2153 | switch (BitCode) { | |||
2154 | default: // Default behavior: unknown constant | |||
2155 | case bitc::CST_CODE_UNDEF: // UNDEF | |||
2156 | V = UndefValue::get(CurTy); | |||
2157 | break; | |||
2158 | case bitc::CST_CODE_SETTYPE: // SETTYPE: [typeid] | |||
2159 | if (Record.empty()) | |||
2160 | return error("Invalid record"); | |||
2161 | if (Record[0] >= TypeList.size() || !TypeList[Record[0]]) | |||
2162 | return error("Invalid record"); | |||
2163 | if (TypeList[Record[0]] == VoidType) | |||
2164 | return error("Invalid constant type"); | |||
2165 | CurTy = TypeList[Record[0]]; | |||
2166 | continue; // Skip the ValueList manipulation. | |||
2167 | case bitc::CST_CODE_NULL: // NULL | |||
2168 | V = Constant::getNullValue(CurTy); | |||
2169 | break; | |||
2170 | case bitc::CST_CODE_INTEGER: // INTEGER: [intval] | |||
2171 | if (!CurTy->isIntegerTy() || Record.empty()) | |||
2172 | return error("Invalid record"); | |||
2173 | V = ConstantInt::get(CurTy, decodeSignRotatedValue(Record[0])); | |||
2174 | break; | |||
2175 | case bitc::CST_CODE_WIDE_INTEGER: {// WIDE_INTEGER: [n x intval] | |||
2176 | if (!CurTy->isIntegerTy() || Record.empty()) | |||
2177 | return error("Invalid record"); | |||
2178 | ||||
2179 | APInt VInt = | |||
2180 | readWideAPInt(Record, cast<IntegerType>(CurTy)->getBitWidth()); | |||
2181 | V = ConstantInt::get(Context, VInt); | |||
2182 | ||||
2183 | break; | |||
2184 | } | |||
2185 | case bitc::CST_CODE_FLOAT: { // FLOAT: [fpval] | |||
2186 | if (Record.empty()) | |||
2187 | return error("Invalid record"); | |||
2188 | if (CurTy->isHalfTy()) | |||
2189 | V = ConstantFP::get(Context, APFloat(APFloat::IEEEhalf(), | |||
2190 | APInt(16, (uint16_t)Record[0]))); | |||
2191 | else if (CurTy->isFloatTy()) | |||
2192 | V = ConstantFP::get(Context, APFloat(APFloat::IEEEsingle(), | |||
2193 | APInt(32, (uint32_t)Record[0]))); | |||
2194 | else if (CurTy->isDoubleTy()) | |||
2195 | V = ConstantFP::get(Context, APFloat(APFloat::IEEEdouble(), | |||
2196 | APInt(64, Record[0]))); | |||
2197 | else if (CurTy->isX86_FP80Ty()) { | |||
2198 | // Bits are not stored the same way as a normal i80 APInt, compensate. | |||
2199 | uint64_t Rearrange[2]; | |||
2200 | Rearrange[0] = (Record[1] & 0xffffLL) | (Record[0] << 16); | |||
2201 | Rearrange[1] = Record[0] >> 48; | |||
2202 | V = ConstantFP::get(Context, APFloat(APFloat::x87DoubleExtended(), | |||
2203 | APInt(80, Rearrange))); | |||
2204 | } else if (CurTy->isFP128Ty()) | |||
2205 | V = ConstantFP::get(Context, APFloat(APFloat::IEEEquad(), | |||
2206 | APInt(128, Record))); | |||
2207 | else if (CurTy->isPPC_FP128Ty()) | |||
2208 | V = ConstantFP::get(Context, APFloat(APFloat::PPCDoubleDouble(), | |||
2209 | APInt(128, Record))); | |||
2210 | else | |||
2211 | V = UndefValue::get(CurTy); | |||
2212 | break; | |||
2213 | } | |||
2214 | ||||
2215 | case bitc::CST_CODE_AGGREGATE: {// AGGREGATE: [n x value number] | |||
2216 | if (Record.empty()) | |||
2217 | return error("Invalid record"); | |||
2218 | ||||
2219 | unsigned Size = Record.size(); | |||
2220 | SmallVector<Constant*, 16> Elts; | |||
2221 | ||||
2222 | if (StructType *STy = dyn_cast<StructType>(CurTy)) { | |||
2223 | for (unsigned i = 0; i != Size; ++i) | |||
2224 | Elts.push_back(ValueList.getConstantFwdRef(Record[i], | |||
2225 | STy->getElementType(i))); | |||
2226 | V = ConstantStruct::get(STy, Elts); | |||
2227 | } else if (ArrayType *ATy = dyn_cast<ArrayType>(CurTy)) { | |||
2228 | Type *EltTy = ATy->getElementType(); | |||
2229 | for (unsigned i = 0; i != Size; ++i) | |||
2230 | Elts.push_back(ValueList.getConstantFwdRef(Record[i], EltTy)); | |||
2231 | V = ConstantArray::get(ATy, Elts); | |||
2232 | } else if (VectorType *VTy = dyn_cast<VectorType>(CurTy)) { | |||
2233 | Type *EltTy = VTy->getElementType(); | |||
2234 | for (unsigned i = 0; i != Size; ++i) | |||
2235 | Elts.push_back(ValueList.getConstantFwdRef(Record[i], EltTy)); | |||
2236 | V = ConstantVector::get(Elts); | |||
2237 | } else { | |||
2238 | V = UndefValue::get(CurTy); | |||
2239 | } | |||
2240 | break; | |||
2241 | } | |||
2242 | case bitc::CST_CODE_STRING: // STRING: [values] | |||
2243 | case bitc::CST_CODE_CSTRING: { // CSTRING: [values] | |||
2244 | if (Record.empty()) | |||
2245 | return error("Invalid record"); | |||
2246 | ||||
2247 | SmallString<16> Elts(Record.begin(), Record.end()); | |||
2248 | V = ConstantDataArray::getString(Context, Elts, | |||
2249 | BitCode == bitc::CST_CODE_CSTRING); | |||
2250 | break; | |||
2251 | } | |||
2252 | case bitc::CST_CODE_DATA: {// DATA: [n x value] | |||
2253 | if (Record.empty()) | |||
2254 | return error("Invalid record"); | |||
2255 | ||||
2256 | Type *EltTy = cast<SequentialType>(CurTy)->getElementType(); | |||
2257 | if (EltTy->isIntegerTy(8)) { | |||
2258 | SmallVector<uint8_t, 16> Elts(Record.begin(), Record.end()); | |||
2259 | if (isa<VectorType>(CurTy)) | |||
2260 | V = ConstantDataVector::get(Context, Elts); | |||
2261 | else | |||
2262 | V = ConstantDataArray::get(Context, Elts); | |||
2263 | } else if (EltTy->isIntegerTy(16)) { | |||
2264 | SmallVector<uint16_t, 16> Elts(Record.begin(), Record.end()); | |||
2265 | if (isa<VectorType>(CurTy)) | |||
2266 | V = ConstantDataVector::get(Context, Elts); | |||
2267 | else | |||
2268 | V = ConstantDataArray::get(Context, Elts); | |||
2269 | } else if (EltTy->isIntegerTy(32)) { | |||
2270 | SmallVector<uint32_t, 16> Elts(Record.begin(), Record.end()); | |||
2271 | if (isa<VectorType>(CurTy)) | |||
2272 | V = ConstantDataVector::get(Context, Elts); | |||
2273 | else | |||
2274 | V = ConstantDataArray::get(Context, Elts); | |||
2275 | } else if (EltTy->isIntegerTy(64)) { | |||
2276 | SmallVector<uint64_t, 16> Elts(Record.begin(), Record.end()); | |||
2277 | if (isa<VectorType>(CurTy)) | |||
2278 | V = ConstantDataVector::get(Context, Elts); | |||
2279 | else | |||
2280 | V = ConstantDataArray::get(Context, Elts); | |||
2281 | } else if (EltTy->isHalfTy()) { | |||
2282 | SmallVector<uint16_t, 16> Elts(Record.begin(), Record.end()); | |||
2283 | if (isa<VectorType>(CurTy)) | |||
2284 | V = ConstantDataVector::getFP(Context, Elts); | |||
2285 | else | |||
2286 | V = ConstantDataArray::getFP(Context, Elts); | |||
2287 | } else if (EltTy->isFloatTy()) { | |||
2288 | SmallVector<uint32_t, 16> Elts(Record.begin(), Record.end()); | |||
2289 | if (isa<VectorType>(CurTy)) | |||
2290 | V = ConstantDataVector::getFP(Context, Elts); | |||
2291 | else | |||
2292 | V = ConstantDataArray::getFP(Context, Elts); | |||
2293 | } else if (EltTy->isDoubleTy()) { | |||
2294 | SmallVector<uint64_t, 16> Elts(Record.begin(), Record.end()); | |||
2295 | if (isa<VectorType>(CurTy)) | |||
2296 | V = ConstantDataVector::getFP(Context, Elts); | |||
2297 | else | |||
2298 | V = ConstantDataArray::getFP(Context, Elts); | |||
2299 | } else { | |||
2300 | return error("Invalid type for value"); | |||
2301 | } | |||
2302 | break; | |||
2303 | } | |||
2304 | case bitc::CST_CODE_CE_BINOP: { // CE_BINOP: [opcode, opval, opval] | |||
2305 | if (Record.size() < 3) | |||
2306 | return error("Invalid record"); | |||
2307 | int Opc = getDecodedBinaryOpcode(Record[0], CurTy); | |||
2308 | if (Opc < 0) { | |||
2309 | V = UndefValue::get(CurTy); // Unknown binop. | |||
2310 | } else { | |||
2311 | Constant *LHS = ValueList.getConstantFwdRef(Record[1], CurTy); | |||
2312 | Constant *RHS = ValueList.getConstantFwdRef(Record[2], CurTy); | |||
2313 | unsigned Flags = 0; | |||
2314 | if (Record.size() >= 4) { | |||
2315 | if (Opc == Instruction::Add || | |||
2316 | Opc == Instruction::Sub || | |||
2317 | Opc == Instruction::Mul || | |||
2318 | Opc == Instruction::Shl) { | |||
2319 | if (Record[3] & (1 << bitc::OBO_NO_SIGNED_WRAP)) | |||
2320 | Flags |= OverflowingBinaryOperator::NoSignedWrap; | |||
2321 | if (Record[3] & (1 << bitc::OBO_NO_UNSIGNED_WRAP)) | |||
2322 | Flags |= OverflowingBinaryOperator::NoUnsignedWrap; | |||
2323 | } else if (Opc == Instruction::SDiv || | |||
2324 | Opc == Instruction::UDiv || | |||
2325 | Opc == Instruction::LShr || | |||
2326 | Opc == Instruction::AShr) { | |||
2327 | if (Record[3] & (1 << bitc::PEO_EXACT)) | |||
2328 | Flags |= SDivOperator::IsExact; | |||
2329 | } | |||
2330 | } | |||
2331 | V = ConstantExpr::get(Opc, LHS, RHS, Flags); | |||
2332 | } | |||
2333 | break; | |||
2334 | } | |||
2335 | case bitc::CST_CODE_CE_CAST: { // CE_CAST: [opcode, opty, opval] | |||
2336 | if (Record.size() < 3) | |||
2337 | return error("Invalid record"); | |||
2338 | int Opc = getDecodedCastOpcode(Record[0]); | |||
2339 | if (Opc < 0) { | |||
2340 | V = UndefValue::get(CurTy); // Unknown cast. | |||
2341 | } else { | |||
2342 | Type *OpTy = getTypeByID(Record[1]); | |||
2343 | if (!OpTy) | |||
2344 | return error("Invalid record"); | |||
2345 | Constant *Op = ValueList.getConstantFwdRef(Record[2], OpTy); | |||
2346 | V = UpgradeBitCastExpr(Opc, Op, CurTy); | |||
2347 | if (!V) V = ConstantExpr::getCast(Opc, Op, CurTy); | |||
2348 | } | |||
2349 | break; | |||
2350 | } | |||
2351 | case bitc::CST_CODE_CE_INBOUNDS_GEP: // [ty, n x operands] | |||
2352 | case bitc::CST_CODE_CE_GEP: // [ty, n x operands] | |||
2353 | case bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX: { // [ty, flags, n x | |||
2354 | // operands] | |||
2355 | unsigned OpNum = 0; | |||
2356 | Type *PointeeType = nullptr; | |||
2357 | if (BitCode == bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX || | |||
2358 | Record.size() % 2) | |||
2359 | PointeeType = getTypeByID(Record[OpNum++]); | |||
2360 | ||||
2361 | bool InBounds = false; | |||
2362 | Optional<unsigned> InRangeIndex; | |||
2363 | if (BitCode == bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX) { | |||
2364 | uint64_t Op = Record[OpNum++]; | |||
2365 | InBounds = Op & 1; | |||
2366 | InRangeIndex = Op >> 1; | |||
2367 | } else if (BitCode == bitc::CST_CODE_CE_INBOUNDS_GEP) | |||
2368 | InBounds = true; | |||
2369 | ||||
2370 | SmallVector<Constant*, 16> Elts; | |||
2371 | while (OpNum != Record.size()) { | |||
2372 | Type *ElTy = getTypeByID(Record[OpNum++]); | |||
2373 | if (!ElTy) | |||
2374 | return error("Invalid record"); | |||
2375 | Elts.push_back(ValueList.getConstantFwdRef(Record[OpNum++], ElTy)); | |||
2376 | } | |||
2377 | ||||
2378 | if (PointeeType && | |||
2379 | PointeeType != | |||
2380 | cast<PointerType>(Elts[0]->getType()->getScalarType()) | |||
2381 | ->getElementType()) | |||
2382 | return error("Explicit gep operator type does not match pointee type " | |||
2383 | "of pointer operand"); | |||
2384 | ||||
2385 | if (Elts.size() < 1) | |||
2386 | return error("Invalid gep with no operands"); | |||
2387 | ||||
2388 | ArrayRef<Constant *> Indices(Elts.begin() + 1, Elts.end()); | |||
2389 | V = ConstantExpr::getGetElementPtr(PointeeType, Elts[0], Indices, | |||
2390 | InBounds, InRangeIndex); | |||
2391 | break; | |||
2392 | } | |||
2393 | case bitc::CST_CODE_CE_SELECT: { // CE_SELECT: [opval#, opval#, opval#] | |||
2394 | if (Record.size() < 3) | |||
2395 | return error("Invalid record"); | |||
2396 | ||||
2397 | Type *SelectorTy = Type::getInt1Ty(Context); | |||
2398 | ||||
2399 | // The selector might be an i1 or an <n x i1> | |||
2400 | // Get the type from the ValueList before getting a forward ref. | |||
2401 | if (VectorType *VTy = dyn_cast<VectorType>(CurTy)) | |||
2402 | if (Value *V = ValueList[Record[0]]) | |||
2403 | if (SelectorTy != V->getType()) | |||
2404 | SelectorTy = VectorType::get(SelectorTy, VTy->getNumElements()); | |||
2405 | ||||
2406 | V = ConstantExpr::getSelect(ValueList.getConstantFwdRef(Record[0], | |||
2407 | SelectorTy), | |||
2408 | ValueList.getConstantFwdRef(Record[1],CurTy), | |||
2409 | ValueList.getConstantFwdRef(Record[2],CurTy)); | |||
2410 | break; | |||
2411 | } | |||
2412 | case bitc::CST_CODE_CE_EXTRACTELT | |||
2413 | : { // CE_EXTRACTELT: [opty, opval, opty, opval] | |||
2414 | if (Record.size() < 3) | |||
2415 | return error("Invalid record"); | |||
2416 | VectorType *OpTy = | |||
2417 | dyn_cast_or_null<VectorType>(getTypeByID(Record[0])); | |||
2418 | if (!OpTy) | |||
2419 | return error("Invalid record"); | |||
2420 | Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy); | |||
2421 | Constant *Op1 = nullptr; | |||
2422 | if (Record.size() == 4) { | |||
2423 | Type *IdxTy = getTypeByID(Record[2]); | |||
2424 | if (!IdxTy) | |||
2425 | return error("Invalid record"); | |||
2426 | Op1 = ValueList.getConstantFwdRef(Record[3], IdxTy); | |||
2427 | } else // TODO: Remove with llvm 4.0 | |||
2428 | Op1 = ValueList.getConstantFwdRef(Record[2], Type::getInt32Ty(Context)); | |||
2429 | if (!Op1) | |||
2430 | return error("Invalid record"); | |||
2431 | V = ConstantExpr::getExtractElement(Op0, Op1); | |||
2432 | break; | |||
2433 | } | |||
2434 | case bitc::CST_CODE_CE_INSERTELT | |||
2435 | : { // CE_INSERTELT: [opval, opval, opty, opval] | |||
2436 | VectorType *OpTy = dyn_cast<VectorType>(CurTy); | |||
2437 | if (Record.size() < 3 || !OpTy) | |||
2438 | return error("Invalid record"); | |||
2439 | Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy); | |||
2440 | Constant *Op1 = ValueList.getConstantFwdRef(Record[1], | |||
2441 | OpTy->getElementType()); | |||
2442 | Constant *Op2 = nullptr; | |||
2443 | if (Record.size() == 4) { | |||
2444 | Type *IdxTy = getTypeByID(Record[2]); | |||
2445 | if (!IdxTy) | |||
2446 | return error("Invalid record"); | |||
2447 | Op2 = ValueList.getConstantFwdRef(Record[3], IdxTy); | |||
2448 | } else // TODO: Remove with llvm 4.0 | |||
2449 | Op2 = ValueList.getConstantFwdRef(Record[2], Type::getInt32Ty(Context)); | |||
2450 | if (!Op2) | |||
2451 | return error("Invalid record"); | |||
2452 | V = ConstantExpr::getInsertElement(Op0, Op1, Op2); | |||
2453 | break; | |||
2454 | } | |||
2455 | case bitc::CST_CODE_CE_SHUFFLEVEC: { // CE_SHUFFLEVEC: [opval, opval, opval] | |||
2456 | VectorType *OpTy = dyn_cast<VectorType>(CurTy); | |||
2457 | if (Record.size() < 3 || !OpTy) | |||
2458 | return error("Invalid record"); | |||
2459 | Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy); | |||
2460 | Constant *Op1 = ValueList.getConstantFwdRef(Record[1], OpTy); | |||
2461 | Type *ShufTy = VectorType::get(Type::getInt32Ty(Context), | |||
2462 | OpTy->getNumElements()); | |||
2463 | Constant *Op2 = ValueList.getConstantFwdRef(Record[2], ShufTy); | |||
2464 | V = ConstantExpr::getShuffleVector(Op0, Op1, Op2); | |||
2465 | break; | |||
2466 | } | |||
2467 | case bitc::CST_CODE_CE_SHUFVEC_EX: { // [opty, opval, opval, opval] | |||
2468 | VectorType *RTy = dyn_cast<VectorType>(CurTy); | |||
2469 | VectorType *OpTy = | |||
2470 | dyn_cast_or_null<VectorType>(getTypeByID(Record[0])); | |||
2471 | if (Record.size() < 4 || !RTy || !OpTy) | |||
2472 | return error("Invalid record"); | |||
2473 | Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy); | |||
2474 | Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy); | |||
2475 | Type *ShufTy = VectorType::get(Type::getInt32Ty(Context), | |||
2476 | RTy->getNumElements()); | |||
2477 | Constant *Op2 = ValueList.getConstantFwdRef(Record[3], ShufTy); | |||
2478 | V = ConstantExpr::getShuffleVector(Op0, Op1, Op2); | |||
2479 | break; | |||
2480 | } | |||
2481 | case bitc::CST_CODE_CE_CMP: { // CE_CMP: [opty, opval, opval, pred] | |||
2482 | if (Record.size() < 4) | |||
2483 | return error("Invalid record"); | |||
2484 | Type *OpTy = getTypeByID(Record[0]); | |||
2485 | if (!OpTy) | |||
2486 | return error("Invalid record"); | |||
2487 | Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy); | |||
2488 | Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy); | |||
2489 | ||||
2490 | if (OpTy->isFPOrFPVectorTy()) | |||
2491 | V = ConstantExpr::getFCmp(Record[3], Op0, Op1); | |||
2492 | else | |||
2493 | V = ConstantExpr::getICmp(Record[3], Op0, Op1); | |||
2494 | break; | |||
2495 | } | |||
2496 | // This maintains backward compatibility, pre-asm dialect keywords. | |||
2497 | // FIXME: Remove with the 4.0 release. | |||
2498 | case bitc::CST_CODE_INLINEASM_OLD: { | |||
2499 | if (Record.size() < 2) | |||
2500 | return error("Invalid record"); | |||
2501 | std::string AsmStr, ConstrStr; | |||
2502 | bool HasSideEffects = Record[0] & 1; | |||
2503 | bool IsAlignStack = Record[0] >> 1; | |||
2504 | unsigned AsmStrSize = Record[1]; | |||
2505 | if (2+AsmStrSize >= Record.size()) | |||
2506 | return error("Invalid record"); | |||
2507 | unsigned ConstStrSize = Record[2+AsmStrSize]; | |||
2508 | if (3+AsmStrSize+ConstStrSize > Record.size()) | |||
2509 | return error("Invalid record"); | |||
2510 | ||||
2511 | for (unsigned i = 0; i != AsmStrSize; ++i) | |||
2512 | AsmStr += (char)Record[2+i]; | |||
2513 | for (unsigned i = 0; i != ConstStrSize; ++i) | |||
2514 | ConstrStr += (char)Record[3+AsmStrSize+i]; | |||
2515 | PointerType *PTy = cast<PointerType>(CurTy); | |||
2516 | V = InlineAsm::get(cast<FunctionType>(PTy->getElementType()), | |||
2517 | AsmStr, ConstrStr, HasSideEffects, IsAlignStack); | |||
2518 | break; | |||
2519 | } | |||
2520 | // This version adds support for the asm dialect keywords (e.g., | |||
2521 | // inteldialect). | |||
2522 | case bitc::CST_CODE_INLINEASM: { | |||
2523 | if (Record.size() < 2) | |||
2524 | return error("Invalid record"); | |||
2525 | std::string AsmStr, ConstrStr; | |||
2526 | bool HasSideEffects = Record[0] & 1; | |||
2527 | bool IsAlignStack = (Record[0] >> 1) & 1; | |||
2528 | unsigned AsmDialect = Record[0] >> 2; | |||
2529 | unsigned AsmStrSize = Record[1]; | |||
2530 | if (2+AsmStrSize >= Record.size()) | |||
2531 | return error("Invalid record"); | |||
2532 | unsigned ConstStrSize = Record[2+AsmStrSize]; | |||
2533 | if (3+AsmStrSize+ConstStrSize > Record.size()) | |||
2534 | return error("Invalid record"); | |||
2535 | ||||
2536 | for (unsigned i = 0; i != AsmStrSize; ++i) | |||
2537 | AsmStr += (char)Record[2+i]; | |||
2538 | for (unsigned i = 0; i != ConstStrSize; ++i) | |||
2539 | ConstrStr += (char)Record[3+AsmStrSize+i]; | |||
2540 | PointerType *PTy = cast<PointerType>(CurTy); | |||
2541 | V = InlineAsm::get(cast<FunctionType>(PTy->getElementType()), | |||
2542 | AsmStr, ConstrStr, HasSideEffects, IsAlignStack, | |||
2543 | InlineAsm::AsmDialect(AsmDialect)); | |||
2544 | break; | |||
2545 | } | |||
2546 | case bitc::CST_CODE_BLOCKADDRESS:{ | |||
2547 | if (Record.size() < 3) | |||
2548 | return error("Invalid record"); | |||
2549 | Type *FnTy = getTypeByID(Record[0]); | |||
2550 | if (!FnTy) | |||
2551 | return error("Invalid record"); | |||
2552 | Function *Fn = | |||
2553 | dyn_cast_or_null<Function>(ValueList.getConstantFwdRef(Record[1],FnTy)); | |||
2554 | if (!Fn) | |||
2555 | return error("Invalid record"); | |||
2556 | ||||
2557 | // If the function is already parsed we can insert the block address right | |||
2558 | // away. | |||
2559 | BasicBlock *BB; | |||
2560 | unsigned BBID = Record[2]; | |||
2561 | if (!BBID) | |||
2562 | // Invalid reference to entry block. | |||
2563 | return error("Invalid ID"); | |||
2564 | if (!Fn->empty()) { | |||
2565 | Function::iterator BBI = Fn->begin(), BBE = Fn->end(); | |||
2566 | for (size_t I = 0, E = BBID; I != E; ++I) { | |||
2567 | if (BBI == BBE) | |||
2568 | return error("Invalid ID"); | |||
2569 | ++BBI; | |||
2570 | } | |||
2571 | BB = &*BBI; | |||
2572 | } else { | |||
2573 | // Otherwise insert a placeholder and remember it so it can be inserted | |||
2574 | // when the function is parsed. | |||
2575 | auto &FwdBBs = BasicBlockFwdRefs[Fn]; | |||
2576 | if (FwdBBs.empty()) | |||
2577 | BasicBlockFwdRefQueue.push_back(Fn); | |||
2578 | if (FwdBBs.size() < BBID + 1) | |||
2579 | FwdBBs.resize(BBID + 1); | |||
2580 | if (!FwdBBs[BBID]) | |||
2581 | FwdBBs[BBID] = BasicBlock::Create(Context); | |||
2582 | BB = FwdBBs[BBID]; | |||
2583 | } | |||
2584 | V = BlockAddress::get(Fn, BB); | |||
2585 | break; | |||
2586 | } | |||
2587 | } | |||
2588 | ||||
2589 | ValueList.assignValue(V, NextCstNo); | |||
2590 | ++NextCstNo; | |||
2591 | } | |||
2592 | } | |||
2593 | ||||
2594 | Error BitcodeReader::parseUseLists() { | |||
2595 | if (Stream.EnterSubBlock(bitc::USELIST_BLOCK_ID)) | |||
2596 | return error("Invalid record"); | |||
2597 | ||||
2598 | // Read all the records. | |||
2599 | SmallVector<uint64_t, 64> Record; | |||
2600 | ||||
2601 | while (true) { | |||
2602 | BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); | |||
2603 | ||||
2604 | switch (Entry.Kind) { | |||
2605 | case BitstreamEntry::SubBlock: // Handled for us already. | |||
2606 | case BitstreamEntry::Error: | |||
2607 | return error("Malformed block"); | |||
2608 | case BitstreamEntry::EndBlock: | |||
2609 | return Error::success(); | |||
2610 | case BitstreamEntry::Record: | |||
2611 | // The interesting case. | |||
2612 | break; | |||
2613 | } | |||
2614 | ||||
2615 | // Read a use list record. | |||
2616 | Record.clear(); | |||
2617 | bool IsBB = false; | |||
2618 | switch (Stream.readRecord(Entry.ID, Record)) { | |||
2619 | default: // Default behavior: unknown type. | |||
2620 | break; | |||
2621 | case bitc::USELIST_CODE_BB: | |||
2622 | IsBB = true; | |||
2623 | LLVM_FALLTHROUGH[[clang::fallthrough]]; | |||
2624 | case bitc::USELIST_CODE_DEFAULT: { | |||
2625 | unsigned RecordLength = Record.size(); | |||
2626 | if (RecordLength < 3) | |||
2627 | // Records should have at least an ID and two indexes. | |||
2628 | return error("Invalid record"); | |||
2629 | unsigned ID = Record.back(); | |||
2630 | Record.pop_back(); | |||
2631 | ||||
2632 | Value *V; | |||
2633 | if (IsBB) { | |||
2634 | assert(ID < FunctionBBs.size() && "Basic block not found")(static_cast <bool> (ID < FunctionBBs.size() && "Basic block not found") ? void (0) : __assert_fail ("ID < FunctionBBs.size() && \"Basic block not found\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 2634, __extension__ __PRETTY_FUNCTION__)); | |||
2635 | V = FunctionBBs[ID]; | |||
2636 | } else | |||
2637 | V = ValueList[ID]; | |||
2638 | unsigned NumUses = 0; | |||
2639 | SmallDenseMap<const Use *, unsigned, 16> Order; | |||
2640 | for (const Use &U : V->materialized_uses()) { | |||
2641 | if (++NumUses > Record.size()) | |||
2642 | break; | |||
2643 | Order[&U] = Record[NumUses - 1]; | |||
2644 | } | |||
2645 | if (Order.size() != Record.size() || NumUses > Record.size()) | |||
2646 | // Mismatches can happen if the functions are being materialized lazily | |||
2647 | // (out-of-order), or a value has been upgraded. | |||
2648 | break; | |||
2649 | ||||
2650 | V->sortUseList([&](const Use &L, const Use &R) { | |||
2651 | return Order.lookup(&L) < Order.lookup(&R); | |||
2652 | }); | |||
2653 | break; | |||
2654 | } | |||
2655 | } | |||
2656 | } | |||
2657 | } | |||
2658 | ||||
2659 | /// When we see the block for metadata, remember where it is and then skip it. | |||
2660 | /// This lets us lazily deserialize the metadata. | |||
2661 | Error BitcodeReader::rememberAndSkipMetadata() { | |||
2662 | // Save the current stream state. | |||
2663 | uint64_t CurBit = Stream.GetCurrentBitNo(); | |||
2664 | DeferredMetadataInfo.push_back(CurBit); | |||
2665 | ||||
2666 | // Skip over the block for now. | |||
2667 | if (Stream.SkipBlock()) | |||
2668 | return error("Invalid record"); | |||
2669 | return Error::success(); | |||
2670 | } | |||
2671 | ||||
2672 | Error BitcodeReader::materializeMetadata() { | |||
2673 | for (uint64_t BitPos : DeferredMetadataInfo) { | |||
2674 | // Move the bit stream to the saved position. | |||
2675 | Stream.JumpToBit(BitPos); | |||
2676 | if (Error Err = MDLoader->parseModuleMetadata()) | |||
2677 | return Err; | |||
2678 | } | |||
2679 | ||||
2680 | // Upgrade "Linker Options" module flag to "llvm.linker.options" module-level | |||
2681 | // metadata. | |||
2682 | if (Metadata *Val = TheModule->getModuleFlag("Linker Options")) { | |||
2683 | NamedMDNode *LinkerOpts = | |||
2684 | TheModule->getOrInsertNamedMetadata("llvm.linker.options"); | |||
2685 | for (const MDOperand &MDOptions : cast<MDNode>(Val)->operands()) | |||
2686 | LinkerOpts->addOperand(cast<MDNode>(MDOptions)); | |||
2687 | } | |||
2688 | ||||
2689 | DeferredMetadataInfo.clear(); | |||
2690 | return Error::success(); | |||
2691 | } | |||
2692 | ||||
2693 | void BitcodeReader::setStripDebugInfo() { StripDebugInfo = true; } | |||
2694 | ||||
2695 | /// When we see the block for a function body, remember where it is and then | |||
2696 | /// skip it. This lets us lazily deserialize the functions. | |||
2697 | Error BitcodeReader::rememberAndSkipFunctionBody() { | |||
2698 | // Get the function we are talking about. | |||
2699 | if (FunctionsWithBodies.empty()) | |||
2700 | return error("Insufficient function protos"); | |||
2701 | ||||
2702 | Function *Fn = FunctionsWithBodies.back(); | |||
2703 | FunctionsWithBodies.pop_back(); | |||
2704 | ||||
2705 | // Save the current stream state. | |||
2706 | uint64_t CurBit = Stream.GetCurrentBitNo(); | |||
2707 | assert((static_cast <bool> ((DeferredFunctionInfo[Fn] == 0 || DeferredFunctionInfo [Fn] == CurBit) && "Mismatch between VST and scanned function offsets" ) ? void (0) : __assert_fail ("(DeferredFunctionInfo[Fn] == 0 || DeferredFunctionInfo[Fn] == CurBit) && \"Mismatch between VST and scanned function offsets\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 2709, __extension__ __PRETTY_FUNCTION__)) | |||
2708 | (DeferredFunctionInfo[Fn] == 0 || DeferredFunctionInfo[Fn] == CurBit) &&(static_cast <bool> ((DeferredFunctionInfo[Fn] == 0 || DeferredFunctionInfo [Fn] == CurBit) && "Mismatch between VST and scanned function offsets" ) ? void (0) : __assert_fail ("(DeferredFunctionInfo[Fn] == 0 || DeferredFunctionInfo[Fn] == CurBit) && \"Mismatch between VST and scanned function offsets\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 2709, __extension__ __PRETTY_FUNCTION__)) | |||
2709 | "Mismatch between VST and scanned function offsets")(static_cast <bool> ((DeferredFunctionInfo[Fn] == 0 || DeferredFunctionInfo [Fn] == CurBit) && "Mismatch between VST and scanned function offsets" ) ? void (0) : __assert_fail ("(DeferredFunctionInfo[Fn] == 0 || DeferredFunctionInfo[Fn] == CurBit) && \"Mismatch between VST and scanned function offsets\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 2709, __extension__ __PRETTY_FUNCTION__)); | |||
2710 | DeferredFunctionInfo[Fn] = CurBit; | |||
2711 | ||||
2712 | // Skip over the function block for now. | |||
2713 | if (Stream.SkipBlock()) | |||
2714 | return error("Invalid record"); | |||
2715 | return Error::success(); | |||
2716 | } | |||
2717 | ||||
2718 | Error BitcodeReader::globalCleanup() { | |||
2719 | // Patch the initializers for globals and aliases up. | |||
2720 | if (Error Err = resolveGlobalAndIndirectSymbolInits()) | |||
2721 | return Err; | |||
2722 | if (!GlobalInits.empty() || !IndirectSymbolInits.empty()) | |||
2723 | return error("Malformed global initializer set"); | |||
2724 | ||||
2725 | // Look for intrinsic functions which need to be upgraded at some point | |||
2726 | for (Function &F : *TheModule) { | |||
2727 | MDLoader->upgradeDebugIntrinsics(F); | |||
2728 | Function *NewFn; | |||
2729 | if (UpgradeIntrinsicFunction(&F, NewFn)) | |||
2730 | UpgradedIntrinsics[&F] = NewFn; | |||
2731 | else if (auto Remangled = Intrinsic::remangleIntrinsicFunction(&F)) | |||
2732 | // Some types could be renamed during loading if several modules are | |||
2733 | // loaded in the same LLVMContext (LTO scenario). In this case we should | |||
2734 | // remangle intrinsics names as well. | |||
2735 | RemangledIntrinsics[&F] = Remangled.getValue(); | |||
2736 | } | |||
2737 | ||||
2738 | // Look for global variables which need to be renamed. | |||
2739 | for (GlobalVariable &GV : TheModule->globals()) | |||
2740 | UpgradeGlobalVariable(&GV); | |||
2741 | ||||
2742 | // Force deallocation of memory for these vectors to favor the client that | |||
2743 | // want lazy deserialization. | |||
2744 | std::vector<std::pair<GlobalVariable *, unsigned>>().swap(GlobalInits); | |||
2745 | std::vector<std::pair<GlobalIndirectSymbol *, unsigned>>().swap( | |||
2746 | IndirectSymbolInits); | |||
2747 | return Error::success(); | |||
2748 | } | |||
2749 | ||||
2750 | /// Support for lazy parsing of function bodies. This is required if we | |||
2751 | /// either have an old bitcode file without a VST forward declaration record, | |||
2752 | /// or if we have an anonymous function being materialized, since anonymous | |||
2753 | /// functions do not have a name and are therefore not in the VST. | |||
2754 | Error BitcodeReader::rememberAndSkipFunctionBodies() { | |||
2755 | Stream.JumpToBit(NextUnreadBit); | |||
2756 | ||||
2757 | if (Stream.AtEndOfStream()) | |||
2758 | return error("Could not find function in stream"); | |||
2759 | ||||
2760 | if (!SeenFirstFunctionBody) | |||
2761 | return error("Trying to materialize functions before seeing function blocks"); | |||
2762 | ||||
2763 | // An old bitcode file with the symbol table at the end would have | |||
2764 | // finished the parse greedily. | |||
2765 | assert(SeenValueSymbolTable)(static_cast <bool> (SeenValueSymbolTable) ? void (0) : __assert_fail ("SeenValueSymbolTable", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 2765, __extension__ __PRETTY_FUNCTION__)); | |||
2766 | ||||
2767 | SmallVector<uint64_t, 64> Record; | |||
2768 | ||||
2769 | while (true) { | |||
2770 | BitstreamEntry Entry = Stream.advance(); | |||
2771 | switch (Entry.Kind) { | |||
2772 | default: | |||
2773 | return error("Expect SubBlock"); | |||
2774 | case BitstreamEntry::SubBlock: | |||
2775 | switch (Entry.ID) { | |||
2776 | default: | |||
2777 | return error("Expect function block"); | |||
2778 | case bitc::FUNCTION_BLOCK_ID: | |||
2779 | if (Error Err = rememberAndSkipFunctionBody()) | |||
2780 | return Err; | |||
2781 | NextUnreadBit = Stream.GetCurrentBitNo(); | |||
2782 | return Error::success(); | |||
2783 | } | |||
2784 | } | |||
2785 | } | |||
2786 | } | |||
2787 | ||||
2788 | bool BitcodeReaderBase::readBlockInfo() { | |||
2789 | Optional<BitstreamBlockInfo> NewBlockInfo = Stream.ReadBlockInfoBlock(); | |||
2790 | if (!NewBlockInfo) | |||
2791 | return true; | |||
2792 | BlockInfo = std::move(*NewBlockInfo); | |||
2793 | return false; | |||
2794 | } | |||
2795 | ||||
2796 | Error BitcodeReader::parseComdatRecord(ArrayRef<uint64_t> Record) { | |||
2797 | // v1: [selection_kind, name] | |||
2798 | // v2: [strtab_offset, strtab_size, selection_kind] | |||
2799 | StringRef Name; | |||
2800 | std::tie(Name, Record) = readNameFromStrtab(Record); | |||
2801 | ||||
2802 | if (Record.empty()) | |||
2803 | return error("Invalid record"); | |||
2804 | Comdat::SelectionKind SK = getDecodedComdatSelectionKind(Record[0]); | |||
2805 | std::string OldFormatName; | |||
2806 | if (!UseStrtab) { | |||
2807 | if (Record.size() < 2) | |||
2808 | return error("Invalid record"); | |||
2809 | unsigned ComdatNameSize = Record[1]; | |||
2810 | OldFormatName.reserve(ComdatNameSize); | |||
2811 | for (unsigned i = 0; i != ComdatNameSize; ++i) | |||
2812 | OldFormatName += (char)Record[2 + i]; | |||
2813 | Name = OldFormatName; | |||
2814 | } | |||
2815 | Comdat *C = TheModule->getOrInsertComdat(Name); | |||
2816 | C->setSelectionKind(SK); | |||
2817 | ComdatList.push_back(C); | |||
2818 | return Error::success(); | |||
2819 | } | |||
2820 | ||||
2821 | Error BitcodeReader::parseGlobalVarRecord(ArrayRef<uint64_t> Record) { | |||
2822 | // v1: [pointer type, isconst, initid, linkage, alignment, section, | |||
2823 | // visibility, threadlocal, unnamed_addr, externally_initialized, | |||
2824 | // dllstorageclass, comdat, attributes, preemption specifier] (name in VST) | |||
2825 | // v2: [strtab_offset, strtab_size, v1] | |||
2826 | StringRef Name; | |||
2827 | std::tie(Name, Record) = readNameFromStrtab(Record); | |||
2828 | ||||
2829 | if (Record.size() < 6) | |||
| ||||
2830 | return error("Invalid record"); | |||
2831 | Type *Ty = getTypeByID(Record[0]); | |||
2832 | if (!Ty) | |||
2833 | return error("Invalid record"); | |||
2834 | bool isConstant = Record[1] & 1; | |||
2835 | bool explicitType = Record[1] & 2; | |||
2836 | unsigned AddressSpace; | |||
2837 | if (explicitType) { | |||
2838 | AddressSpace = Record[1] >> 2; | |||
2839 | } else { | |||
2840 | if (!Ty->isPointerTy()) | |||
2841 | return error("Invalid type for value"); | |||
2842 | AddressSpace = cast<PointerType>(Ty)->getAddressSpace(); | |||
2843 | Ty = cast<PointerType>(Ty)->getElementType(); | |||
2844 | } | |||
2845 | ||||
2846 | uint64_t RawLinkage = Record[3]; | |||
2847 | GlobalValue::LinkageTypes Linkage = getDecodedLinkage(RawLinkage); | |||
2848 | unsigned Alignment; | |||
2849 | if (Error Err = parseAlignmentValue(Record[4], Alignment)) | |||
2850 | return Err; | |||
2851 | std::string Section; | |||
2852 | if (Record[5]) { | |||
2853 | if (Record[5] - 1 >= SectionTable.size()) | |||
2854 | return error("Invalid ID"); | |||
2855 | Section = SectionTable[Record[5] - 1]; | |||
2856 | } | |||
2857 | GlobalValue::VisibilityTypes Visibility = GlobalValue::DefaultVisibility; | |||
2858 | // Local linkage must have default visibility. | |||
2859 | if (Record.size() > 6 && !GlobalValue::isLocalLinkage(Linkage)) | |||
2860 | // FIXME: Change to an error if non-default in 4.0. | |||
2861 | Visibility = getDecodedVisibility(Record[6]); | |||
2862 | ||||
2863 | GlobalVariable::ThreadLocalMode TLM = GlobalVariable::NotThreadLocal; | |||
2864 | if (Record.size() > 7) | |||
2865 | TLM = getDecodedThreadLocalMode(Record[7]); | |||
2866 | ||||
2867 | GlobalValue::UnnamedAddr UnnamedAddr = GlobalValue::UnnamedAddr::None; | |||
2868 | if (Record.size() > 8) | |||
2869 | UnnamedAddr = getDecodedUnnamedAddrType(Record[8]); | |||
2870 | ||||
2871 | bool ExternallyInitialized = false; | |||
2872 | if (Record.size() > 9) | |||
2873 | ExternallyInitialized = Record[9]; | |||
2874 | ||||
2875 | GlobalVariable *NewGV = | |||
2876 | new GlobalVariable(*TheModule, Ty, isConstant, Linkage, nullptr, Name, | |||
2877 | nullptr, TLM, AddressSpace, ExternallyInitialized); | |||
2878 | NewGV->setAlignment(Alignment); | |||
| ||||
2879 | if (!Section.empty()) | |||
2880 | NewGV->setSection(Section); | |||
2881 | NewGV->setVisibility(Visibility); | |||
2882 | NewGV->setUnnamedAddr(UnnamedAddr); | |||
2883 | ||||
2884 | if (Record.size() > 10) | |||
2885 | NewGV->setDLLStorageClass(getDecodedDLLStorageClass(Record[10])); | |||
2886 | else | |||
2887 | upgradeDLLImportExportLinkage(NewGV, RawLinkage); | |||
2888 | ||||
2889 | ValueList.push_back(NewGV); | |||
2890 | ||||
2891 | // Remember which value to use for the global initializer. | |||
2892 | if (unsigned InitID = Record[2]) | |||
2893 | GlobalInits.push_back(std::make_pair(NewGV, InitID - 1)); | |||
2894 | ||||
2895 | if (Record.size() > 11) { | |||
2896 | if (unsigned ComdatID = Record[11]) { | |||
2897 | if (ComdatID > ComdatList.size()) | |||
2898 | return error("Invalid global variable comdat ID"); | |||
2899 | NewGV->setComdat(ComdatList[ComdatID - 1]); | |||
2900 | } | |||
2901 | } else if (hasImplicitComdat(RawLinkage)) { | |||
2902 | NewGV->setComdat(reinterpret_cast<Comdat *>(1)); | |||
2903 | } | |||
2904 | ||||
2905 | if (Record.size() > 12) { | |||
2906 | auto AS = getAttributes(Record[12]).getFnAttributes(); | |||
2907 | NewGV->setAttributes(AS); | |||
2908 | } | |||
2909 | ||||
2910 | if (Record.size() > 13) { | |||
2911 | NewGV->setDSOLocal(getDecodedDSOLocal(Record[13])); | |||
2912 | } | |||
2913 | ||||
2914 | return Error::success(); | |||
2915 | } | |||
2916 | ||||
2917 | Error BitcodeReader::parseFunctionRecord(ArrayRef<uint64_t> Record) { | |||
2918 | // v1: [type, callingconv, isproto, linkage, paramattr, alignment, section, | |||
2919 | // visibility, gc, unnamed_addr, prologuedata, dllstorageclass, comdat, | |||
2920 | // prefixdata, personalityfn, preemption specifier] (name in VST) | |||
2921 | // v2: [strtab_offset, strtab_size, v1] | |||
2922 | StringRef Name; | |||
2923 | std::tie(Name, Record) = readNameFromStrtab(Record); | |||
2924 | ||||
2925 | if (Record.size() < 8) | |||
2926 | return error("Invalid record"); | |||
2927 | Type *Ty = getTypeByID(Record[0]); | |||
2928 | if (!Ty) | |||
2929 | return error("Invalid record"); | |||
2930 | if (auto *PTy = dyn_cast<PointerType>(Ty)) | |||
2931 | Ty = PTy->getElementType(); | |||
2932 | auto *FTy = dyn_cast<FunctionType>(Ty); | |||
2933 | if (!FTy) | |||
2934 | return error("Invalid type for value"); | |||
2935 | auto CC = static_cast<CallingConv::ID>(Record[1]); | |||
2936 | if (CC & ~CallingConv::MaxID) | |||
2937 | return error("Invalid calling convention ID"); | |||
2938 | ||||
2939 | Function *Func = | |||
2940 | Function::Create(FTy, GlobalValue::ExternalLinkage, Name, TheModule); | |||
2941 | ||||
2942 | Func->setCallingConv(CC); | |||
2943 | bool isProto = Record[2]; | |||
2944 | uint64_t RawLinkage = Record[3]; | |||
2945 | Func->setLinkage(getDecodedLinkage(RawLinkage)); | |||
2946 | Func->setAttributes(getAttributes(Record[4])); | |||
2947 | ||||
2948 | unsigned Alignment; | |||
2949 | if (Error Err = parseAlignmentValue(Record[5], Alignment)) | |||
2950 | return Err; | |||
2951 | Func->setAlignment(Alignment); | |||
2952 | if (Record[6]) { | |||
2953 | if (Record[6] - 1 >= SectionTable.size()) | |||
2954 | return error("Invalid ID"); | |||
2955 | Func->setSection(SectionTable[Record[6] - 1]); | |||
2956 | } | |||
2957 | // Local linkage must have default visibility. | |||
2958 | if (!Func->hasLocalLinkage()) | |||
2959 | // FIXME: Change to an error if non-default in 4.0. | |||
2960 | Func->setVisibility(getDecodedVisibility(Record[7])); | |||
2961 | if (Record.size() > 8 && Record[8]) { | |||
2962 | if (Record[8] - 1 >= GCTable.size()) | |||
2963 | return error("Invalid ID"); | |||
2964 | Func->setGC(GCTable[Record[8] - 1]); | |||
2965 | } | |||
2966 | GlobalValue::UnnamedAddr UnnamedAddr = GlobalValue::UnnamedAddr::None; | |||
2967 | if (Record.size() > 9) | |||
2968 | UnnamedAddr = getDecodedUnnamedAddrType(Record[9]); | |||
2969 | Func->setUnnamedAddr(UnnamedAddr); | |||
2970 | if (Record.size() > 10 && Record[10] != 0) | |||
2971 | FunctionPrologues.push_back(std::make_pair(Func, Record[10] - 1)); | |||
2972 | ||||
2973 | if (Record.size() > 11) | |||
2974 | Func->setDLLStorageClass(getDecodedDLLStorageClass(Record[11])); | |||
2975 | else | |||
2976 | upgradeDLLImportExportLinkage(Func, RawLinkage); | |||
2977 | ||||
2978 | if (Record.size() > 12) { | |||
2979 | if (unsigned ComdatID = Record[12]) { | |||
2980 | if (ComdatID > ComdatList.size()) | |||
2981 | return error("Invalid function comdat ID"); | |||
2982 | Func->setComdat(ComdatList[ComdatID - 1]); | |||
2983 | } | |||
2984 | } else if (hasImplicitComdat(RawLinkage)) { | |||
2985 | Func->setComdat(reinterpret_cast<Comdat *>(1)); | |||
2986 | } | |||
2987 | ||||
2988 | if (Record.size() > 13 && Record[13] != 0) | |||
2989 | FunctionPrefixes.push_back(std::make_pair(Func, Record[13] - 1)); | |||
2990 | ||||
2991 | if (Record.size() > 14 && Record[14] != 0) | |||
2992 | FunctionPersonalityFns.push_back(std::make_pair(Func, Record[14] - 1)); | |||
2993 | ||||
2994 | if (Record.size() > 15) { | |||
2995 | Func->setDSOLocal(getDecodedDSOLocal(Record[15])); | |||
2996 | } | |||
2997 | ||||
2998 | ValueList.push_back(Func); | |||
2999 | ||||
3000 | // If this is a function with a body, remember the prototype we are | |||
3001 | // creating now, so that we can match up the body with them later. | |||
3002 | if (!isProto) { | |||
3003 | Func->setIsMaterializable(true); | |||
3004 | FunctionsWithBodies.push_back(Func); | |||
3005 | DeferredFunctionInfo[Func] = 0; | |||
3006 | } | |||
3007 | return Error::success(); | |||
3008 | } | |||
3009 | ||||
/// Parse an alias or ifunc record (MODULE_CODE_ALIAS, MODULE_CODE_ALIAS_OLD,
/// or MODULE_CODE_IFUNC). OpNum tracks how many operands have been consumed,
/// since the three record layouts share a common tail of optional fields.
Error BitcodeReader::parseGlobalIndirectSymbolRecord(
    unsigned BitCode, ArrayRef<uint64_t> Record) {
  // v1 ALIAS_OLD: [alias type, aliasee val#, linkage] (name in VST)
  // v1 ALIAS: [alias type, addrspace, aliasee val#, linkage, visibility,
  //            dllstorageclass, threadlocal, unnamed_addr,
  //            preemption specifier] (name in VST)
  // v1 IFUNC: [alias type, addrspace, aliasee val#, linkage,
  //            visibility, dllstorageclass, threadlocal, unnamed_addr,
  //            preemption specifier] (name in VST)
  // v2: [strtab_offset, strtab_size, v1]
  StringRef Name;
  std::tie(Name, Record) = readNameFromStrtab(Record);

  // The new layouts carry an extra explicit addrspace operand.
  bool NewRecord = BitCode != bitc::MODULE_CODE_ALIAS_OLD;
  if (Record.size() < (3 + (unsigned)NewRecord))
    return error("Invalid record");
  unsigned OpNum = 0;
  Type *Ty = getTypeByID(Record[OpNum++]);
  if (!Ty)
    return error("Invalid record");

  unsigned AddrSpace;
  if (!NewRecord) {
    // Old layout: the type operand is the alias's *pointer* type; derive the
    // value type and address space from it.
    auto *PTy = dyn_cast<PointerType>(Ty);
    if (!PTy)
      return error("Invalid type for value");
    Ty = PTy->getElementType();
    AddrSpace = PTy->getAddressSpace();
  } else {
    AddrSpace = Record[OpNum++];
  }

  auto Val = Record[OpNum++];     // Aliasee value ID, resolved later.
  auto Linkage = Record[OpNum++];
  GlobalIndirectSymbol *NewGA;
  if (BitCode == bitc::MODULE_CODE_ALIAS ||
      BitCode == bitc::MODULE_CODE_ALIAS_OLD)
    NewGA = GlobalAlias::create(Ty, AddrSpace, getDecodedLinkage(Linkage), Name,
                                TheModule);
  else
    NewGA = GlobalIFunc::create(Ty, AddrSpace, getDecodedLinkage(Linkage), Name,
                                nullptr, TheModule);
  // Old bitcode files didn't have visibility field.
  // Local linkage must have default visibility.
  if (OpNum != Record.size()) {
    auto VisInd = OpNum++;
    if (!NewGA->hasLocalLinkage())
      // FIXME: Change to an error if non-default in 4.0.
      NewGA->setVisibility(getDecodedVisibility(Record[VisInd]));
  }
  // dllstorageclass/threadlocal/unnamed_addr only exist on alias records.
  if (BitCode == bitc::MODULE_CODE_ALIAS ||
      BitCode == bitc::MODULE_CODE_ALIAS_OLD) {
    if (OpNum != Record.size())
      NewGA->setDLLStorageClass(getDecodedDLLStorageClass(Record[OpNum++]));
    else
      upgradeDLLImportExportLinkage(NewGA, Linkage);
    if (OpNum != Record.size())
      NewGA->setThreadLocalMode(getDecodedThreadLocalMode(Record[OpNum++]));
    if (OpNum != Record.size())
      NewGA->setUnnamedAddr(getDecodedUnnamedAddrType(Record[OpNum++]));
  }
  if (OpNum != Record.size())
    NewGA->setDSOLocal(getDecodedDSOLocal(Record[OpNum++]));
  ValueList.push_back(NewGA);
  // The aliasee is a forward reference; record it for later resolution.
  IndirectSymbolInits.push_back(std::make_pair(NewGA, Val));
  return Error::success();
}
3077 | ||||
/// Parse the MODULE_BLOCK: dispatch each sub-block and record to its handler.
/// When ResumeBit is non-zero, parsing resumes at that bit offset (used when
/// returning after lazily materializing function bodies); otherwise the
/// module block is entered from the current stream position. May return
/// early (suspending the parse) once function bodies are reached, if the
/// value symbol table has already been seen.
Error BitcodeReader::parseModule(uint64_t ResumeBit,
                                 bool ShouldLazyLoadMetadata) {
  if (ResumeBit)
    Stream.JumpToBit(ResumeBit);
  else if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
    return error("Invalid record");

  SmallVector<uint64_t, 64> Record;

  // Read all the records for this module.
  while (true) {
    BitstreamEntry Entry = Stream.advance();

    switch (Entry.Kind) {
    case BitstreamEntry::Error:
      return error("Malformed block");
    case BitstreamEntry::EndBlock:
      return globalCleanup();

    case BitstreamEntry::SubBlock:
      switch (Entry.ID) {
      default: // Skip unknown content.
        if (Stream.SkipBlock())
          return error("Invalid record");
        break;
      case bitc::BLOCKINFO_BLOCK_ID:
        if (readBlockInfo())
          return error("Malformed block");
        break;
      case bitc::PARAMATTR_BLOCK_ID:
        if (Error Err = parseAttributeBlock())
          return Err;
        break;
      case bitc::PARAMATTR_GROUP_BLOCK_ID:
        if (Error Err = parseAttributeGroupBlock())
          return Err;
        break;
      case bitc::TYPE_BLOCK_ID_NEW:
        if (Error Err = parseTypeTable())
          return Err;
        break;
      case bitc::VALUE_SYMTAB_BLOCK_ID:
        if (!SeenValueSymbolTable) {
          // Either this is an old form VST without function index and an
          // associated VST forward declaration record (which would have caused
          // the VST to be jumped to and parsed before it was encountered
          // normally in the stream), or there were no function blocks to
          // trigger an earlier parsing of the VST.
          assert(VSTOffset == 0 || FunctionsWithBodies.empty());
          if (Error Err = parseValueSymbolTable())
            return Err;
          SeenValueSymbolTable = true;
        } else {
          // We must have had a VST forward declaration record, which caused
          // the parser to jump to and parse the VST earlier.
          assert(VSTOffset > 0);
          if (Stream.SkipBlock())
            return error("Invalid record");
        }
        break;
      case bitc::CONSTANTS_BLOCK_ID:
        if (Error Err = parseConstants())
          return Err;
        // Constants may complete pending global/alias initializers.
        if (Error Err = resolveGlobalAndIndirectSymbolInits())
          return Err;
        break;
      case bitc::METADATA_BLOCK_ID:
        if (ShouldLazyLoadMetadata) {
          // Defer metadata parsing: note the block position and skip it.
          if (Error Err = rememberAndSkipMetadata())
            return Err;
          break;
        }
        assert(DeferredMetadataInfo.empty() && "Unexpected deferred metadata");
        if (Error Err = MDLoader->parseModuleMetadata())
          return Err;
        break;
      case bitc::METADATA_KIND_BLOCK_ID:
        if (Error Err = MDLoader->parseMetadataKinds())
          return Err;
        break;
      case bitc::FUNCTION_BLOCK_ID:
        // If this is the first function body we've seen, reverse the
        // FunctionsWithBodies list.
        if (!SeenFirstFunctionBody) {
          std::reverse(FunctionsWithBodies.begin(), FunctionsWithBodies.end());
          if (Error Err = globalCleanup())
            return Err;
          SeenFirstFunctionBody = true;
        }

        if (VSTOffset > 0) {
          // If we have a VST forward declaration record, make sure we
          // parse the VST now if we haven't already. It is needed to
          // set up the DeferredFunctionInfo vector for lazy reading.
          if (!SeenValueSymbolTable) {
            if (Error Err = BitcodeReader::parseValueSymbolTable(VSTOffset))
              return Err;
            SeenValueSymbolTable = true;
            // Fall through so that we record the NextUnreadBit below.
            // This is necessary in case we have an anonymous function that
            // is later materialized. Since it will not have a VST entry we
            // need to fall back to the lazy parse to find its offset.
          } else {
            // If we have a VST forward declaration record, but have already
            // parsed the VST (just above, when the first function body was
            // encountered here), then we are resuming the parse after
            // materializing functions. The ResumeBit points to the
            // start of the last function block recorded in the
            // DeferredFunctionInfo map. Skip it.
            if (Stream.SkipBlock())
              return error("Invalid record");
            continue;
          }
        }

        // Support older bitcode files that did not have the function
        // index in the VST, nor a VST forward declaration record, as
        // well as anonymous functions that do not have VST entries.
        // Build the DeferredFunctionInfo vector on the fly.
        if (Error Err = rememberAndSkipFunctionBody())
          return Err;

        // Suspend parsing when we reach the function bodies. Subsequent
        // materialization calls will resume it when necessary. If the bitcode
        // file is old, the symbol table will be at the end instead and will not
        // have been seen yet. In this case, just finish the parse now.
        if (SeenValueSymbolTable) {
          NextUnreadBit = Stream.GetCurrentBitNo();
          // After the VST has been parsed, we need to make sure intrinsic name
          // are auto-upgraded.
          return globalCleanup();
        }
        break;
      case bitc::USELIST_BLOCK_ID:
        if (Error Err = parseUseLists())
          return Err;
        break;
      case bitc::OPERAND_BUNDLE_TAGS_BLOCK_ID:
        if (Error Err = parseOperandBundleTags())
          return Err;
        break;
      case bitc::SYNC_SCOPE_NAMES_BLOCK_ID:
        if (Error Err = parseSyncScopeNames())
          return Err;
        break;
      }
      continue;

    case BitstreamEntry::Record:
      // The interesting case.
      break;
    }

    // Read a record.
    auto BitCode = Stream.readRecord(Entry.ID, Record);
    switch (BitCode) {
    default: break; // Default behavior, ignore unknown content.
    case bitc::MODULE_CODE_VERSION: {
      Expected<unsigned> VersionOrErr = parseVersionRecord(Record);
      if (!VersionOrErr)
        return VersionOrErr.takeError();
      // Version >= 1 switches value references to relative IDs.
      UseRelativeIDs = *VersionOrErr >= 1;
      break;
    }
    case bitc::MODULE_CODE_TRIPLE: {  // TRIPLE: [strchr x N]
      std::string S;
      if (convertToString(Record, 0, S))
        return error("Invalid record");
      TheModule->setTargetTriple(S);
      break;
    }
    case bitc::MODULE_CODE_DATALAYOUT: {  // DATALAYOUT: [strchr x N]
      std::string S;
      if (convertToString(Record, 0, S))
        return error("Invalid record");
      TheModule->setDataLayout(S);
      break;
    }
    case bitc::MODULE_CODE_ASM: {  // ASM: [strchr x N]
      std::string S;
      if (convertToString(Record, 0, S))
        return error("Invalid record");
      TheModule->setModuleInlineAsm(S);
      break;
    }
    case bitc::MODULE_CODE_DEPLIB: {  // DEPLIB: [strchr x N]
      // FIXME: Remove in 4.0.
      std::string S;
      if (convertToString(Record, 0, S))
        return error("Invalid record");
      // Ignore value.
      break;
    }
    case bitc::MODULE_CODE_SECTIONNAME: {  // SECTIONNAME: [strchr x N]
      std::string S;
      if (convertToString(Record, 0, S))
        return error("Invalid record");
      SectionTable.push_back(S);
      break;
    }
    case bitc::MODULE_CODE_GCNAME: {  // SECTIONNAME: [strchr x N]
      std::string S;
      if (convertToString(Record, 0, S))
        return error("Invalid record");
      GCTable.push_back(S);
      break;
    }
    case bitc::MODULE_CODE_COMDAT:
      if (Error Err = parseComdatRecord(Record))
        return Err;
      break;
    case bitc::MODULE_CODE_GLOBALVAR:
      if (Error Err = parseGlobalVarRecord(Record))
        return Err;
      break;
    case bitc::MODULE_CODE_FUNCTION:
      if (Error Err = parseFunctionRecord(Record))
        return Err;
      break;
    case bitc::MODULE_CODE_IFUNC:
    case bitc::MODULE_CODE_ALIAS:
    case bitc::MODULE_CODE_ALIAS_OLD:
      if (Error Err = parseGlobalIndirectSymbolRecord(BitCode, Record))
        return Err;
      break;
    /// MODULE_CODE_VSTOFFSET: [offset]
    case bitc::MODULE_CODE_VSTOFFSET:
      if (Record.size() < 1)
        return error("Invalid record");
      // Note that we subtract 1 here because the offset is relative to one word
      // before the start of the identification or module block, which was
      // historically always the start of the regular bitcode header.
      VSTOffset = Record[0] - 1;
      break;
    /// MODULE_CODE_SOURCE_FILENAME: [namechar x N]
    case bitc::MODULE_CODE_SOURCE_FILENAME:
      SmallString<128> ValueName;
      if (convertToString(Record, 0, ValueName))
        return error("Invalid record");
      TheModule->setSourceFileName(ValueName);
      break;
    }
    Record.clear();
  }
}
3323 | ||||
3324 | Error BitcodeReader::parseBitcodeInto(Module *M, bool ShouldLazyLoadMetadata, | |||
3325 | bool IsImporting) { | |||
3326 | TheModule = M; | |||
3327 | MDLoader = MetadataLoader(Stream, *M, ValueList, IsImporting, | |||
3328 | [&](unsigned ID) { return getTypeByID(ID); }); | |||
3329 | return parseModule(0, ShouldLazyLoadMetadata); | |||
3330 | } | |||
3331 | ||||
3332 | Error BitcodeReader::typeCheckLoadStoreInst(Type *ValType, Type *PtrType) { | |||
3333 | if (!isa<PointerType>(PtrType)) | |||
3334 | return error("Load/Store operand is not a pointer type"); | |||
3335 | Type *ElemType = cast<PointerType>(PtrType)->getElementType(); | |||
3336 | ||||
3337 | if (ValType && ValType != ElemType) | |||
3338 | return error("Explicit load/store type does not match pointee " | |||
3339 | "type of pointer operand"); | |||
3340 | if (!PointerType::isLoadableOrStorableType(ElemType)) | |||
3341 | return error("Cannot load/store from pointer"); | |||
3342 | return Error::success(); | |||
3343 | } | |||
3344 | ||||
3345 | /// Lazily parse the specified function body block. | |||
3346 | Error BitcodeReader::parseFunctionBody(Function *F) { | |||
3347 | if (Stream.EnterSubBlock(bitc::FUNCTION_BLOCK_ID)) | |||
3348 | return error("Invalid record"); | |||
3349 | ||||
3350 | // Unexpected unresolved metadata when parsing function. | |||
3351 | if (MDLoader->hasFwdRefs()) | |||
3352 | return error("Invalid function metadata: incoming forward references"); | |||
3353 | ||||
3354 | InstructionList.clear(); | |||
3355 | unsigned ModuleValueListSize = ValueList.size(); | |||
3356 | unsigned ModuleMDLoaderSize = MDLoader->size(); | |||
3357 | ||||
3358 | // Add all the function arguments to the value table. | |||
3359 | for (Argument &I : F->args()) | |||
3360 | ValueList.push_back(&I); | |||
3361 | ||||
3362 | unsigned NextValueNo = ValueList.size(); | |||
3363 | BasicBlock *CurBB = nullptr; | |||
3364 | unsigned CurBBNo = 0; | |||
3365 | ||||
3366 | DebugLoc LastLoc; | |||
3367 | auto getLastInstruction = [&]() -> Instruction * { | |||
3368 | if (CurBB && !CurBB->empty()) | |||
3369 | return &CurBB->back(); | |||
3370 | else if (CurBBNo && FunctionBBs[CurBBNo - 1] && | |||
3371 | !FunctionBBs[CurBBNo - 1]->empty()) | |||
3372 | return &FunctionBBs[CurBBNo - 1]->back(); | |||
3373 | return nullptr; | |||
3374 | }; | |||
3375 | ||||
3376 | std::vector<OperandBundleDef> OperandBundles; | |||
3377 | ||||
3378 | // Read all the records. | |||
3379 | SmallVector<uint64_t, 64> Record; | |||
3380 | ||||
3381 | while (true) { | |||
3382 | BitstreamEntry Entry = Stream.advance(); | |||
3383 | ||||
3384 | switch (Entry.Kind) { | |||
3385 | case BitstreamEntry::Error: | |||
3386 | return error("Malformed block"); | |||
3387 | case BitstreamEntry::EndBlock: | |||
3388 | goto OutOfRecordLoop; | |||
3389 | ||||
3390 | case BitstreamEntry::SubBlock: | |||
3391 | switch (Entry.ID) { | |||
3392 | default: // Skip unknown content. | |||
3393 | if (Stream.SkipBlock()) | |||
3394 | return error("Invalid record"); | |||
3395 | break; | |||
3396 | case bitc::CONSTANTS_BLOCK_ID: | |||
3397 | if (Error Err = parseConstants()) | |||
3398 | return Err; | |||
3399 | NextValueNo = ValueList.size(); | |||
3400 | break; | |||
3401 | case bitc::VALUE_SYMTAB_BLOCK_ID: | |||
3402 | if (Error Err = parseValueSymbolTable()) | |||
3403 | return Err; | |||
3404 | break; | |||
3405 | case bitc::METADATA_ATTACHMENT_ID: | |||
3406 | if (Error Err = MDLoader->parseMetadataAttachment(*F, InstructionList)) | |||
3407 | return Err; | |||
3408 | break; | |||
3409 | case bitc::METADATA_BLOCK_ID: | |||
3410 | assert(DeferredMetadataInfo.empty() &&(static_cast <bool> (DeferredMetadataInfo.empty() && "Must read all module-level metadata before function-level") ? void (0) : __assert_fail ("DeferredMetadataInfo.empty() && \"Must read all module-level metadata before function-level\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 3411, __extension__ __PRETTY_FUNCTION__)) | |||
3411 | "Must read all module-level metadata before function-level")(static_cast <bool> (DeferredMetadataInfo.empty() && "Must read all module-level metadata before function-level") ? void (0) : __assert_fail ("DeferredMetadataInfo.empty() && \"Must read all module-level metadata before function-level\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 3411, __extension__ __PRETTY_FUNCTION__)); | |||
3412 | if (Error Err = MDLoader->parseFunctionMetadata()) | |||
3413 | return Err; | |||
3414 | break; | |||
3415 | case bitc::USELIST_BLOCK_ID: | |||
3416 | if (Error Err = parseUseLists()) | |||
3417 | return Err; | |||
3418 | break; | |||
3419 | } | |||
3420 | continue; | |||
3421 | ||||
3422 | case BitstreamEntry::Record: | |||
3423 | // The interesting case. | |||
3424 | break; | |||
3425 | } | |||
3426 | ||||
3427 | // Read a record. | |||
3428 | Record.clear(); | |||
3429 | Instruction *I = nullptr; | |||
3430 | unsigned BitCode = Stream.readRecord(Entry.ID, Record); | |||
3431 | switch (BitCode) { | |||
3432 | default: // Default behavior: reject | |||
3433 | return error("Invalid value"); | |||
3434 | case bitc::FUNC_CODE_DECLAREBLOCKS: { // DECLAREBLOCKS: [nblocks] | |||
3435 | if (Record.size() < 1 || Record[0] == 0) | |||
3436 | return error("Invalid record"); | |||
3437 | // Create all the basic blocks for the function. | |||
3438 | FunctionBBs.resize(Record[0]); | |||
3439 | ||||
3440 | // See if anything took the address of blocks in this function. | |||
3441 | auto BBFRI = BasicBlockFwdRefs.find(F); | |||
3442 | if (BBFRI == BasicBlockFwdRefs.end()) { | |||
3443 | for (unsigned i = 0, e = FunctionBBs.size(); i != e; ++i) | |||
3444 | FunctionBBs[i] = BasicBlock::Create(Context, "", F); | |||
3445 | } else { | |||
3446 | auto &BBRefs = BBFRI->second; | |||
3447 | // Check for invalid basic block references. | |||
3448 | if (BBRefs.size() > FunctionBBs.size()) | |||
3449 | return error("Invalid ID"); | |||
3450 | assert(!BBRefs.empty() && "Unexpected empty array")(static_cast <bool> (!BBRefs.empty() && "Unexpected empty array" ) ? void (0) : __assert_fail ("!BBRefs.empty() && \"Unexpected empty array\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 3450, __extension__ __PRETTY_FUNCTION__)); | |||
3451 | assert(!BBRefs.front() && "Invalid reference to entry block")(static_cast <bool> (!BBRefs.front() && "Invalid reference to entry block" ) ? void (0) : __assert_fail ("!BBRefs.front() && \"Invalid reference to entry block\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 3451, __extension__ __PRETTY_FUNCTION__)); | |||
3452 | for (unsigned I = 0, E = FunctionBBs.size(), RE = BBRefs.size(); I != E; | |||
3453 | ++I) | |||
3454 | if (I < RE && BBRefs[I]) { | |||
3455 | BBRefs[I]->insertInto(F); | |||
3456 | FunctionBBs[I] = BBRefs[I]; | |||
3457 | } else { | |||
3458 | FunctionBBs[I] = BasicBlock::Create(Context, "", F); | |||
3459 | } | |||
3460 | ||||
3461 | // Erase from the table. | |||
3462 | BasicBlockFwdRefs.erase(BBFRI); | |||
3463 | } | |||
3464 | ||||
3465 | CurBB = FunctionBBs[0]; | |||
3466 | continue; | |||
3467 | } | |||
3468 | ||||
3469 | case bitc::FUNC_CODE_DEBUG_LOC_AGAIN: // DEBUG_LOC_AGAIN | |||
3470 | // This record indicates that the last instruction is at the same | |||
3471 | // location as the previous instruction with a location. | |||
3472 | I = getLastInstruction(); | |||
3473 | ||||
3474 | if (!I) | |||
3475 | return error("Invalid record"); | |||
3476 | I->setDebugLoc(LastLoc); | |||
3477 | I = nullptr; | |||
3478 | continue; | |||
3479 | ||||
3480 | case bitc::FUNC_CODE_DEBUG_LOC: { // DEBUG_LOC: [line, col, scope, ia] | |||
3481 | I = getLastInstruction(); | |||
3482 | if (!I || Record.size() < 4) | |||
3483 | return error("Invalid record"); | |||
3484 | ||||
3485 | unsigned Line = Record[0], Col = Record[1]; | |||
3486 | unsigned ScopeID = Record[2], IAID = Record[3]; | |||
3487 | ||||
3488 | MDNode *Scope = nullptr, *IA = nullptr; | |||
3489 | if (ScopeID) { | |||
3490 | Scope = MDLoader->getMDNodeFwdRefOrNull(ScopeID - 1); | |||
3491 | if (!Scope) | |||
3492 | return error("Invalid record"); | |||
3493 | } | |||
3494 | if (IAID) { | |||
3495 | IA = MDLoader->getMDNodeFwdRefOrNull(IAID - 1); | |||
3496 | if (!IA) | |||
3497 | return error("Invalid record"); | |||
3498 | } | |||
3499 | LastLoc = DebugLoc::get(Line, Col, Scope, IA); | |||
3500 | I->setDebugLoc(LastLoc); | |||
3501 | I = nullptr; | |||
3502 | continue; | |||
3503 | } | |||
3504 | ||||
3505 | case bitc::FUNC_CODE_INST_BINOP: { // BINOP: [opval, ty, opval, opcode] | |||
3506 | unsigned OpNum = 0; | |||
3507 | Value *LHS, *RHS; | |||
3508 | if (getValueTypePair(Record, OpNum, NextValueNo, LHS) || | |||
3509 | popValue(Record, OpNum, NextValueNo, LHS->getType(), RHS) || | |||
3510 | OpNum+1 > Record.size()) | |||
3511 | return error("Invalid record"); | |||
3512 | ||||
3513 | int Opc = getDecodedBinaryOpcode(Record[OpNum++], LHS->getType()); | |||
3514 | if (Opc == -1) | |||
3515 | return error("Invalid record"); | |||
3516 | I = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS); | |||
3517 | InstructionList.push_back(I); | |||
3518 | if (OpNum < Record.size()) { | |||
3519 | if (Opc == Instruction::Add || | |||
3520 | Opc == Instruction::Sub || | |||
3521 | Opc == Instruction::Mul || | |||
3522 | Opc == Instruction::Shl) { | |||
3523 | if (Record[OpNum] & (1 << bitc::OBO_NO_SIGNED_WRAP)) | |||
3524 | cast<BinaryOperator>(I)->setHasNoSignedWrap(true); | |||
3525 | if (Record[OpNum] & (1 << bitc::OBO_NO_UNSIGNED_WRAP)) | |||
3526 | cast<BinaryOperator>(I)->setHasNoUnsignedWrap(true); | |||
3527 | } else if (Opc == Instruction::SDiv || | |||
3528 | Opc == Instruction::UDiv || | |||
3529 | Opc == Instruction::LShr || | |||
3530 | Opc == Instruction::AShr) { | |||
3531 | if (Record[OpNum] & (1 << bitc::PEO_EXACT)) | |||
3532 | cast<BinaryOperator>(I)->setIsExact(true); | |||
3533 | } else if (isa<FPMathOperator>(I)) { | |||
3534 | FastMathFlags FMF = getDecodedFastMathFlags(Record[OpNum]); | |||
3535 | if (FMF.any()) | |||
3536 | I->setFastMathFlags(FMF); | |||
3537 | } | |||
3538 | ||||
3539 | } | |||
3540 | break; | |||
3541 | } | |||
3542 | case bitc::FUNC_CODE_INST_CAST: { // CAST: [opval, opty, destty, castopc] | |||
3543 | unsigned OpNum = 0; | |||
3544 | Value *Op; | |||
3545 | if (getValueTypePair(Record, OpNum, NextValueNo, Op) || | |||
3546 | OpNum+2 != Record.size()) | |||
3547 | return error("Invalid record"); | |||
3548 | ||||
3549 | Type *ResTy = getTypeByID(Record[OpNum]); | |||
3550 | int Opc = getDecodedCastOpcode(Record[OpNum + 1]); | |||
3551 | if (Opc == -1 || !ResTy) | |||
3552 | return error("Invalid record"); | |||
3553 | Instruction *Temp = nullptr; | |||
3554 | if ((I = UpgradeBitCastInst(Opc, Op, ResTy, Temp))) { | |||
3555 | if (Temp) { | |||
3556 | InstructionList.push_back(Temp); | |||
3557 | CurBB->getInstList().push_back(Temp); | |||
3558 | } | |||
3559 | } else { | |||
3560 | auto CastOp = (Instruction::CastOps)Opc; | |||
3561 | if (!CastInst::castIsValid(CastOp, Op, ResTy)) | |||
3562 | return error("Invalid cast"); | |||
3563 | I = CastInst::Create(CastOp, Op, ResTy); | |||
3564 | } | |||
3565 | InstructionList.push_back(I); | |||
3566 | break; | |||
3567 | } | |||
3568 | case bitc::FUNC_CODE_INST_INBOUNDS_GEP_OLD: | |||
3569 | case bitc::FUNC_CODE_INST_GEP_OLD: | |||
3570 | case bitc::FUNC_CODE_INST_GEP: { // GEP: type, [n x operands] | |||
3571 | unsigned OpNum = 0; | |||
3572 | ||||
3573 | Type *Ty; | |||
3574 | bool InBounds; | |||
3575 | ||||
3576 | if (BitCode == bitc::FUNC_CODE_INST_GEP) { | |||
3577 | InBounds = Record[OpNum++]; | |||
3578 | Ty = getTypeByID(Record[OpNum++]); | |||
3579 | } else { | |||
3580 | InBounds = BitCode == bitc::FUNC_CODE_INST_INBOUNDS_GEP_OLD; | |||
3581 | Ty = nullptr; | |||
3582 | } | |||
3583 | ||||
3584 | Value *BasePtr; | |||
3585 | if (getValueTypePair(Record, OpNum, NextValueNo, BasePtr)) | |||
3586 | return error("Invalid record"); | |||
3587 | ||||
3588 | if (!Ty) | |||
3589 | Ty = cast<PointerType>(BasePtr->getType()->getScalarType()) | |||
3590 | ->getElementType(); | |||
3591 | else if (Ty != | |||
3592 | cast<PointerType>(BasePtr->getType()->getScalarType()) | |||
3593 | ->getElementType()) | |||
3594 | return error( | |||
3595 | "Explicit gep type does not match pointee type of pointer operand"); | |||
3596 | ||||
3597 | SmallVector<Value*, 16> GEPIdx; | |||
3598 | while (OpNum != Record.size()) { | |||
3599 | Value *Op; | |||
3600 | if (getValueTypePair(Record, OpNum, NextValueNo, Op)) | |||
3601 | return error("Invalid record"); | |||
3602 | GEPIdx.push_back(Op); | |||
3603 | } | |||
3604 | ||||
3605 | I = GetElementPtrInst::Create(Ty, BasePtr, GEPIdx); | |||
3606 | ||||
3607 | InstructionList.push_back(I); | |||
3608 | if (InBounds) | |||
3609 | cast<GetElementPtrInst>(I)->setIsInBounds(true); | |||
3610 | break; | |||
3611 | } | |||
3612 | ||||
3613 | case bitc::FUNC_CODE_INST_EXTRACTVAL: { | |||
3614 | // EXTRACTVAL: [opty, opval, n x indices] | |||
3615 | unsigned OpNum = 0; | |||
3616 | Value *Agg; | |||
3617 | if (getValueTypePair(Record, OpNum, NextValueNo, Agg)) | |||
3618 | return error("Invalid record"); | |||
3619 | ||||
3620 | unsigned RecSize = Record.size(); | |||
3621 | if (OpNum == RecSize) | |||
3622 | return error("EXTRACTVAL: Invalid instruction with 0 indices"); | |||
3623 | ||||
3624 | SmallVector<unsigned, 4> EXTRACTVALIdx; | |||
3625 | Type *CurTy = Agg->getType(); | |||
3626 | for (; OpNum != RecSize; ++OpNum) { | |||
3627 | bool IsArray = CurTy->isArrayTy(); | |||
3628 | bool IsStruct = CurTy->isStructTy(); | |||
3629 | uint64_t Index = Record[OpNum]; | |||
3630 | ||||
3631 | if (!IsStruct && !IsArray) | |||
3632 | return error("EXTRACTVAL: Invalid type"); | |||
3633 | if ((unsigned)Index != Index) | |||
3634 | return error("Invalid value"); | |||
3635 | if (IsStruct && Index >= CurTy->subtypes().size()) | |||
3636 | return error("EXTRACTVAL: Invalid struct index"); | |||
3637 | if (IsArray && Index >= CurTy->getArrayNumElements()) | |||
3638 | return error("EXTRACTVAL: Invalid array index"); | |||
3639 | EXTRACTVALIdx.push_back((unsigned)Index); | |||
3640 | ||||
3641 | if (IsStruct) | |||
3642 | CurTy = CurTy->subtypes()[Index]; | |||
3643 | else | |||
3644 | CurTy = CurTy->subtypes()[0]; | |||
3645 | } | |||
3646 | ||||
3647 | I = ExtractValueInst::Create(Agg, EXTRACTVALIdx); | |||
3648 | InstructionList.push_back(I); | |||
3649 | break; | |||
3650 | } | |||
3651 | ||||
3652 | case bitc::FUNC_CODE_INST_INSERTVAL: { | |||
3653 | // INSERTVAL: [opty, opval, opty, opval, n x indices] | |||
3654 | unsigned OpNum = 0; | |||
3655 | Value *Agg; | |||
3656 | if (getValueTypePair(Record, OpNum, NextValueNo, Agg)) | |||
3657 | return error("Invalid record"); | |||
3658 | Value *Val; | |||
3659 | if (getValueTypePair(Record, OpNum, NextValueNo, Val)) | |||
3660 | return error("Invalid record"); | |||
3661 | ||||
3662 | unsigned RecSize = Record.size(); | |||
3663 | if (OpNum == RecSize) | |||
3664 | return error("INSERTVAL: Invalid instruction with 0 indices"); | |||
3665 | ||||
3666 | SmallVector<unsigned, 4> INSERTVALIdx; | |||
3667 | Type *CurTy = Agg->getType(); | |||
3668 | for (; OpNum != RecSize; ++OpNum) { | |||
3669 | bool IsArray = CurTy->isArrayTy(); | |||
3670 | bool IsStruct = CurTy->isStructTy(); | |||
3671 | uint64_t Index = Record[OpNum]; | |||
3672 | ||||
3673 | if (!IsStruct && !IsArray) | |||
3674 | return error("INSERTVAL: Invalid type"); | |||
3675 | if ((unsigned)Index != Index) | |||
3676 | return error("Invalid value"); | |||
3677 | if (IsStruct && Index >= CurTy->subtypes().size()) | |||
3678 | return error("INSERTVAL: Invalid struct index"); | |||
3679 | if (IsArray && Index >= CurTy->getArrayNumElements()) | |||
3680 | return error("INSERTVAL: Invalid array index"); | |||
3681 | ||||
3682 | INSERTVALIdx.push_back((unsigned)Index); | |||
3683 | if (IsStruct) | |||
3684 | CurTy = CurTy->subtypes()[Index]; | |||
3685 | else | |||
3686 | CurTy = CurTy->subtypes()[0]; | |||
3687 | } | |||
3688 | ||||
3689 | if (CurTy != Val->getType()) | |||
3690 | return error("Inserted value type doesn't match aggregate type"); | |||
3691 | ||||
3692 | I = InsertValueInst::Create(Agg, Val, INSERTVALIdx); | |||
3693 | InstructionList.push_back(I); | |||
3694 | break; | |||
3695 | } | |||
3696 | ||||
3697 | case bitc::FUNC_CODE_INST_SELECT: { // SELECT: [opval, ty, opval, opval] | |||
3698 | // obsolete form of select | |||
3699 | // handles select i1 ... in old bitcode | |||
3700 | unsigned OpNum = 0; | |||
3701 | Value *TrueVal, *FalseVal, *Cond; | |||
3702 | if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) || | |||
3703 | popValue(Record, OpNum, NextValueNo, TrueVal->getType(), FalseVal) || | |||
3704 | popValue(Record, OpNum, NextValueNo, Type::getInt1Ty(Context), Cond)) | |||
3705 | return error("Invalid record"); | |||
3706 | ||||
3707 | I = SelectInst::Create(Cond, TrueVal, FalseVal); | |||
3708 | InstructionList.push_back(I); | |||
3709 | break; | |||
3710 | } | |||
3711 | ||||
3712 | case bitc::FUNC_CODE_INST_VSELECT: {// VSELECT: [ty,opval,opval,predty,pred] | |||
3713 | // new form of select | |||
3714 | // handles select i1 or select [N x i1] | |||
3715 | unsigned OpNum = 0; | |||
3716 | Value *TrueVal, *FalseVal, *Cond; | |||
3717 | if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) || | |||
3718 | popValue(Record, OpNum, NextValueNo, TrueVal->getType(), FalseVal) || | |||
3719 | getValueTypePair(Record, OpNum, NextValueNo, Cond)) | |||
3720 | return error("Invalid record"); | |||
3721 | ||||
3722 | // select condition can be either i1 or [N x i1] | |||
3723 | if (VectorType* vector_type = | |||
3724 | dyn_cast<VectorType>(Cond->getType())) { | |||
3725 | // expect <n x i1> | |||
3726 | if (vector_type->getElementType() != Type::getInt1Ty(Context)) | |||
3727 | return error("Invalid type for value"); | |||
3728 | } else { | |||
3729 | // expect i1 | |||
3730 | if (Cond->getType() != Type::getInt1Ty(Context)) | |||
3731 | return error("Invalid type for value"); | |||
3732 | } | |||
3733 | ||||
3734 | I = SelectInst::Create(Cond, TrueVal, FalseVal); | |||
3735 | InstructionList.push_back(I); | |||
3736 | break; | |||
3737 | } | |||
3738 | ||||
3739 | case bitc::FUNC_CODE_INST_EXTRACTELT: { // EXTRACTELT: [opty, opval, opval] | |||
3740 | unsigned OpNum = 0; | |||
3741 | Value *Vec, *Idx; | |||
3742 | if (getValueTypePair(Record, OpNum, NextValueNo, Vec) || | |||
3743 | getValueTypePair(Record, OpNum, NextValueNo, Idx)) | |||
3744 | return error("Invalid record"); | |||
3745 | if (!Vec->getType()->isVectorTy()) | |||
3746 | return error("Invalid type for value"); | |||
3747 | I = ExtractElementInst::Create(Vec, Idx); | |||
3748 | InstructionList.push_back(I); | |||
3749 | break; | |||
3750 | } | |||
3751 | ||||
3752 | case bitc::FUNC_CODE_INST_INSERTELT: { // INSERTELT: [ty, opval,opval,opval] | |||
3753 | unsigned OpNum = 0; | |||
3754 | Value *Vec, *Elt, *Idx; | |||
3755 | if (getValueTypePair(Record, OpNum, NextValueNo, Vec)) | |||
3756 | return error("Invalid record"); | |||
3757 | if (!Vec->getType()->isVectorTy()) | |||
3758 | return error("Invalid type for value"); | |||
3759 | if (popValue(Record, OpNum, NextValueNo, | |||
3760 | cast<VectorType>(Vec->getType())->getElementType(), Elt) || | |||
3761 | getValueTypePair(Record, OpNum, NextValueNo, Idx)) | |||
3762 | return error("Invalid record"); | |||
3763 | I = InsertElementInst::Create(Vec, Elt, Idx); | |||
3764 | InstructionList.push_back(I); | |||
3765 | break; | |||
3766 | } | |||
3767 | ||||
3768 | case bitc::FUNC_CODE_INST_SHUFFLEVEC: {// SHUFFLEVEC: [opval,ty,opval,opval] | |||
3769 | unsigned OpNum = 0; | |||
3770 | Value *Vec1, *Vec2, *Mask; | |||
3771 | if (getValueTypePair(Record, OpNum, NextValueNo, Vec1) || | |||
3772 | popValue(Record, OpNum, NextValueNo, Vec1->getType(), Vec2)) | |||
3773 | return error("Invalid record"); | |||
3774 | ||||
3775 | if (getValueTypePair(Record, OpNum, NextValueNo, Mask)) | |||
3776 | return error("Invalid record"); | |||
3777 | if (!Vec1->getType()->isVectorTy() || !Vec2->getType()->isVectorTy()) | |||
3778 | return error("Invalid type for value"); | |||
3779 | I = new ShuffleVectorInst(Vec1, Vec2, Mask); | |||
3780 | InstructionList.push_back(I); | |||
3781 | break; | |||
3782 | } | |||
3783 | ||||
3784 | case bitc::FUNC_CODE_INST_CMP: // CMP: [opty, opval, opval, pred] | |||
3785 | // Old form of ICmp/FCmp returning bool | |||
3786 | // Existed to differentiate between icmp/fcmp and vicmp/vfcmp which were | |||
3787 | // both legal on vectors but had different behaviour. | |||
3788 | case bitc::FUNC_CODE_INST_CMP2: { // CMP2: [opty, opval, opval, pred] | |||
3789 | // FCmp/ICmp returning bool or vector of bool | |||
3790 | ||||
3791 | unsigned OpNum = 0; | |||
3792 | Value *LHS, *RHS; | |||
3793 | if (getValueTypePair(Record, OpNum, NextValueNo, LHS) || | |||
3794 | popValue(Record, OpNum, NextValueNo, LHS->getType(), RHS)) | |||
3795 | return error("Invalid record"); | |||
3796 | ||||
3797 | unsigned PredVal = Record[OpNum]; | |||
3798 | bool IsFP = LHS->getType()->isFPOrFPVectorTy(); | |||
3799 | FastMathFlags FMF; | |||
3800 | if (IsFP && Record.size() > OpNum+1) | |||
3801 | FMF = getDecodedFastMathFlags(Record[++OpNum]); | |||
3802 | ||||
3803 | if (OpNum+1 != Record.size()) | |||
3804 | return error("Invalid record"); | |||
3805 | ||||
3806 | if (LHS->getType()->isFPOrFPVectorTy()) | |||
3807 | I = new FCmpInst((FCmpInst::Predicate)PredVal, LHS, RHS); | |||
3808 | else | |||
3809 | I = new ICmpInst((ICmpInst::Predicate)PredVal, LHS, RHS); | |||
3810 | ||||
3811 | if (FMF.any()) | |||
3812 | I->setFastMathFlags(FMF); | |||
3813 | InstructionList.push_back(I); | |||
3814 | break; | |||
3815 | } | |||
3816 | ||||
3817 | case bitc::FUNC_CODE_INST_RET: // RET: [opty,opval<optional>] | |||
3818 | { | |||
3819 | unsigned Size = Record.size(); | |||
3820 | if (Size == 0) { | |||
3821 | I = ReturnInst::Create(Context); | |||
3822 | InstructionList.push_back(I); | |||
3823 | break; | |||
3824 | } | |||
3825 | ||||
3826 | unsigned OpNum = 0; | |||
3827 | Value *Op = nullptr; | |||
3828 | if (getValueTypePair(Record, OpNum, NextValueNo, Op)) | |||
3829 | return error("Invalid record"); | |||
3830 | if (OpNum != Record.size()) | |||
3831 | return error("Invalid record"); | |||
3832 | ||||
3833 | I = ReturnInst::Create(Context, Op); | |||
3834 | InstructionList.push_back(I); | |||
3835 | break; | |||
3836 | } | |||
3837 | case bitc::FUNC_CODE_INST_BR: { // BR: [bb#, bb#, opval] or [bb#] | |||
3838 | if (Record.size() != 1 && Record.size() != 3) | |||
3839 | return error("Invalid record"); | |||
3840 | BasicBlock *TrueDest = getBasicBlock(Record[0]); | |||
3841 | if (!TrueDest) | |||
3842 | return error("Invalid record"); | |||
3843 | ||||
3844 | if (Record.size() == 1) { | |||
3845 | I = BranchInst::Create(TrueDest); | |||
3846 | InstructionList.push_back(I); | |||
3847 | } | |||
3848 | else { | |||
3849 | BasicBlock *FalseDest = getBasicBlock(Record[1]); | |||
3850 | Value *Cond = getValue(Record, 2, NextValueNo, | |||
3851 | Type::getInt1Ty(Context)); | |||
3852 | if (!FalseDest || !Cond) | |||
3853 | return error("Invalid record"); | |||
3854 | I = BranchInst::Create(TrueDest, FalseDest, Cond); | |||
3855 | InstructionList.push_back(I); | |||
3856 | } | |||
3857 | break; | |||
3858 | } | |||
3859 | case bitc::FUNC_CODE_INST_CLEANUPRET: { // CLEANUPRET: [val] or [val,bb#] | |||
3860 | if (Record.size() != 1 && Record.size() != 2) | |||
3861 | return error("Invalid record"); | |||
3862 | unsigned Idx = 0; | |||
3863 | Value *CleanupPad = | |||
3864 | getValue(Record, Idx++, NextValueNo, Type::getTokenTy(Context)); | |||
3865 | if (!CleanupPad) | |||
3866 | return error("Invalid record"); | |||
3867 | BasicBlock *UnwindDest = nullptr; | |||
3868 | if (Record.size() == 2) { | |||
3869 | UnwindDest = getBasicBlock(Record[Idx++]); | |||
3870 | if (!UnwindDest) | |||
3871 | return error("Invalid record"); | |||
3872 | } | |||
3873 | ||||
3874 | I = CleanupReturnInst::Create(CleanupPad, UnwindDest); | |||
3875 | InstructionList.push_back(I); | |||
3876 | break; | |||
3877 | } | |||
3878 | case bitc::FUNC_CODE_INST_CATCHRET: { // CATCHRET: [val,bb#] | |||
3879 | if (Record.size() != 2) | |||
3880 | return error("Invalid record"); | |||
3881 | unsigned Idx = 0; | |||
3882 | Value *CatchPad = | |||
3883 | getValue(Record, Idx++, NextValueNo, Type::getTokenTy(Context)); | |||
3884 | if (!CatchPad) | |||
3885 | return error("Invalid record"); | |||
3886 | BasicBlock *BB = getBasicBlock(Record[Idx++]); | |||
3887 | if (!BB) | |||
3888 | return error("Invalid record"); | |||
3889 | ||||
3890 | I = CatchReturnInst::Create(CatchPad, BB); | |||
3891 | InstructionList.push_back(I); | |||
3892 | break; | |||
3893 | } | |||
3894 | case bitc::FUNC_CODE_INST_CATCHSWITCH: { // CATCHSWITCH: [tok,num,(bb)*,bb?] | |||
3895 | // We must have, at minimum, the outer scope and the number of arguments. | |||
3896 | if (Record.size() < 2) | |||
3897 | return error("Invalid record"); | |||
3898 | ||||
3899 | unsigned Idx = 0; | |||
3900 | ||||
3901 | Value *ParentPad = | |||
3902 | getValue(Record, Idx++, NextValueNo, Type::getTokenTy(Context)); | |||
3903 | ||||
3904 | unsigned NumHandlers = Record[Idx++]; | |||
3905 | ||||
3906 | SmallVector<BasicBlock *, 2> Handlers; | |||
3907 | for (unsigned Op = 0; Op != NumHandlers; ++Op) { | |||
3908 | BasicBlock *BB = getBasicBlock(Record[Idx++]); | |||
3909 | if (!BB) | |||
3910 | return error("Invalid record"); | |||
3911 | Handlers.push_back(BB); | |||
3912 | } | |||
3913 | ||||
3914 | BasicBlock *UnwindDest = nullptr; | |||
3915 | if (Idx + 1 == Record.size()) { | |||
3916 | UnwindDest = getBasicBlock(Record[Idx++]); | |||
3917 | if (!UnwindDest) | |||
3918 | return error("Invalid record"); | |||
3919 | } | |||
3920 | ||||
3921 | if (Record.size() != Idx) | |||
3922 | return error("Invalid record"); | |||
3923 | ||||
3924 | auto *CatchSwitch = | |||
3925 | CatchSwitchInst::Create(ParentPad, UnwindDest, NumHandlers); | |||
3926 | for (BasicBlock *Handler : Handlers) | |||
3927 | CatchSwitch->addHandler(Handler); | |||
3928 | I = CatchSwitch; | |||
3929 | InstructionList.push_back(I); | |||
3930 | break; | |||
3931 | } | |||
3932 | case bitc::FUNC_CODE_INST_CATCHPAD: | |||
3933 | case bitc::FUNC_CODE_INST_CLEANUPPAD: { // [tok,num,(ty,val)*] | |||
3934 | // We must have, at minimum, the outer scope and the number of arguments. | |||
3935 | if (Record.size() < 2) | |||
3936 | return error("Invalid record"); | |||
3937 | ||||
3938 | unsigned Idx = 0; | |||
3939 | ||||
3940 | Value *ParentPad = | |||
3941 | getValue(Record, Idx++, NextValueNo, Type::getTokenTy(Context)); | |||
3942 | ||||
3943 | unsigned NumArgOperands = Record[Idx++]; | |||
3944 | ||||
3945 | SmallVector<Value *, 2> Args; | |||
3946 | for (unsigned Op = 0; Op != NumArgOperands; ++Op) { | |||
3947 | Value *Val; | |||
3948 | if (getValueTypePair(Record, Idx, NextValueNo, Val)) | |||
3949 | return error("Invalid record"); | |||
3950 | Args.push_back(Val); | |||
3951 | } | |||
3952 | ||||
3953 | if (Record.size() != Idx) | |||
3954 | return error("Invalid record"); | |||
3955 | ||||
3956 | if (BitCode == bitc::FUNC_CODE_INST_CLEANUPPAD) | |||
3957 | I = CleanupPadInst::Create(ParentPad, Args); | |||
3958 | else | |||
3959 | I = CatchPadInst::Create(ParentPad, Args); | |||
3960 | InstructionList.push_back(I); | |||
3961 | break; | |||
3962 | } | |||
3963 | case bitc::FUNC_CODE_INST_SWITCH: { // SWITCH: [opty, op0, op1, ...] | |||
3964 | // Check magic | |||
3965 | if ((Record[0] >> 16) == SWITCH_INST_MAGIC) { | |||
3966 | // "New" SwitchInst format with case ranges. The changes to write this | |||
3967 | // format were reverted but we still recognize bitcode that uses it. | |||
3968 | // Hopefully someday we will have support for case ranges and can use | |||
3969 | // this format again. | |||
3970 | ||||
3971 | Type *OpTy = getTypeByID(Record[1]); | |||
3972 | unsigned ValueBitWidth = cast<IntegerType>(OpTy)->getBitWidth(); | |||
3973 | ||||
3974 | Value *Cond = getValue(Record, 2, NextValueNo, OpTy); | |||
3975 | BasicBlock *Default = getBasicBlock(Record[3]); | |||
3976 | if (!OpTy || !Cond || !Default) | |||
3977 | return error("Invalid record"); | |||
3978 | ||||
3979 | unsigned NumCases = Record[4]; | |||
3980 | ||||
3981 | SwitchInst *SI = SwitchInst::Create(Cond, Default, NumCases); | |||
3982 | InstructionList.push_back(SI); | |||
3983 | ||||
3984 | unsigned CurIdx = 5; | |||
3985 | for (unsigned i = 0; i != NumCases; ++i) { | |||
3986 | SmallVector<ConstantInt*, 1> CaseVals; | |||
3987 | unsigned NumItems = Record[CurIdx++]; | |||
3988 | for (unsigned ci = 0; ci != NumItems; ++ci) { | |||
3989 | bool isSingleNumber = Record[CurIdx++]; | |||
3990 | ||||
3991 | APInt Low; | |||
3992 | unsigned ActiveWords = 1; | |||
3993 | if (ValueBitWidth > 64) | |||
3994 | ActiveWords = Record[CurIdx++]; | |||
3995 | Low = readWideAPInt(makeArrayRef(&Record[CurIdx], ActiveWords), | |||
3996 | ValueBitWidth); | |||
3997 | CurIdx += ActiveWords; | |||
3998 | ||||
3999 | if (!isSingleNumber) { | |||
4000 | ActiveWords = 1; | |||
4001 | if (ValueBitWidth > 64) | |||
4002 | ActiveWords = Record[CurIdx++]; | |||
4003 | APInt High = readWideAPInt( | |||
4004 | makeArrayRef(&Record[CurIdx], ActiveWords), ValueBitWidth); | |||
4005 | CurIdx += ActiveWords; | |||
4006 | ||||
4007 | // FIXME: It is not clear whether values in the range should be | |||
4008 | // compared as signed or unsigned values. The partially | |||
4009 | // implemented changes that used this format in the past used | |||
4010 | // unsigned comparisons. | |||
4011 | for ( ; Low.ule(High); ++Low) | |||
4012 | CaseVals.push_back(ConstantInt::get(Context, Low)); | |||
4013 | } else | |||
4014 | CaseVals.push_back(ConstantInt::get(Context, Low)); | |||
4015 | } | |||
4016 | BasicBlock *DestBB = getBasicBlock(Record[CurIdx++]); | |||
4017 | for (SmallVector<ConstantInt*, 1>::iterator cvi = CaseVals.begin(), | |||
4018 | cve = CaseVals.end(); cvi != cve; ++cvi) | |||
4019 | SI->addCase(*cvi, DestBB); | |||
4020 | } | |||
4021 | I = SI; | |||
4022 | break; | |||
4023 | } | |||
4024 | ||||
4025 | // Old SwitchInst format without case ranges. | |||
4026 | ||||
4027 | if (Record.size() < 3 || (Record.size() & 1) == 0) | |||
4028 | return error("Invalid record"); | |||
4029 | Type *OpTy = getTypeByID(Record[0]); | |||
4030 | Value *Cond = getValue(Record, 1, NextValueNo, OpTy); | |||
4031 | BasicBlock *Default = getBasicBlock(Record[2]); | |||
4032 | if (!OpTy || !Cond || !Default) | |||
4033 | return error("Invalid record"); | |||
4034 | unsigned NumCases = (Record.size()-3)/2; | |||
4035 | SwitchInst *SI = SwitchInst::Create(Cond, Default, NumCases); | |||
4036 | InstructionList.push_back(SI); | |||
4037 | for (unsigned i = 0, e = NumCases; i != e; ++i) { | |||
4038 | ConstantInt *CaseVal = | |||
4039 | dyn_cast_or_null<ConstantInt>(getFnValueByID(Record[3+i*2], OpTy)); | |||
4040 | BasicBlock *DestBB = getBasicBlock(Record[1+3+i*2]); | |||
4041 | if (!CaseVal || !DestBB) { | |||
4042 | delete SI; | |||
4043 | return error("Invalid record"); | |||
4044 | } | |||
4045 | SI->addCase(CaseVal, DestBB); | |||
4046 | } | |||
4047 | I = SI; | |||
4048 | break; | |||
4049 | } | |||
4050 | case bitc::FUNC_CODE_INST_INDIRECTBR: { // INDIRECTBR: [opty, op0, op1, ...] | |||
4051 | if (Record.size() < 2) | |||
4052 | return error("Invalid record"); | |||
4053 | Type *OpTy = getTypeByID(Record[0]); | |||
4054 | Value *Address = getValue(Record, 1, NextValueNo, OpTy); | |||
4055 | if (!OpTy || !Address) | |||
4056 | return error("Invalid record"); | |||
4057 | unsigned NumDests = Record.size()-2; | |||
4058 | IndirectBrInst *IBI = IndirectBrInst::Create(Address, NumDests); | |||
4059 | InstructionList.push_back(IBI); | |||
4060 | for (unsigned i = 0, e = NumDests; i != e; ++i) { | |||
4061 | if (BasicBlock *DestBB = getBasicBlock(Record[2+i])) { | |||
4062 | IBI->addDestination(DestBB); | |||
4063 | } else { | |||
4064 | delete IBI; | |||
4065 | return error("Invalid record"); | |||
4066 | } | |||
4067 | } | |||
4068 | I = IBI; | |||
4069 | break; | |||
4070 | } | |||
4071 | ||||
4072 | case bitc::FUNC_CODE_INST_INVOKE: { | |||
4073 | // INVOKE: [attrs, cc, normBB, unwindBB, fnty, op0,op1,op2, ...] | |||
4074 | if (Record.size() < 4) | |||
4075 | return error("Invalid record"); | |||
4076 | unsigned OpNum = 0; | |||
4077 | AttributeList PAL = getAttributes(Record[OpNum++]); | |||
4078 | unsigned CCInfo = Record[OpNum++]; | |||
4079 | BasicBlock *NormalBB = getBasicBlock(Record[OpNum++]); | |||
4080 | BasicBlock *UnwindBB = getBasicBlock(Record[OpNum++]); | |||
4081 | ||||
4082 | FunctionType *FTy = nullptr; | |||
4083 | if (CCInfo >> 13 & 1 && | |||
4084 | !(FTy = dyn_cast<FunctionType>(getTypeByID(Record[OpNum++])))) | |||
4085 | return error("Explicit invoke type is not a function type"); | |||
4086 | ||||
4087 | Value *Callee; | |||
4088 | if (getValueTypePair(Record, OpNum, NextValueNo, Callee)) | |||
4089 | return error("Invalid record"); | |||
4090 | ||||
4091 | PointerType *CalleeTy = dyn_cast<PointerType>(Callee->getType()); | |||
4092 | if (!CalleeTy) | |||
4093 | return error("Callee is not a pointer"); | |||
4094 | if (!FTy) { | |||
4095 | FTy = dyn_cast<FunctionType>(CalleeTy->getElementType()); | |||
4096 | if (!FTy) | |||
4097 | return error("Callee is not of pointer to function type"); | |||
4098 | } else if (CalleeTy->getElementType() != FTy) | |||
4099 | return error("Explicit invoke type does not match pointee type of " | |||
4100 | "callee operand"); | |||
4101 | if (Record.size() < FTy->getNumParams() + OpNum) | |||
4102 | return error("Insufficient operands to call"); | |||
4103 | ||||
4104 | SmallVector<Value*, 16> Ops; | |||
4105 | for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) { | |||
4106 | Ops.push_back(getValue(Record, OpNum, NextValueNo, | |||
4107 | FTy->getParamType(i))); | |||
4108 | if (!Ops.back()) | |||
4109 | return error("Invalid record"); | |||
4110 | } | |||
4111 | ||||
4112 | if (!FTy->isVarArg()) { | |||
4113 | if (Record.size() != OpNum) | |||
4114 | return error("Invalid record"); | |||
4115 | } else { | |||
4116 | // Read type/value pairs for varargs params. | |||
4117 | while (OpNum != Record.size()) { | |||
4118 | Value *Op; | |||
4119 | if (getValueTypePair(Record, OpNum, NextValueNo, Op)) | |||
4120 | return error("Invalid record"); | |||
4121 | Ops.push_back(Op); | |||
4122 | } | |||
4123 | } | |||
4124 | ||||
4125 | I = InvokeInst::Create(Callee, NormalBB, UnwindBB, Ops, OperandBundles); | |||
4126 | OperandBundles.clear(); | |||
4127 | InstructionList.push_back(I); | |||
4128 | cast<InvokeInst>(I)->setCallingConv( | |||
4129 | static_cast<CallingConv::ID>(CallingConv::MaxID & CCInfo)); | |||
4130 | cast<InvokeInst>(I)->setAttributes(PAL); | |||
4131 | break; | |||
4132 | } | |||
4133 | case bitc::FUNC_CODE_INST_RESUME: { // RESUME: [opval] | |||
4134 | unsigned Idx = 0; | |||
4135 | Value *Val = nullptr; | |||
4136 | if (getValueTypePair(Record, Idx, NextValueNo, Val)) | |||
4137 | return error("Invalid record"); | |||
4138 | I = ResumeInst::Create(Val); | |||
4139 | InstructionList.push_back(I); | |||
4140 | break; | |||
4141 | } | |||
4142 | case bitc::FUNC_CODE_INST_UNREACHABLE: // UNREACHABLE | |||
4143 | I = new UnreachableInst(Context); | |||
4144 | InstructionList.push_back(I); | |||
4145 | break; | |||
4146 | case bitc::FUNC_CODE_INST_PHI: { // PHI: [ty, val0,bb0, ...] | |||
4147 | if (Record.size() < 1 || ((Record.size()-1)&1)) | |||
4148 | return error("Invalid record"); | |||
4149 | Type *Ty = getTypeByID(Record[0]); | |||
4150 | if (!Ty) | |||
4151 | return error("Invalid record"); | |||
4152 | ||||
4153 | PHINode *PN = PHINode::Create(Ty, (Record.size()-1)/2); | |||
4154 | InstructionList.push_back(PN); | |||
4155 | ||||
4156 | for (unsigned i = 0, e = Record.size()-1; i != e; i += 2) { | |||
4157 | Value *V; | |||
4158 | // With the new function encoding, it is possible that operands have | |||
4159 | // negative IDs (for forward references). Use a signed VBR | |||
4160 | // representation to keep the encoding small. | |||
4161 | if (UseRelativeIDs) | |||
4162 | V = getValueSigned(Record, 1+i, NextValueNo, Ty); | |||
4163 | else | |||
4164 | V = getValue(Record, 1+i, NextValueNo, Ty); | |||
4165 | BasicBlock *BB = getBasicBlock(Record[2+i]); | |||
4166 | if (!V || !BB) | |||
4167 | return error("Invalid record"); | |||
4168 | PN->addIncoming(V, BB); | |||
4169 | } | |||
4170 | I = PN; | |||
4171 | break; | |||
4172 | } | |||
4173 | ||||
4174 | case bitc::FUNC_CODE_INST_LANDINGPAD: | |||
4175 | case bitc::FUNC_CODE_INST_LANDINGPAD_OLD: { | |||
4176 | // LANDINGPAD: [ty, val, val, num, (id0,val0 ...)?] | |||
4177 | unsigned Idx = 0; | |||
4178 | if (BitCode == bitc::FUNC_CODE_INST_LANDINGPAD) { | |||
4179 | if (Record.size() < 3) | |||
4180 | return error("Invalid record"); | |||
4181 | } else { | |||
4182 | assert(BitCode == bitc::FUNC_CODE_INST_LANDINGPAD_OLD)(static_cast <bool> (BitCode == bitc::FUNC_CODE_INST_LANDINGPAD_OLD ) ? void (0) : __assert_fail ("BitCode == bitc::FUNC_CODE_INST_LANDINGPAD_OLD" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4182, __extension__ __PRETTY_FUNCTION__)); | |||
4183 | if (Record.size() < 4) | |||
4184 | return error("Invalid record"); | |||
4185 | } | |||
4186 | Type *Ty = getTypeByID(Record[Idx++]); | |||
4187 | if (!Ty) | |||
4188 | return error("Invalid record"); | |||
4189 | if (BitCode == bitc::FUNC_CODE_INST_LANDINGPAD_OLD) { | |||
4190 | Value *PersFn = nullptr; | |||
4191 | if (getValueTypePair(Record, Idx, NextValueNo, PersFn)) | |||
4192 | return error("Invalid record"); | |||
4193 | ||||
4194 | if (!F->hasPersonalityFn()) | |||
4195 | F->setPersonalityFn(cast<Constant>(PersFn)); | |||
4196 | else if (F->getPersonalityFn() != cast<Constant>(PersFn)) | |||
4197 | return error("Personality function mismatch"); | |||
4198 | } | |||
4199 | ||||
4200 | bool IsCleanup = !!Record[Idx++]; | |||
4201 | unsigned NumClauses = Record[Idx++]; | |||
4202 | LandingPadInst *LP = LandingPadInst::Create(Ty, NumClauses); | |||
4203 | LP->setCleanup(IsCleanup); | |||
4204 | for (unsigned J = 0; J != NumClauses; ++J) { | |||
4205 | LandingPadInst::ClauseType CT = | |||
4206 | LandingPadInst::ClauseType(Record[Idx++]); (void)CT; | |||
4207 | Value *Val; | |||
4208 | ||||
4209 | if (getValueTypePair(Record, Idx, NextValueNo, Val)) { | |||
4210 | delete LP; | |||
4211 | return error("Invalid record"); | |||
4212 | } | |||
4213 | ||||
4214 | assert((CT != LandingPadInst::Catch ||(static_cast <bool> ((CT != LandingPadInst::Catch || !isa <ArrayType>(Val->getType())) && "Catch clause has a invalid type!" ) ? void (0) : __assert_fail ("(CT != LandingPadInst::Catch || !isa<ArrayType>(Val->getType())) && \"Catch clause has a invalid type!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4216, __extension__ __PRETTY_FUNCTION__)) | |||
4215 | !isa<ArrayType>(Val->getType())) &&(static_cast <bool> ((CT != LandingPadInst::Catch || !isa <ArrayType>(Val->getType())) && "Catch clause has a invalid type!" ) ? void (0) : __assert_fail ("(CT != LandingPadInst::Catch || !isa<ArrayType>(Val->getType())) && \"Catch clause has a invalid type!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4216, __extension__ __PRETTY_FUNCTION__)) | |||
4216 | "Catch clause has a invalid type!")(static_cast <bool> ((CT != LandingPadInst::Catch || !isa <ArrayType>(Val->getType())) && "Catch clause has a invalid type!" ) ? void (0) : __assert_fail ("(CT != LandingPadInst::Catch || !isa<ArrayType>(Val->getType())) && \"Catch clause has a invalid type!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4216, __extension__ __PRETTY_FUNCTION__)); | |||
4217 | assert((CT != LandingPadInst::Filter ||(static_cast <bool> ((CT != LandingPadInst::Filter || isa <ArrayType>(Val->getType())) && "Filter clause has invalid type!" ) ? void (0) : __assert_fail ("(CT != LandingPadInst::Filter || isa<ArrayType>(Val->getType())) && \"Filter clause has invalid type!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4219, __extension__ __PRETTY_FUNCTION__)) | |||
4218 | isa<ArrayType>(Val->getType())) &&(static_cast <bool> ((CT != LandingPadInst::Filter || isa <ArrayType>(Val->getType())) && "Filter clause has invalid type!" ) ? void (0) : __assert_fail ("(CT != LandingPadInst::Filter || isa<ArrayType>(Val->getType())) && \"Filter clause has invalid type!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4219, __extension__ __PRETTY_FUNCTION__)) | |||
4219 | "Filter clause has invalid type!")(static_cast <bool> ((CT != LandingPadInst::Filter || isa <ArrayType>(Val->getType())) && "Filter clause has invalid type!" ) ? void (0) : __assert_fail ("(CT != LandingPadInst::Filter || isa<ArrayType>(Val->getType())) && \"Filter clause has invalid type!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4219, __extension__ __PRETTY_FUNCTION__)); | |||
4220 | LP->addClause(cast<Constant>(Val)); | |||
4221 | } | |||
4222 | ||||
4223 | I = LP; | |||
4224 | InstructionList.push_back(I); | |||
4225 | break; | |||
4226 | } | |||
4227 | ||||
4228 | case bitc::FUNC_CODE_INST_ALLOCA: { // ALLOCA: [instty, opty, op, align] | |||
4229 | if (Record.size() != 4) | |||
4230 | return error("Invalid record"); | |||
4231 | uint64_t AlignRecord = Record[3]; | |||
4232 | const uint64_t InAllocaMask = uint64_t(1) << 5; | |||
4233 | const uint64_t ExplicitTypeMask = uint64_t(1) << 6; | |||
4234 | const uint64_t SwiftErrorMask = uint64_t(1) << 7; | |||
4235 | const uint64_t FlagMask = InAllocaMask | ExplicitTypeMask | | |||
4236 | SwiftErrorMask; | |||
4237 | bool InAlloca = AlignRecord & InAllocaMask; | |||
4238 | bool SwiftError = AlignRecord & SwiftErrorMask; | |||
4239 | Type *Ty = getTypeByID(Record[0]); | |||
4240 | if ((AlignRecord & ExplicitTypeMask) == 0) { | |||
4241 | auto *PTy = dyn_cast_or_null<PointerType>(Ty); | |||
4242 | if (!PTy) | |||
4243 | return error("Old-style alloca with a non-pointer type"); | |||
4244 | Ty = PTy->getElementType(); | |||
4245 | } | |||
4246 | Type *OpTy = getTypeByID(Record[1]); | |||
4247 | Value *Size = getFnValueByID(Record[2], OpTy); | |||
4248 | unsigned Align; | |||
4249 | if (Error Err = parseAlignmentValue(AlignRecord & ~FlagMask, Align)) { | |||
4250 | return Err; | |||
4251 | } | |||
4252 | if (!Ty || !Size) | |||
4253 | return error("Invalid record"); | |||
4254 | ||||
4255 | // FIXME: Make this an optional field. | |||
4256 | const DataLayout &DL = TheModule->getDataLayout(); | |||
4257 | unsigned AS = DL.getAllocaAddrSpace(); | |||
4258 | ||||
4259 | AllocaInst *AI = new AllocaInst(Ty, AS, Size, Align); | |||
4260 | AI->setUsedWithInAlloca(InAlloca); | |||
4261 | AI->setSwiftError(SwiftError); | |||
4262 | I = AI; | |||
4263 | InstructionList.push_back(I); | |||
4264 | break; | |||
4265 | } | |||
4266 | case bitc::FUNC_CODE_INST_LOAD: { // LOAD: [opty, op, align, vol] | |||
4267 | unsigned OpNum = 0; | |||
4268 | Value *Op; | |||
4269 | if (getValueTypePair(Record, OpNum, NextValueNo, Op) || | |||
4270 | (OpNum + 2 != Record.size() && OpNum + 3 != Record.size())) | |||
4271 | return error("Invalid record"); | |||
4272 | ||||
4273 | Type *Ty = nullptr; | |||
4274 | if (OpNum + 3 == Record.size()) | |||
4275 | Ty = getTypeByID(Record[OpNum++]); | |||
4276 | if (Error Err = typeCheckLoadStoreInst(Ty, Op->getType())) | |||
4277 | return Err; | |||
4278 | if (!Ty) | |||
4279 | Ty = cast<PointerType>(Op->getType())->getElementType(); | |||
4280 | ||||
4281 | unsigned Align; | |||
4282 | if (Error Err = parseAlignmentValue(Record[OpNum], Align)) | |||
4283 | return Err; | |||
4284 | I = new LoadInst(Ty, Op, "", Record[OpNum + 1], Align); | |||
4285 | ||||
4286 | InstructionList.push_back(I); | |||
4287 | break; | |||
4288 | } | |||
4289 | case bitc::FUNC_CODE_INST_LOADATOMIC: { | |||
4290 | // LOADATOMIC: [opty, op, align, vol, ordering, ssid] | |||
4291 | unsigned OpNum = 0; | |||
4292 | Value *Op; | |||
4293 | if (getValueTypePair(Record, OpNum, NextValueNo, Op) || | |||
4294 | (OpNum + 4 != Record.size() && OpNum + 5 != Record.size())) | |||
4295 | return error("Invalid record"); | |||
4296 | ||||
4297 | Type *Ty = nullptr; | |||
4298 | if (OpNum + 5 == Record.size()) | |||
4299 | Ty = getTypeByID(Record[OpNum++]); | |||
4300 | if (Error Err = typeCheckLoadStoreInst(Ty, Op->getType())) | |||
4301 | return Err; | |||
4302 | if (!Ty) | |||
4303 | Ty = cast<PointerType>(Op->getType())->getElementType(); | |||
4304 | ||||
4305 | AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]); | |||
4306 | if (Ordering == AtomicOrdering::NotAtomic || | |||
4307 | Ordering == AtomicOrdering::Release || | |||
4308 | Ordering == AtomicOrdering::AcquireRelease) | |||
4309 | return error("Invalid record"); | |||
4310 | if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0) | |||
4311 | return error("Invalid record"); | |||
4312 | SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 3]); | |||
4313 | ||||
4314 | unsigned Align; | |||
4315 | if (Error Err = parseAlignmentValue(Record[OpNum], Align)) | |||
4316 | return Err; | |||
4317 | I = new LoadInst(Op, "", Record[OpNum+1], Align, Ordering, SSID); | |||
4318 | ||||
4319 | InstructionList.push_back(I); | |||
4320 | break; | |||
4321 | } | |||
4322 | case bitc::FUNC_CODE_INST_STORE: | |||
4323 | case bitc::FUNC_CODE_INST_STORE_OLD: { // STORE2:[ptrty, ptr, val, align, vol] | |||
4324 | unsigned OpNum = 0; | |||
4325 | Value *Val, *Ptr; | |||
4326 | if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) || | |||
4327 | (BitCode == bitc::FUNC_CODE_INST_STORE | |||
4328 | ? getValueTypePair(Record, OpNum, NextValueNo, Val) | |||
4329 | : popValue(Record, OpNum, NextValueNo, | |||
4330 | cast<PointerType>(Ptr->getType())->getElementType(), | |||
4331 | Val)) || | |||
4332 | OpNum + 2 != Record.size()) | |||
4333 | return error("Invalid record"); | |||
4334 | ||||
4335 | if (Error Err = typeCheckLoadStoreInst(Val->getType(), Ptr->getType())) | |||
4336 | return Err; | |||
4337 | unsigned Align; | |||
4338 | if (Error Err = parseAlignmentValue(Record[OpNum], Align)) | |||
4339 | return Err; | |||
4340 | I = new StoreInst(Val, Ptr, Record[OpNum+1], Align); | |||
4341 | InstructionList.push_back(I); | |||
4342 | break; | |||
4343 | } | |||
4344 | case bitc::FUNC_CODE_INST_STOREATOMIC: | |||
4345 | case bitc::FUNC_CODE_INST_STOREATOMIC_OLD: { | |||
4346 | // STOREATOMIC: [ptrty, ptr, val, align, vol, ordering, ssid] | |||
4347 | unsigned OpNum = 0; | |||
4348 | Value *Val, *Ptr; | |||
4349 | if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) || | |||
4350 | !isa<PointerType>(Ptr->getType()) || | |||
4351 | (BitCode == bitc::FUNC_CODE_INST_STOREATOMIC | |||
4352 | ? getValueTypePair(Record, OpNum, NextValueNo, Val) | |||
4353 | : popValue(Record, OpNum, NextValueNo, | |||
4354 | cast<PointerType>(Ptr->getType())->getElementType(), | |||
4355 | Val)) || | |||
4356 | OpNum + 4 != Record.size()) | |||
4357 | return error("Invalid record"); | |||
4358 | ||||
4359 | if (Error Err = typeCheckLoadStoreInst(Val->getType(), Ptr->getType())) | |||
4360 | return Err; | |||
4361 | AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]); | |||
4362 | if (Ordering == AtomicOrdering::NotAtomic || | |||
4363 | Ordering == AtomicOrdering::Acquire || | |||
4364 | Ordering == AtomicOrdering::AcquireRelease) | |||
4365 | return error("Invalid record"); | |||
4366 | SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 3]); | |||
4367 | if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0) | |||
4368 | return error("Invalid record"); | |||
4369 | ||||
4370 | unsigned Align; | |||
4371 | if (Error Err = parseAlignmentValue(Record[OpNum], Align)) | |||
4372 | return Err; | |||
4373 | I = new StoreInst(Val, Ptr, Record[OpNum+1], Align, Ordering, SSID); | |||
4374 | InstructionList.push_back(I); | |||
4375 | break; | |||
4376 | } | |||
4377 | case bitc::FUNC_CODE_INST_CMPXCHG_OLD: | |||
4378 | case bitc::FUNC_CODE_INST_CMPXCHG: { | |||
4379 | // CMPXCHG:[ptrty, ptr, cmp, new, vol, successordering, ssid, | |||
4380 | // failureordering?, isweak?] | |||
4381 | unsigned OpNum = 0; | |||
4382 | Value *Ptr, *Cmp, *New; | |||
4383 | if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) || | |||
4384 | (BitCode == bitc::FUNC_CODE_INST_CMPXCHG | |||
4385 | ? getValueTypePair(Record, OpNum, NextValueNo, Cmp) | |||
4386 | : popValue(Record, OpNum, NextValueNo, | |||
4387 | cast<PointerType>(Ptr->getType())->getElementType(), | |||
4388 | Cmp)) || | |||
4389 | popValue(Record, OpNum, NextValueNo, Cmp->getType(), New) || | |||
4390 | Record.size() < OpNum + 3 || Record.size() > OpNum + 5) | |||
4391 | return error("Invalid record"); | |||
4392 | AtomicOrdering SuccessOrdering = getDecodedOrdering(Record[OpNum + 1]); | |||
4393 | if (SuccessOrdering == AtomicOrdering::NotAtomic || | |||
4394 | SuccessOrdering == AtomicOrdering::Unordered) | |||
4395 | return error("Invalid record"); | |||
4396 | SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 2]); | |||
4397 | ||||
4398 | if (Error Err = typeCheckLoadStoreInst(Cmp->getType(), Ptr->getType())) | |||
4399 | return Err; | |||
4400 | AtomicOrdering FailureOrdering; | |||
4401 | if (Record.size() < 7) | |||
4402 | FailureOrdering = | |||
4403 | AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrdering); | |||
4404 | else | |||
4405 | FailureOrdering = getDecodedOrdering(Record[OpNum + 3]); | |||
4406 | ||||
4407 | I = new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering, FailureOrdering, | |||
4408 | SSID); | |||
4409 | cast<AtomicCmpXchgInst>(I)->setVolatile(Record[OpNum]); | |||
4410 | ||||
4411 | if (Record.size() < 8) { | |||
4412 | // Before weak cmpxchgs existed, the instruction simply returned the | |||
4413 | // value loaded from memory, so bitcode files from that era will be | |||
4414 | // expecting the first component of a modern cmpxchg. | |||
4415 | CurBB->getInstList().push_back(I); | |||
4416 | I = ExtractValueInst::Create(I, 0); | |||
4417 | } else { | |||
4418 | cast<AtomicCmpXchgInst>(I)->setWeak(Record[OpNum+4]); | |||
4419 | } | |||
4420 | ||||
4421 | InstructionList.push_back(I); | |||
4422 | break; | |||
4423 | } | |||
4424 | case bitc::FUNC_CODE_INST_ATOMICRMW: { | |||
4425 | // ATOMICRMW:[ptrty, ptr, val, op, vol, ordering, ssid] | |||
4426 | unsigned OpNum = 0; | |||
4427 | Value *Ptr, *Val; | |||
4428 | if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) || | |||
4429 | !isa<PointerType>(Ptr->getType()) || | |||
4430 | popValue(Record, OpNum, NextValueNo, | |||
4431 | cast<PointerType>(Ptr->getType())->getElementType(), Val) || | |||
4432 | OpNum+4 != Record.size()) | |||
4433 | return error("Invalid record"); | |||
4434 | AtomicRMWInst::BinOp Operation = getDecodedRMWOperation(Record[OpNum]); | |||
4435 | if (Operation < AtomicRMWInst::FIRST_BINOP || | |||
4436 | Operation > AtomicRMWInst::LAST_BINOP) | |||
4437 | return error("Invalid record"); | |||
4438 | AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]); | |||
4439 | if (Ordering == AtomicOrdering::NotAtomic || | |||
4440 | Ordering == AtomicOrdering::Unordered) | |||
4441 | return error("Invalid record"); | |||
4442 | SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 3]); | |||
4443 | I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SSID); | |||
4444 | cast<AtomicRMWInst>(I)->setVolatile(Record[OpNum+1]); | |||
4445 | InstructionList.push_back(I); | |||
4446 | break; | |||
4447 | } | |||
4448 | case bitc::FUNC_CODE_INST_FENCE: { // FENCE:[ordering, ssid] | |||
4449 | if (2 != Record.size()) | |||
4450 | return error("Invalid record"); | |||
4451 | AtomicOrdering Ordering = getDecodedOrdering(Record[0]); | |||
4452 | if (Ordering == AtomicOrdering::NotAtomic || | |||
4453 | Ordering == AtomicOrdering::Unordered || | |||
4454 | Ordering == AtomicOrdering::Monotonic) | |||
4455 | return error("Invalid record"); | |||
4456 | SyncScope::ID SSID = getDecodedSyncScopeID(Record[1]); | |||
4457 | I = new FenceInst(Context, Ordering, SSID); | |||
4458 | InstructionList.push_back(I); | |||
4459 | break; | |||
4460 | } | |||
4461 | case bitc::FUNC_CODE_INST_CALL: { | |||
4462 | // CALL: [paramattrs, cc, fmf, fnty, fnid, arg0, arg1...] | |||
4463 | if (Record.size() < 3) | |||
4464 | return error("Invalid record"); | |||
4465 | ||||
4466 | unsigned OpNum = 0; | |||
4467 | AttributeList PAL = getAttributes(Record[OpNum++]); | |||
4468 | unsigned CCInfo = Record[OpNum++]; | |||
4469 | ||||
4470 | FastMathFlags FMF; | |||
4471 | if ((CCInfo >> bitc::CALL_FMF) & 1) { | |||
4472 | FMF = getDecodedFastMathFlags(Record[OpNum++]); | |||
4473 | if (!FMF.any()) | |||
4474 | return error("Fast math flags indicator set for call with no FMF"); | |||
4475 | } | |||
4476 | ||||
4477 | FunctionType *FTy = nullptr; | |||
4478 | if (CCInfo >> bitc::CALL_EXPLICIT_TYPE & 1 && | |||
4479 | !(FTy = dyn_cast<FunctionType>(getTypeByID(Record[OpNum++])))) | |||
4480 | return error("Explicit call type is not a function type"); | |||
4481 | ||||
4482 | Value *Callee; | |||
4483 | if (getValueTypePair(Record, OpNum, NextValueNo, Callee)) | |||
4484 | return error("Invalid record"); | |||
4485 | ||||
4486 | PointerType *OpTy = dyn_cast<PointerType>(Callee->getType()); | |||
4487 | if (!OpTy) | |||
4488 | return error("Callee is not a pointer type"); | |||
4489 | if (!FTy) { | |||
4490 | FTy = dyn_cast<FunctionType>(OpTy->getElementType()); | |||
4491 | if (!FTy) | |||
4492 | return error("Callee is not of pointer to function type"); | |||
4493 | } else if (OpTy->getElementType() != FTy) | |||
4494 | return error("Explicit call type does not match pointee type of " | |||
4495 | "callee operand"); | |||
4496 | if (Record.size() < FTy->getNumParams() + OpNum) | |||
4497 | return error("Insufficient operands to call"); | |||
4498 | ||||
4499 | SmallVector<Value*, 16> Args; | |||
4500 | // Read the fixed params. | |||
4501 | for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) { | |||
4502 | if (FTy->getParamType(i)->isLabelTy()) | |||
4503 | Args.push_back(getBasicBlock(Record[OpNum])); | |||
4504 | else | |||
4505 | Args.push_back(getValue(Record, OpNum, NextValueNo, | |||
4506 | FTy->getParamType(i))); | |||
4507 | if (!Args.back()) | |||
4508 | return error("Invalid record"); | |||
4509 | } | |||
4510 | ||||
4511 | // Read type/value pairs for varargs params. | |||
4512 | if (!FTy->isVarArg()) { | |||
4513 | if (OpNum != Record.size()) | |||
4514 | return error("Invalid record"); | |||
4515 | } else { | |||
4516 | while (OpNum != Record.size()) { | |||
4517 | Value *Op; | |||
4518 | if (getValueTypePair(Record, OpNum, NextValueNo, Op)) | |||
4519 | return error("Invalid record"); | |||
4520 | Args.push_back(Op); | |||
4521 | } | |||
4522 | } | |||
4523 | ||||
4524 | I = CallInst::Create(FTy, Callee, Args, OperandBundles); | |||
4525 | OperandBundles.clear(); | |||
4526 | InstructionList.push_back(I); | |||
4527 | cast<CallInst>(I)->setCallingConv( | |||
4528 | static_cast<CallingConv::ID>((0x7ff & CCInfo) >> bitc::CALL_CCONV)); | |||
4529 | CallInst::TailCallKind TCK = CallInst::TCK_None; | |||
4530 | if (CCInfo & 1 << bitc::CALL_TAIL) | |||
4531 | TCK = CallInst::TCK_Tail; | |||
4532 | if (CCInfo & (1 << bitc::CALL_MUSTTAIL)) | |||
4533 | TCK = CallInst::TCK_MustTail; | |||
4534 | if (CCInfo & (1 << bitc::CALL_NOTAIL)) | |||
4535 | TCK = CallInst::TCK_NoTail; | |||
4536 | cast<CallInst>(I)->setTailCallKind(TCK); | |||
4537 | cast<CallInst>(I)->setAttributes(PAL); | |||
4538 | if (FMF.any()) { | |||
4539 | if (!isa<FPMathOperator>(I)) | |||
4540 | return error("Fast-math-flags specified for call without " | |||
4541 | "floating-point scalar or vector return type"); | |||
4542 | I->setFastMathFlags(FMF); | |||
4543 | } | |||
4544 | break; | |||
4545 | } | |||
4546 | case bitc::FUNC_CODE_INST_VAARG: { // VAARG: [valistty, valist, instty] | |||
4547 | if (Record.size() < 3) | |||
4548 | return error("Invalid record"); | |||
4549 | Type *OpTy = getTypeByID(Record[0]); | |||
4550 | Value *Op = getValue(Record, 1, NextValueNo, OpTy); | |||
4551 | Type *ResTy = getTypeByID(Record[2]); | |||
4552 | if (!OpTy || !Op || !ResTy) | |||
4553 | return error("Invalid record"); | |||
4554 | I = new VAArgInst(Op, ResTy); | |||
4555 | InstructionList.push_back(I); | |||
4556 | break; | |||
4557 | } | |||
4558 | ||||
4559 | case bitc::FUNC_CODE_OPERAND_BUNDLE: { | |||
4560 | // A call or an invoke can be optionally prefixed with some variable | |||
4561 | // number of operand bundle blocks. These blocks are read into | |||
4562 | // OperandBundles and consumed at the next call or invoke instruction. | |||
4563 | ||||
4564 | if (Record.size() < 1 || Record[0] >= BundleTags.size()) | |||
4565 | return error("Invalid record"); | |||
4566 | ||||
4567 | std::vector<Value *> Inputs; | |||
4568 | ||||
4569 | unsigned OpNum = 1; | |||
4570 | while (OpNum != Record.size()) { | |||
4571 | Value *Op; | |||
4572 | if (getValueTypePair(Record, OpNum, NextValueNo, Op)) | |||
4573 | return error("Invalid record"); | |||
4574 | Inputs.push_back(Op); | |||
4575 | } | |||
4576 | ||||
4577 | OperandBundles.emplace_back(BundleTags[Record[0]], std::move(Inputs)); | |||
4578 | continue; | |||
4579 | } | |||
4580 | } | |||
4581 | ||||
4582 | // Add instruction to end of current BB. If there is no current BB, reject | |||
4583 | // this file. | |||
4584 | if (!CurBB) { | |||
4585 | I->deleteValue(); | |||
4586 | return error("Invalid instruction with no BB"); | |||
4587 | } | |||
4588 | if (!OperandBundles.empty()) { | |||
4589 | I->deleteValue(); | |||
4590 | return error("Operand bundles found with no consumer"); | |||
4591 | } | |||
4592 | CurBB->getInstList().push_back(I); | |||
4593 | ||||
4594 | // If this was a terminator instruction, move to the next block. | |||
4595 | if (isa<TerminatorInst>(I)) { | |||
4596 | ++CurBBNo; | |||
4597 | CurBB = CurBBNo < FunctionBBs.size() ? FunctionBBs[CurBBNo] : nullptr; | |||
4598 | } | |||
4599 | ||||
4600 | // Non-void values get registered in the value table for future use. | |||
4601 | if (I && !I->getType()->isVoidTy()) | |||
4602 | ValueList.assignValue(I, NextValueNo++); | |||
4603 | } | |||
4604 | ||||
4605 | OutOfRecordLoop: | |||
4606 | ||||
4607 | if (!OperandBundles.empty()) | |||
4608 | return error("Operand bundles found with no consumer"); | |||
4609 | ||||
4610 | // Check the function list for unresolved values. | |||
4611 | if (Argument *A = dyn_cast<Argument>(ValueList.back())) { | |||
4612 | if (!A->getParent()) { | |||
4613 | // We found at least one unresolved value. Nuke them all to avoid leaks. | |||
4614 | for (unsigned i = ModuleValueListSize, e = ValueList.size(); i != e; ++i){ | |||
4615 | if ((A = dyn_cast_or_null<Argument>(ValueList[i])) && !A->getParent()) { | |||
4616 | A->replaceAllUsesWith(UndefValue::get(A->getType())); | |||
4617 | delete A; | |||
4618 | } | |||
4619 | } | |||
4620 | return error("Never resolved value found in function"); | |||
4621 | } | |||
4622 | } | |||
4623 | ||||
4624 | // Unexpected unresolved metadata about to be dropped. | |||
4625 | if (MDLoader->hasFwdRefs()) | |||
4626 | return error("Invalid function metadata: outgoing forward refs"); | |||
4627 | ||||
4628 | // Trim the value list down to the size it was before we parsed this function. | |||
4629 | ValueList.shrinkTo(ModuleValueListSize); | |||
4630 | MDLoader->shrinkTo(ModuleMDLoaderSize); | |||
4631 | std::vector<BasicBlock*>().swap(FunctionBBs); | |||
4632 | return Error::success(); | |||
4633 | } | |||
4634 | ||||
4635 | /// Find the function body in the bitcode stream | |||
4636 | Error BitcodeReader::findFunctionInStream( | |||
4637 | Function *F, | |||
4638 | DenseMap<Function *, uint64_t>::iterator DeferredFunctionInfoIterator) { | |||
4639 | while (DeferredFunctionInfoIterator->second == 0) { | |||
4640 | // This is the fallback handling for the old format bitcode that | |||
4641 | // didn't contain the function index in the VST, or when we have | |||
4642 | // an anonymous function which would not have a VST entry. | |||
4643 | // Assert that we have one of those two cases. | |||
4644 | assert(VSTOffset == 0 || !F->hasName())(static_cast <bool> (VSTOffset == 0 || !F->hasName() ) ? void (0) : __assert_fail ("VSTOffset == 0 || !F->hasName()" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4644, __extension__ __PRETTY_FUNCTION__)); | |||
4645 | // Parse the next body in the stream and set its position in the | |||
4646 | // DeferredFunctionInfo map. | |||
4647 | if (Error Err = rememberAndSkipFunctionBodies()) | |||
4648 | return Err; | |||
4649 | } | |||
4650 | return Error::success(); | |||
4651 | } | |||
4652 | ||||
4653 | SyncScope::ID BitcodeReader::getDecodedSyncScopeID(unsigned Val) { | |||
4654 | if (Val == SyncScope::SingleThread || Val == SyncScope::System) | |||
4655 | return SyncScope::ID(Val); | |||
4656 | if (Val >= SSIDs.size()) | |||
4657 | return SyncScope::System; // Map unknown synchronization scopes to system. | |||
4658 | return SSIDs[Val]; | |||
4659 | } | |||
4660 | ||||
4661 | //===----------------------------------------------------------------------===// | |||
4662 | // GVMaterializer implementation | |||
4663 | //===----------------------------------------------------------------------===// | |||
4664 | ||||
/// GVMaterializer hook: parse the deferred body of \p GV from the bitcode
/// stream, then run the standard post-parse fixups (intrinsic upgrades,
/// subprogram attachment, TBAA verification) and pull in any functions this
/// one referenced via blockaddress constants.
Error BitcodeReader::materialize(GlobalValue *GV) {
  Function *F = dyn_cast<Function>(GV);
  // If it's not a function or is already material, ignore the request.
  if (!F || !F->isMaterializable())
    return Error::success();

  DenseMap<Function*, uint64_t>::iterator DFII = DeferredFunctionInfo.find(F);
  assert(DFII != DeferredFunctionInfo.end() && "Deferred function not found!")(static_cast <bool> (DFII != DeferredFunctionInfo.end() && "Deferred function not found!") ? void (0) : __assert_fail ("DFII != DeferredFunctionInfo.end() && \"Deferred function not found!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4672, __extension__ __PRETTY_FUNCTION__));
  // If its position is recorded as 0, its body is somewhere in the stream
  // but we haven't seen it yet.
  if (DFII->second == 0)
    if (Error Err = findFunctionInStream(F, DFII))
      return Err;

  // Materialize metadata before parsing any function bodies.
  if (Error Err = materializeMetadata())
    return Err;

  // Move the bit stream to the saved position of the deferred function body.
  Stream.JumpToBit(DFII->second);

  if (Error Err = parseFunctionBody(F))
    return Err;
  F->setIsMaterializable(false);

  if (StripDebugInfo)
    stripDebugInfo(*F);

  // Upgrade any old intrinsic calls in the function.
  for (auto &I : UpgradedIntrinsics) {
    // Only walk materialized users, and advance the iterator before the
    // upgrade since UpgradeIntrinsicCall may erase the call instruction.
    for (auto UI = I.first->materialized_user_begin(), UE = I.first->user_end();
         UI != UE;) {
      User *U = *UI;
      ++UI;
      if (CallInst *CI = dyn_cast<CallInst>(U))
        UpgradeIntrinsicCall(CI, I.second);
    }
  }

  // Update calls to the remangled intrinsics
  for (auto &I : RemangledIntrinsics)
    for (auto UI = I.first->materialized_user_begin(), UE = I.first->user_end();
         UI != UE;)
      // Don't expect any other users than call sites
      CallSite(*UI++).setCalledFunction(I.second);

  // Finish fn->subprogram upgrade for materialized functions.
  if (DISubprogram *SP = MDLoader->lookupSubprogramForFunction(F))
    F->setSubprogram(SP);

  // Check if the TBAA Metadata are valid, otherwise we will need to strip them.
  if (!MDLoader->isStrippingTBAA()) {
    for (auto &I : instructions(F)) {
      MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa);
      if (!TBAA || TBAAVerifyHelper.visitTBAAMetadata(I, TBAA))
        continue;
      // One invalid TBAA node is enough: strip TBAA from the whole module.
      MDLoader->setStripTBAA(true);
      stripTBAA(F->getParent());
    }
  }

  // Bring in any functions that this function forward-referenced via
  // blockaddresses.
  return materializeForwardReferencedFunctions();
}
4730 | ||||
/// GVMaterializer hook: materialize every function still on disk, parse any
/// remaining module-level bits past the last function block, and then run the
/// whole-module intrinsic/debug-info/flag upgrades.
Error BitcodeReader::materializeModule() {
  if (Error Err = materializeMetadata())
    return Err;

  // Promise to materialize all forward references.
  WillMaterializeAllForwardRefs = true;

  // Iterate over the module, deserializing any functions that are still on
  // disk.
  for (Function &F : *TheModule) {
    if (Error Err = materialize(&F))
      return Err;
  }
  // At this point, if there are any function bodies, parse the rest of
  // the bits in the module past the last function block we have recorded
  // through either lazy scanning or the VST.
  if (LastFunctionBlockBit || NextUnreadBit)
    if (Error Err = parseModule(LastFunctionBlockBit > NextUnreadBit
                                    ? LastFunctionBlockBit
                                    : NextUnreadBit))
      return Err;

  // Check that all block address forward references got resolved (as we
  // promised above).
  if (!BasicBlockFwdRefs.empty())
    return error("Never resolved function from blockaddress");

  // Upgrade any intrinsic calls that slipped through (should not happen!) and
  // delete the old functions to clean up. We can't do this unless the entire
  // module is materialized because there could always be another function body
  // with calls to the old function.
  for (auto &I : UpgradedIntrinsics) {
    for (auto *U : I.first->users()) {
      if (CallInst *CI = dyn_cast<CallInst>(U))
        UpgradeIntrinsicCall(CI, I.second);
    }
    // Redirect any remaining (non-call) uses, then drop the old declaration.
    if (!I.first->use_empty())
      I.first->replaceAllUsesWith(I.second);
    I.first->eraseFromParent();
  }
  UpgradedIntrinsics.clear();
  // Do the same for remangled intrinsics
  for (auto &I : RemangledIntrinsics) {
    I.first->replaceAllUsesWith(I.second);
    I.first->eraseFromParent();
  }
  RemangledIntrinsics.clear();

  UpgradeDebugInfo(*TheModule);

  UpgradeModuleFlags(*TheModule);
  return Error::success();
}
4784 | ||||
/// Return (by value) the list of identified struct types created while
/// reading this bitcode.
std::vector<StructType *> BitcodeReader::getIdentifiedStructTypes() const {
  return IdentifiedStructTypes;
}
4788 | ||||
/// Construct a summary-index reader positioned at \p Cursor, with \p Strtab
/// as the module's string table. Parsed summaries are recorded in
/// \p TheIndex, keyed by \p ModulePath / \p ModuleId.
ModuleSummaryIndexBitcodeReader::ModuleSummaryIndexBitcodeReader(
    BitstreamCursor Cursor, StringRef Strtab, ModuleSummaryIndex &TheIndex,
    StringRef ModulePath, unsigned ModuleId)
    : BitcodeReaderBase(std::move(Cursor), Strtab), TheIndex(TheIndex),
      ModulePath(ModulePath), ModuleId(ModuleId) {}
4794 | ||||
/// Register (or fetch, if already present) this module's entry in the
/// summary index, keyed by its path and ID.
ModuleSummaryIndex::ModuleInfo *
ModuleSummaryIndexBitcodeReader::addThisModule() {
  return TheIndex.addModule(ModulePath, ModuleId);
}
4799 | ||||
/// Map a bitcode value ID to the (ValueInfo, original-name GUID) pair that
/// was recorded for it (see setValueGUID).
std::pair<ValueInfo, GlobalValue::GUID>
ModuleSummaryIndexBitcodeReader::getValueInfoFromValueId(unsigned ValueId) {
  // NOTE: operator[] default-constructs an entry for an unseen ValueId; the
  // assert below catches that case (null ValueInfo) in asserts builds.
  auto VGI = ValueIdToValueInfoMap[ValueId];
  assert(VGI.first)(static_cast <bool> (VGI.first) ? void (0) : __assert_fail ("VGI.first", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4803, __extension__ __PRETTY_FUNCTION__));
  return VGI;
}
4806 | ||||
4807 | void ModuleSummaryIndexBitcodeReader::setValueGUID( | |||
4808 | uint64_t ValueID, StringRef ValueName, GlobalValue::LinkageTypes Linkage, | |||
4809 | StringRef SourceFileName) { | |||
4810 | std::string GlobalId = | |||
4811 | GlobalValue::getGlobalIdentifier(ValueName, Linkage, SourceFileName); | |||
4812 | auto ValueGUID = GlobalValue::getGUID(GlobalId); | |||
4813 | auto OriginalNameID = ValueGUID; | |||
4814 | if (GlobalValue::isLocalLinkage(Linkage)) | |||
4815 | OriginalNameID = GlobalValue::getGUID(ValueName); | |||
4816 | if (PrintSummaryGUIDs) | |||
4817 | dbgs() << "GUID " << ValueGUID << "(" << OriginalNameID << ") is " | |||
4818 | << ValueName << "\n"; | |||
4819 | ||||
4820 | // UseStrtab is false for legacy summary formats and value names are | |||
4821 | // created on stack. We can't use them outside of parseValueSymbolTable. | |||
4822 | ValueIdToValueInfoMap[ValueID] = std::make_pair( | |||
4823 | TheIndex.getOrInsertValueInfo(ValueGUID, UseStrtab ? ValueName : ""), | |||
4824 | OriginalNameID); | |||
4825 | } | |||
4826 | ||||
4827 | // Specialized value symbol table parser used when reading module index | |||
4828 | // blocks where we don't actually create global values. The parsed information | |||
4829 | // is saved in the bitcode reader for use when later parsing summaries. | |||
4830 | Error ModuleSummaryIndexBitcodeReader::parseValueSymbolTable( | |||
4831 | uint64_t Offset, | |||
4832 | DenseMap<unsigned, GlobalValue::LinkageTypes> &ValueIdToLinkageMap) { | |||
4833 | // With a strtab the VST is not required to parse the summary. | |||
4834 | if (UseStrtab) | |||
4835 | return Error::success(); | |||
4836 | ||||
4837 | assert(Offset > 0 && "Expected non-zero VST offset")(static_cast <bool> (Offset > 0 && "Expected non-zero VST offset" ) ? void (0) : __assert_fail ("Offset > 0 && \"Expected non-zero VST offset\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4837, __extension__ __PRETTY_FUNCTION__)); | |||
4838 | uint64_t CurrentBit = jumpToValueSymbolTable(Offset, Stream); | |||
4839 | ||||
4840 | if (Stream.EnterSubBlock(bitc::VALUE_SYMTAB_BLOCK_ID)) | |||
4841 | return error("Invalid record"); | |||
4842 | ||||
4843 | SmallVector<uint64_t, 64> Record; | |||
4844 | ||||
4845 | // Read all the records for this value table. | |||
4846 | SmallString<128> ValueName; | |||
4847 | ||||
4848 | while (true) { | |||
4849 | BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); | |||
4850 | ||||
4851 | switch (Entry.Kind) { | |||
4852 | case BitstreamEntry::SubBlock: // Handled for us already. | |||
4853 | case BitstreamEntry::Error: | |||
4854 | return error("Malformed block"); | |||
4855 | case BitstreamEntry::EndBlock: | |||
4856 | // Done parsing VST, jump back to wherever we came from. | |||
4857 | Stream.JumpToBit(CurrentBit); | |||
4858 | return Error::success(); | |||
4859 | case BitstreamEntry::Record: | |||
4860 | // The interesting case. | |||
4861 | break; | |||
4862 | } | |||
4863 | ||||
4864 | // Read a record. | |||
4865 | Record.clear(); | |||
4866 | switch (Stream.readRecord(Entry.ID, Record)) { | |||
4867 | default: // Default behavior: ignore (e.g. VST_CODE_BBENTRY records). | |||
4868 | break; | |||
4869 | case bitc::VST_CODE_ENTRY: { // VST_CODE_ENTRY: [valueid, namechar x N] | |||
4870 | if (convertToString(Record, 1, ValueName)) | |||
4871 | return error("Invalid record"); | |||
4872 | unsigned ValueID = Record[0]; | |||
4873 | assert(!SourceFileName.empty())(static_cast <bool> (!SourceFileName.empty()) ? void (0 ) : __assert_fail ("!SourceFileName.empty()", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4873, __extension__ __PRETTY_FUNCTION__)); | |||
4874 | auto VLI = ValueIdToLinkageMap.find(ValueID); | |||
4875 | assert(VLI != ValueIdToLinkageMap.end() &&(static_cast <bool> (VLI != ValueIdToLinkageMap.end() && "No linkage found for VST entry?") ? void (0) : __assert_fail ("VLI != ValueIdToLinkageMap.end() && \"No linkage found for VST entry?\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4876, __extension__ __PRETTY_FUNCTION__)) | |||
4876 | "No linkage found for VST entry?")(static_cast <bool> (VLI != ValueIdToLinkageMap.end() && "No linkage found for VST entry?") ? void (0) : __assert_fail ("VLI != ValueIdToLinkageMap.end() && \"No linkage found for VST entry?\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4876, __extension__ __PRETTY_FUNCTION__)); | |||
4877 | auto Linkage = VLI->second; | |||
4878 | setValueGUID(ValueID, ValueName, Linkage, SourceFileName); | |||
4879 | ValueName.clear(); | |||
4880 | break; | |||
4881 | } | |||
4882 | case bitc::VST_CODE_FNENTRY: { | |||
4883 | // VST_CODE_FNENTRY: [valueid, offset, namechar x N] | |||
4884 | if (convertToString(Record, 2, ValueName)) | |||
4885 | return error("Invalid record"); | |||
4886 | unsigned ValueID = Record[0]; | |||
4887 | assert(!SourceFileName.empty())(static_cast <bool> (!SourceFileName.empty()) ? void (0 ) : __assert_fail ("!SourceFileName.empty()", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4887, __extension__ __PRETTY_FUNCTION__)); | |||
4888 | auto VLI = ValueIdToLinkageMap.find(ValueID); | |||
4889 | assert(VLI != ValueIdToLinkageMap.end() &&(static_cast <bool> (VLI != ValueIdToLinkageMap.end() && "No linkage found for VST entry?") ? void (0) : __assert_fail ("VLI != ValueIdToLinkageMap.end() && \"No linkage found for VST entry?\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4890, __extension__ __PRETTY_FUNCTION__)) | |||
4890 | "No linkage found for VST entry?")(static_cast <bool> (VLI != ValueIdToLinkageMap.end() && "No linkage found for VST entry?") ? void (0) : __assert_fail ("VLI != ValueIdToLinkageMap.end() && \"No linkage found for VST entry?\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4890, __extension__ __PRETTY_FUNCTION__)); | |||
4891 | auto Linkage = VLI->second; | |||
4892 | setValueGUID(ValueID, ValueName, Linkage, SourceFileName); | |||
4893 | ValueName.clear(); | |||
4894 | break; | |||
4895 | } | |||
4896 | case bitc::VST_CODE_COMBINED_ENTRY: { | |||
4897 | // VST_CODE_COMBINED_ENTRY: [valueid, refguid] | |||
4898 | unsigned ValueID = Record[0]; | |||
4899 | GlobalValue::GUID RefGUID = Record[1]; | |||
4900 | // The "original name", which is the second value of the pair will be | |||
4901 | // overriden later by a FS_COMBINED_ORIGINAL_NAME in the combined index. | |||
4902 | ValueIdToValueInfoMap[ValueID] = | |||
4903 | std::make_pair(TheIndex.getOrInsertValueInfo(RefGUID), RefGUID); | |||
4904 | break; | |||
4905 | } | |||
4906 | } | |||
4907 | } | |||
4908 | } | |||
4909 | ||||
4910 | // Parse just the blocks needed for building the index out of the module. | |||
4911 | // At the end of this routine the module Index is populated with a map | |||
4912 | // from global value id to GlobalValueSummary objects. | |||
4913 | Error ModuleSummaryIndexBitcodeReader::parseModule() { | |||
4914 | if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID)) | |||
4915 | return error("Invalid record"); | |||
4916 | ||||
4917 | SmallVector<uint64_t, 64> Record; | |||
4918 | DenseMap<unsigned, GlobalValue::LinkageTypes> ValueIdToLinkageMap; | |||
4919 | unsigned ValueId = 0; | |||
4920 | ||||
4921 | // Read the index for this module. | |||
4922 | while (true) { | |||
4923 | BitstreamEntry Entry = Stream.advance(); | |||
4924 | ||||
4925 | switch (Entry.Kind) { | |||
4926 | case BitstreamEntry::Error: | |||
4927 | return error("Malformed block"); | |||
4928 | case BitstreamEntry::EndBlock: | |||
4929 | return Error::success(); | |||
4930 | ||||
4931 | case BitstreamEntry::SubBlock: | |||
4932 | switch (Entry.ID) { | |||
4933 | default: // Skip unknown content. | |||
4934 | if (Stream.SkipBlock()) | |||
4935 | return error("Invalid record"); | |||
4936 | break; | |||
4937 | case bitc::BLOCKINFO_BLOCK_ID: | |||
4938 | // Need to parse these to get abbrev ids (e.g. for VST) | |||
4939 | if (readBlockInfo()) | |||
4940 | return error("Malformed block"); | |||
4941 | break; | |||
4942 | case bitc::VALUE_SYMTAB_BLOCK_ID: | |||
4943 | // Should have been parsed earlier via VSTOffset, unless there | |||
4944 | // is no summary section. | |||
4945 | assert(((SeenValueSymbolTable && VSTOffset > 0) ||(static_cast <bool> (((SeenValueSymbolTable && VSTOffset > 0) || !SeenGlobalValSummary) && "Expected early VST parse via VSTOffset record" ) ? void (0) : __assert_fail ("((SeenValueSymbolTable && VSTOffset > 0) || !SeenGlobalValSummary) && \"Expected early VST parse via VSTOffset record\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4947, __extension__ __PRETTY_FUNCTION__)) | |||
4946 | !SeenGlobalValSummary) &&(static_cast <bool> (((SeenValueSymbolTable && VSTOffset > 0) || !SeenGlobalValSummary) && "Expected early VST parse via VSTOffset record" ) ? void (0) : __assert_fail ("((SeenValueSymbolTable && VSTOffset > 0) || !SeenGlobalValSummary) && \"Expected early VST parse via VSTOffset record\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4947, __extension__ __PRETTY_FUNCTION__)) | |||
4947 | "Expected early VST parse via VSTOffset record")(static_cast <bool> (((SeenValueSymbolTable && VSTOffset > 0) || !SeenGlobalValSummary) && "Expected early VST parse via VSTOffset record" ) ? void (0) : __assert_fail ("((SeenValueSymbolTable && VSTOffset > 0) || !SeenGlobalValSummary) && \"Expected early VST parse via VSTOffset record\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4947, __extension__ __PRETTY_FUNCTION__)); | |||
4948 | if (Stream.SkipBlock()) | |||
4949 | return error("Invalid record"); | |||
4950 | break; | |||
4951 | case bitc::GLOBALVAL_SUMMARY_BLOCK_ID: | |||
4952 | case bitc::FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID: | |||
4953 | assert(!SeenValueSymbolTable &&(static_cast <bool> (!SeenValueSymbolTable && "Already read VST when parsing summary block?" ) ? void (0) : __assert_fail ("!SeenValueSymbolTable && \"Already read VST when parsing summary block?\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4954, __extension__ __PRETTY_FUNCTION__)) | |||
4954 | "Already read VST when parsing summary block?")(static_cast <bool> (!SeenValueSymbolTable && "Already read VST when parsing summary block?" ) ? void (0) : __assert_fail ("!SeenValueSymbolTable && \"Already read VST when parsing summary block?\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 4954, __extension__ __PRETTY_FUNCTION__)); | |||
4955 | // We might not have a VST if there were no values in the | |||
4956 | // summary. An empty summary block generated when we are | |||
4957 | // performing ThinLTO compiles so we don't later invoke | |||
4958 | // the regular LTO process on them. | |||
4959 | if (VSTOffset > 0) { | |||
4960 | if (Error Err = parseValueSymbolTable(VSTOffset, ValueIdToLinkageMap)) | |||
4961 | return Err; | |||
4962 | SeenValueSymbolTable = true; | |||
4963 | } | |||
4964 | SeenGlobalValSummary = true; | |||
4965 | if (Error Err = parseEntireSummary(Entry.ID)) | |||
4966 | return Err; | |||
4967 | break; | |||
4968 | case bitc::MODULE_STRTAB_BLOCK_ID: | |||
4969 | if (Error Err = parseModuleStringTable()) | |||
4970 | return Err; | |||
4971 | break; | |||
4972 | } | |||
4973 | continue; | |||
4974 | ||||
4975 | case BitstreamEntry::Record: { | |||
4976 | Record.clear(); | |||
4977 | auto BitCode = Stream.readRecord(Entry.ID, Record); | |||
4978 | switch (BitCode) { | |||
4979 | default: | |||
4980 | break; // Default behavior, ignore unknown content. | |||
4981 | case bitc::MODULE_CODE_VERSION: { | |||
4982 | if (Error Err = parseVersionRecord(Record).takeError()) | |||
4983 | return Err; | |||
4984 | break; | |||
4985 | } | |||
4986 | /// MODULE_CODE_SOURCE_FILENAME: [namechar x N] | |||
4987 | case bitc::MODULE_CODE_SOURCE_FILENAME: { | |||
4988 | SmallString<128> ValueName; | |||
4989 | if (convertToString(Record, 0, ValueName)) | |||
4990 | return error("Invalid record"); | |||
4991 | SourceFileName = ValueName.c_str(); | |||
4992 | break; | |||
4993 | } | |||
4994 | /// MODULE_CODE_HASH: [5*i32] | |||
4995 | case bitc::MODULE_CODE_HASH: { | |||
4996 | if (Record.size() != 5) | |||
4997 | return error("Invalid hash length " + Twine(Record.size()).str()); | |||
4998 | auto &Hash = addThisModule()->second.second; | |||
4999 | int Pos = 0; | |||
5000 | for (auto &Val : Record) { | |||
5001 | assert(!(Val >> 32) && "Unexpected high bits set")(static_cast <bool> (!(Val >> 32) && "Unexpected high bits set" ) ? void (0) : __assert_fail ("!(Val >> 32) && \"Unexpected high bits set\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 5001, __extension__ __PRETTY_FUNCTION__)); | |||
5002 | Hash[Pos++] = Val; | |||
5003 | } | |||
5004 | break; | |||
5005 | } | |||
5006 | /// MODULE_CODE_VSTOFFSET: [offset] | |||
5007 | case bitc::MODULE_CODE_VSTOFFSET: | |||
5008 | if (Record.size() < 1) | |||
5009 | return error("Invalid record"); | |||
5010 | // Note that we subtract 1 here because the offset is relative to one | |||
5011 | // word before the start of the identification or module block, which | |||
5012 | // was historically always the start of the regular bitcode header. | |||
5013 | VSTOffset = Record[0] - 1; | |||
5014 | break; | |||
5015 | // v1 GLOBALVAR: [pointer type, isconst, initid, linkage, ...] | |||
5016 | // v1 FUNCTION: [type, callingconv, isproto, linkage, ...] | |||
5017 | // v1 ALIAS: [alias type, addrspace, aliasee val#, linkage, ...] | |||
5018 | // v2: [strtab offset, strtab size, v1] | |||
5019 | case bitc::MODULE_CODE_GLOBALVAR: | |||
5020 | case bitc::MODULE_CODE_FUNCTION: | |||
5021 | case bitc::MODULE_CODE_ALIAS: { | |||
5022 | StringRef Name; | |||
5023 | ArrayRef<uint64_t> GVRecord; | |||
5024 | std::tie(Name, GVRecord) = readNameFromStrtab(Record); | |||
5025 | if (GVRecord.size() <= 3) | |||
5026 | return error("Invalid record"); | |||
5027 | uint64_t RawLinkage = GVRecord[3]; | |||
5028 | GlobalValue::LinkageTypes Linkage = getDecodedLinkage(RawLinkage); | |||
5029 | if (!UseStrtab) { | |||
5030 | ValueIdToLinkageMap[ValueId++] = Linkage; | |||
5031 | break; | |||
5032 | } | |||
5033 | ||||
5034 | setValueGUID(ValueId++, Name, Linkage, SourceFileName); | |||
5035 | break; | |||
5036 | } | |||
5037 | } | |||
5038 | } | |||
5039 | continue; | |||
5040 | } | |||
5041 | } | |||
5042 | } | |||
5043 | ||||
5044 | std::vector<ValueInfo> | |||
5045 | ModuleSummaryIndexBitcodeReader::makeRefList(ArrayRef<uint64_t> Record) { | |||
5046 | std::vector<ValueInfo> Ret; | |||
5047 | Ret.reserve(Record.size()); | |||
5048 | for (uint64_t RefValueId : Record) | |||
5049 | Ret.push_back(getValueInfoFromValueId(RefValueId).first); | |||
5050 | return Ret; | |||
5051 | } | |||
5052 | ||||
5053 | std::vector<FunctionSummary::EdgeTy> | |||
5054 | ModuleSummaryIndexBitcodeReader::makeCallList(ArrayRef<uint64_t> Record, | |||
5055 | bool IsOldProfileFormat, | |||
5056 | bool HasProfile, bool HasRelBF) { | |||
5057 | std::vector<FunctionSummary::EdgeTy> Ret; | |||
5058 | Ret.reserve(Record.size()); | |||
5059 | for (unsigned I = 0, E = Record.size(); I != E; ++I) { | |||
5060 | CalleeInfo::HotnessType Hotness = CalleeInfo::HotnessType::Unknown; | |||
5061 | uint64_t RelBF = 0; | |||
5062 | ValueInfo Callee = getValueInfoFromValueId(Record[I]).first; | |||
5063 | if (IsOldProfileFormat) { | |||
5064 | I += 1; // Skip old callsitecount field | |||
5065 | if (HasProfile) | |||
5066 | I += 1; // Skip old profilecount field | |||
5067 | } else if (HasProfile) | |||
5068 | Hotness = static_cast<CalleeInfo::HotnessType>(Record[++I]); | |||
5069 | else if (HasRelBF) | |||
5070 | RelBF = Record[++I]; | |||
5071 | Ret.push_back(FunctionSummary::EdgeTy{Callee, CalleeInfo(Hotness, RelBF)}); | |||
5072 | } | |||
5073 | return Ret; | |||
5074 | } | |||
5075 | ||||
5076 | static void | |||
5077 | parseWholeProgramDevirtResolutionByArg(ArrayRef<uint64_t> Record, size_t &Slot, | |||
5078 | WholeProgramDevirtResolution &Wpd) { | |||
5079 | uint64_t ArgNum = Record[Slot++]; | |||
5080 | WholeProgramDevirtResolution::ByArg &B = | |||
5081 | Wpd.ResByArg[{Record.begin() + Slot, Record.begin() + Slot + ArgNum}]; | |||
5082 | Slot += ArgNum; | |||
5083 | ||||
5084 | B.TheKind = | |||
5085 | static_cast<WholeProgramDevirtResolution::ByArg::Kind>(Record[Slot++]); | |||
5086 | B.Info = Record[Slot++]; | |||
5087 | B.Byte = Record[Slot++]; | |||
5088 | B.Bit = Record[Slot++]; | |||
5089 | } | |||
5090 | ||||
5091 | static void parseWholeProgramDevirtResolution(ArrayRef<uint64_t> Record, | |||
5092 | StringRef Strtab, size_t &Slot, | |||
5093 | TypeIdSummary &TypeId) { | |||
5094 | uint64_t Id = Record[Slot++]; | |||
5095 | WholeProgramDevirtResolution &Wpd = TypeId.WPDRes[Id]; | |||
5096 | ||||
5097 | Wpd.TheKind = static_cast<WholeProgramDevirtResolution::Kind>(Record[Slot++]); | |||
5098 | Wpd.SingleImplName = {Strtab.data() + Record[Slot], | |||
5099 | static_cast<size_t>(Record[Slot + 1])}; | |||
5100 | Slot += 2; | |||
5101 | ||||
5102 | uint64_t ResByArgNum = Record[Slot++]; | |||
5103 | for (uint64_t I = 0; I != ResByArgNum; ++I) | |||
5104 | parseWholeProgramDevirtResolutionByArg(Record, Slot, Wpd); | |||
5105 | } | |||
5106 | ||||
5107 | static void parseTypeIdSummaryRecord(ArrayRef<uint64_t> Record, | |||
5108 | StringRef Strtab, | |||
5109 | ModuleSummaryIndex &TheIndex) { | |||
5110 | size_t Slot = 0; | |||
5111 | TypeIdSummary &TypeId = TheIndex.getOrInsertTypeIdSummary( | |||
5112 | {Strtab.data() + Record[Slot], static_cast<size_t>(Record[Slot + 1])}); | |||
5113 | Slot += 2; | |||
5114 | ||||
5115 | TypeId.TTRes.TheKind = static_cast<TypeTestResolution::Kind>(Record[Slot++]); | |||
5116 | TypeId.TTRes.SizeM1BitWidth = Record[Slot++]; | |||
5117 | TypeId.TTRes.AlignLog2 = Record[Slot++]; | |||
5118 | TypeId.TTRes.SizeM1 = Record[Slot++]; | |||
5119 | TypeId.TTRes.BitMask = Record[Slot++]; | |||
5120 | TypeId.TTRes.InlineBits = Record[Slot++]; | |||
5121 | ||||
5122 | while (Slot < Record.size()) | |||
5123 | parseWholeProgramDevirtResolution(Record, Strtab, Slot, TypeId); | |||
5124 | } | |||
5125 | ||||
5126 | // Eagerly parse the entire summary block. This populates the GlobalValueSummary | |||
5127 | // objects in the index. | |||
5128 | Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(unsigned ID) { | |||
5129 | if (Stream.EnterSubBlock(ID)) | |||
5130 | return error("Invalid record"); | |||
5131 | SmallVector<uint64_t, 64> Record; | |||
5132 | ||||
5133 | // Parse version | |||
5134 | { | |||
5135 | BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); | |||
5136 | if (Entry.Kind != BitstreamEntry::Record) | |||
5137 | return error("Invalid Summary Block: record for version expected"); | |||
5138 | if (Stream.readRecord(Entry.ID, Record) != bitc::FS_VERSION) | |||
5139 | return error("Invalid Summary Block: version expected"); | |||
5140 | } | |||
5141 | const uint64_t Version = Record[0]; | |||
5142 | const bool IsOldProfileFormat = Version == 1; | |||
5143 | if (Version < 1 || Version > 4) | |||
5144 | return error("Invalid summary version " + Twine(Version) + | |||
5145 | ", 1, 2, 3 or 4 expected"); | |||
5146 | Record.clear(); | |||
5147 | ||||
5148 | // Keep around the last seen summary to be used when we see an optional | |||
5149 | // "OriginalName" attachement. | |||
5150 | GlobalValueSummary *LastSeenSummary = nullptr; | |||
5151 | GlobalValue::GUID LastSeenGUID = 0; | |||
5152 | ||||
5153 | // We can expect to see any number of type ID information records before | |||
5154 | // each function summary records; these variables store the information | |||
5155 | // collected so far so that it can be used to create the summary object. | |||
5156 | std::vector<GlobalValue::GUID> PendingTypeTests; | |||
5157 | std::vector<FunctionSummary::VFuncId> PendingTypeTestAssumeVCalls, | |||
5158 | PendingTypeCheckedLoadVCalls; | |||
5159 | std::vector<FunctionSummary::ConstVCall> PendingTypeTestAssumeConstVCalls, | |||
5160 | PendingTypeCheckedLoadConstVCalls; | |||
5161 | ||||
5162 | while (true) { | |||
5163 | BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); | |||
5164 | ||||
5165 | switch (Entry.Kind) { | |||
5166 | case BitstreamEntry::SubBlock: // Handled for us already. | |||
5167 | case BitstreamEntry::Error: | |||
5168 | return error("Malformed block"); | |||
5169 | case BitstreamEntry::EndBlock: | |||
5170 | return Error::success(); | |||
5171 | case BitstreamEntry::Record: | |||
5172 | // The interesting case. | |||
5173 | break; | |||
5174 | } | |||
5175 | ||||
5176 | // Read a record. The record format depends on whether this | |||
5177 | // is a per-module index or a combined index file. In the per-module | |||
5178 | // case the records contain the associated value's ID for correlation | |||
5179 | // with VST entries. In the combined index the correlation is done | |||
5180 | // via the bitcode offset of the summary records (which were saved | |||
5181 | // in the combined index VST entries). The records also contain | |||
5182 | // information used for ThinLTO renaming and importing. | |||
5183 | Record.clear(); | |||
5184 | auto BitCode = Stream.readRecord(Entry.ID, Record); | |||
5185 | switch (BitCode) { | |||
5186 | default: // Default behavior: ignore. | |||
5187 | break; | |||
5188 | case bitc::FS_FLAGS: { // [flags] | |||
5189 | uint64_t Flags = Record[0]; | |||
5190 | // Scan flags (set only on the combined index). | |||
5191 | assert(Flags <= 0x3 && "Unexpected bits in flag")(static_cast <bool> (Flags <= 0x3 && "Unexpected bits in flag" ) ? void (0) : __assert_fail ("Flags <= 0x3 && \"Unexpected bits in flag\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 5191, __extension__ __PRETTY_FUNCTION__)); | |||
5192 | ||||
5193 | // 1 bit: WithGlobalValueDeadStripping flag. | |||
5194 | if (Flags & 0x1) | |||
5195 | TheIndex.setWithGlobalValueDeadStripping(); | |||
5196 | // 1 bit: SkipModuleByDistributedBackend flag. | |||
5197 | if (Flags & 0x2) | |||
5198 | TheIndex.setSkipModuleByDistributedBackend(); | |||
5199 | break; | |||
5200 | } | |||
5201 | case bitc::FS_VALUE_GUID: { // [valueid, refguid] | |||
5202 | uint64_t ValueID = Record[0]; | |||
5203 | GlobalValue::GUID RefGUID = Record[1]; | |||
5204 | ValueIdToValueInfoMap[ValueID] = | |||
5205 | std::make_pair(TheIndex.getOrInsertValueInfo(RefGUID), RefGUID); | |||
5206 | break; | |||
5207 | } | |||
5208 | // FS_PERMODULE: [valueid, flags, instcount, fflags, numrefs, | |||
5209 | // numrefs x valueid, n x (valueid)] | |||
5210 | // FS_PERMODULE_PROFILE: [valueid, flags, instcount, fflags, numrefs, | |||
5211 | // numrefs x valueid, | |||
5212 | // n x (valueid, hotness)] | |||
5213 | // FS_PERMODULE_RELBF: [valueid, flags, instcount, fflags, numrefs, | |||
5214 | // numrefs x valueid, | |||
5215 | // n x (valueid, relblockfreq)] | |||
5216 | case bitc::FS_PERMODULE: | |||
5217 | case bitc::FS_PERMODULE_RELBF: | |||
5218 | case bitc::FS_PERMODULE_PROFILE: { | |||
5219 | unsigned ValueID = Record[0]; | |||
5220 | uint64_t RawFlags = Record[1]; | |||
5221 | unsigned InstCount = Record[2]; | |||
5222 | uint64_t RawFunFlags = 0; | |||
5223 | unsigned NumRefs = Record[3]; | |||
5224 | int RefListStartIndex = 4; | |||
5225 | if (Version >= 4) { | |||
5226 | RawFunFlags = Record[3]; | |||
5227 | NumRefs = Record[4]; | |||
5228 | RefListStartIndex = 5; | |||
5229 | } | |||
5230 | ||||
5231 | auto Flags = getDecodedGVSummaryFlags(RawFlags, Version); | |||
5232 | // The module path string ref set in the summary must be owned by the | |||
5233 | // index's module string table. Since we don't have a module path | |||
5234 | // string table section in the per-module index, we create a single | |||
5235 | // module path string table entry with an empty (0) ID to take | |||
5236 | // ownership. | |||
5237 | int CallGraphEdgeStartIndex = RefListStartIndex + NumRefs; | |||
5238 | assert(Record.size() >= RefListStartIndex + NumRefs &&(static_cast <bool> (Record.size() >= RefListStartIndex + NumRefs && "Record size inconsistent with number of references" ) ? void (0) : __assert_fail ("Record.size() >= RefListStartIndex + NumRefs && \"Record size inconsistent with number of references\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 5239, __extension__ __PRETTY_FUNCTION__)) | |||
5239 | "Record size inconsistent with number of references")(static_cast <bool> (Record.size() >= RefListStartIndex + NumRefs && "Record size inconsistent with number of references" ) ? void (0) : __assert_fail ("Record.size() >= RefListStartIndex + NumRefs && \"Record size inconsistent with number of references\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 5239, __extension__ __PRETTY_FUNCTION__)); | |||
5240 | std::vector<ValueInfo> Refs = makeRefList( | |||
5241 | ArrayRef<uint64_t>(Record).slice(RefListStartIndex, NumRefs)); | |||
5242 | bool HasProfile = (BitCode == bitc::FS_PERMODULE_PROFILE); | |||
5243 | bool HasRelBF = (BitCode == bitc::FS_PERMODULE_RELBF); | |||
5244 | std::vector<FunctionSummary::EdgeTy> Calls = makeCallList( | |||
5245 | ArrayRef<uint64_t>(Record).slice(CallGraphEdgeStartIndex), | |||
5246 | IsOldProfileFormat, HasProfile, HasRelBF); | |||
5247 | auto FS = llvm::make_unique<FunctionSummary>( | |||
5248 | Flags, InstCount, getDecodedFFlags(RawFunFlags), std::move(Refs), | |||
5249 | std::move(Calls), std::move(PendingTypeTests), | |||
5250 | std::move(PendingTypeTestAssumeVCalls), | |||
5251 | std::move(PendingTypeCheckedLoadVCalls), | |||
5252 | std::move(PendingTypeTestAssumeConstVCalls), | |||
5253 | std::move(PendingTypeCheckedLoadConstVCalls)); | |||
5254 | PendingTypeTests.clear(); | |||
5255 | PendingTypeTestAssumeVCalls.clear(); | |||
5256 | PendingTypeCheckedLoadVCalls.clear(); | |||
5257 | PendingTypeTestAssumeConstVCalls.clear(); | |||
5258 | PendingTypeCheckedLoadConstVCalls.clear(); | |||
5259 | auto VIAndOriginalGUID = getValueInfoFromValueId(ValueID); | |||
5260 | FS->setModulePath(addThisModule()->first()); | |||
5261 | FS->setOriginalName(VIAndOriginalGUID.second); | |||
5262 | TheIndex.addGlobalValueSummary(VIAndOriginalGUID.first, std::move(FS)); | |||
5263 | break; | |||
5264 | } | |||
5265 | // FS_ALIAS: [valueid, flags, valueid] | |||
5266 | // Aliases must be emitted (and parsed) after all FS_PERMODULE entries, as | |||
5267 | // they expect all aliasee summaries to be available. | |||
5268 | case bitc::FS_ALIAS: { | |||
5269 | unsigned ValueID = Record[0]; | |||
5270 | uint64_t RawFlags = Record[1]; | |||
5271 | unsigned AliaseeID = Record[2]; | |||
5272 | auto Flags = getDecodedGVSummaryFlags(RawFlags, Version); | |||
5273 | auto AS = llvm::make_unique<AliasSummary>(Flags); | |||
5274 | // The module path string ref set in the summary must be owned by the | |||
5275 | // index's module string table. Since we don't have a module path | |||
5276 | // string table section in the per-module index, we create a single | |||
5277 | // module path string table entry with an empty (0) ID to take | |||
5278 | // ownership. | |||
5279 | AS->setModulePath(addThisModule()->first()); | |||
5280 | ||||
5281 | GlobalValue::GUID AliaseeGUID = | |||
5282 | getValueInfoFromValueId(AliaseeID).first.getGUID(); | |||
5283 | auto AliaseeInModule = | |||
5284 | TheIndex.findSummaryInModule(AliaseeGUID, ModulePath); | |||
5285 | if (!AliaseeInModule) | |||
5286 | return error("Alias expects aliasee summary to be parsed"); | |||
5287 | AS->setAliasee(AliaseeInModule); | |||
5288 | AS->setAliaseeGUID(AliaseeGUID); | |||
5289 | ||||
5290 | auto GUID = getValueInfoFromValueId(ValueID); | |||
5291 | AS->setOriginalName(GUID.second); | |||
5292 | TheIndex.addGlobalValueSummary(GUID.first, std::move(AS)); | |||
5293 | break; | |||
5294 | } | |||
5295 | // FS_PERMODULE_GLOBALVAR_INIT_REFS: [valueid, flags, n x valueid] | |||
5296 | case bitc::FS_PERMODULE_GLOBALVAR_INIT_REFS: { | |||
5297 | unsigned ValueID = Record[0]; | |||
5298 | uint64_t RawFlags = Record[1]; | |||
5299 | auto Flags = getDecodedGVSummaryFlags(RawFlags, Version); | |||
5300 | std::vector<ValueInfo> Refs = | |||
5301 | makeRefList(ArrayRef<uint64_t>(Record).slice(2)); | |||
5302 | auto FS = llvm::make_unique<GlobalVarSummary>(Flags, std::move(Refs)); | |||
5303 | FS->setModulePath(addThisModule()->first()); | |||
5304 | auto GUID = getValueInfoFromValueId(ValueID); | |||
5305 | FS->setOriginalName(GUID.second); | |||
5306 | TheIndex.addGlobalValueSummary(GUID.first, std::move(FS)); | |||
5307 | break; | |||
5308 | } | |||
5309 | // FS_COMBINED: [valueid, modid, flags, instcount, fflags, numrefs, | |||
5310 | // numrefs x valueid, n x (valueid)] | |||
5311 | // FS_COMBINED_PROFILE: [valueid, modid, flags, instcount, fflags, numrefs, | |||
5312 | // numrefs x valueid, n x (valueid, hotness)] | |||
5313 | case bitc::FS_COMBINED: | |||
5314 | case bitc::FS_COMBINED_PROFILE: { | |||
5315 | unsigned ValueID = Record[0]; | |||
5316 | uint64_t ModuleId = Record[1]; | |||
5317 | uint64_t RawFlags = Record[2]; | |||
5318 | unsigned InstCount = Record[3]; | |||
5319 | uint64_t RawFunFlags = 0; | |||
5320 | unsigned NumRefs = Record[4]; | |||
5321 | int RefListStartIndex = 5; | |||
5322 | ||||
5323 | if (Version >= 4) { | |||
5324 | RawFunFlags = Record[4]; | |||
5325 | NumRefs = Record[5]; | |||
5326 | RefListStartIndex = 6; | |||
5327 | } | |||
5328 | ||||
5329 | auto Flags = getDecodedGVSummaryFlags(RawFlags, Version); | |||
5330 | int CallGraphEdgeStartIndex = RefListStartIndex + NumRefs; | |||
5331 | assert(Record.size() >= RefListStartIndex + NumRefs &&(static_cast <bool> (Record.size() >= RefListStartIndex + NumRefs && "Record size inconsistent with number of references" ) ? void (0) : __assert_fail ("Record.size() >= RefListStartIndex + NumRefs && \"Record size inconsistent with number of references\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 5332, __extension__ __PRETTY_FUNCTION__)) | |||
5332 | "Record size inconsistent with number of references")(static_cast <bool> (Record.size() >= RefListStartIndex + NumRefs && "Record size inconsistent with number of references" ) ? void (0) : __assert_fail ("Record.size() >= RefListStartIndex + NumRefs && \"Record size inconsistent with number of references\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 5332, __extension__ __PRETTY_FUNCTION__)); | |||
5333 | std::vector<ValueInfo> Refs = makeRefList( | |||
5334 | ArrayRef<uint64_t>(Record).slice(RefListStartIndex, NumRefs)); | |||
5335 | bool HasProfile = (BitCode == bitc::FS_COMBINED_PROFILE); | |||
5336 | std::vector<FunctionSummary::EdgeTy> Edges = makeCallList( | |||
5337 | ArrayRef<uint64_t>(Record).slice(CallGraphEdgeStartIndex), | |||
5338 | IsOldProfileFormat, HasProfile, false); | |||
5339 | ValueInfo VI = getValueInfoFromValueId(ValueID).first; | |||
5340 | auto FS = llvm::make_unique<FunctionSummary>( | |||
5341 | Flags, InstCount, getDecodedFFlags(RawFunFlags), std::move(Refs), | |||
5342 | std::move(Edges), std::move(PendingTypeTests), | |||
5343 | std::move(PendingTypeTestAssumeVCalls), | |||
5344 | std::move(PendingTypeCheckedLoadVCalls), | |||
5345 | std::move(PendingTypeTestAssumeConstVCalls), | |||
5346 | std::move(PendingTypeCheckedLoadConstVCalls)); | |||
5347 | PendingTypeTests.clear(); | |||
5348 | PendingTypeTestAssumeVCalls.clear(); | |||
5349 | PendingTypeCheckedLoadVCalls.clear(); | |||
5350 | PendingTypeTestAssumeConstVCalls.clear(); | |||
5351 | PendingTypeCheckedLoadConstVCalls.clear(); | |||
5352 | LastSeenSummary = FS.get(); | |||
5353 | LastSeenGUID = VI.getGUID(); | |||
5354 | FS->setModulePath(ModuleIdMap[ModuleId]); | |||
5355 | TheIndex.addGlobalValueSummary(VI, std::move(FS)); | |||
5356 | break; | |||
5357 | } | |||
5358 | // FS_COMBINED_ALIAS: [valueid, modid, flags, valueid] | |||
5359 | // Aliases must be emitted (and parsed) after all FS_COMBINED entries, as | |||
5360 | // they expect all aliasee summaries to be available. | |||
5361 | case bitc::FS_COMBINED_ALIAS: { | |||
5362 | unsigned ValueID = Record[0]; | |||
5363 | uint64_t ModuleId = Record[1]; | |||
5364 | uint64_t RawFlags = Record[2]; | |||
5365 | unsigned AliaseeValueId = Record[3]; | |||
5366 | auto Flags = getDecodedGVSummaryFlags(RawFlags, Version); | |||
5367 | auto AS = llvm::make_unique<AliasSummary>(Flags); | |||
5368 | LastSeenSummary = AS.get(); | |||
5369 | AS->setModulePath(ModuleIdMap[ModuleId]); | |||
5370 | ||||
5371 | auto AliaseeGUID = | |||
5372 | getValueInfoFromValueId(AliaseeValueId).first.getGUID(); | |||
5373 | auto AliaseeInModule = | |||
5374 | TheIndex.findSummaryInModule(AliaseeGUID, AS->modulePath()); | |||
5375 | AS->setAliasee(AliaseeInModule); | |||
5376 | AS->setAliaseeGUID(AliaseeGUID); | |||
5377 | ||||
5378 | ValueInfo VI = getValueInfoFromValueId(ValueID).first; | |||
5379 | LastSeenGUID = VI.getGUID(); | |||
5380 | TheIndex.addGlobalValueSummary(VI, std::move(AS)); | |||
5381 | break; | |||
5382 | } | |||
5383 | // FS_COMBINED_GLOBALVAR_INIT_REFS: [valueid, modid, flags, n x valueid] | |||
5384 | case bitc::FS_COMBINED_GLOBALVAR_INIT_REFS: { | |||
5385 | unsigned ValueID = Record[0]; | |||
5386 | uint64_t ModuleId = Record[1]; | |||
5387 | uint64_t RawFlags = Record[2]; | |||
5388 | auto Flags = getDecodedGVSummaryFlags(RawFlags, Version); | |||
5389 | std::vector<ValueInfo> Refs = | |||
5390 | makeRefList(ArrayRef<uint64_t>(Record).slice(3)); | |||
5391 | auto FS = llvm::make_unique<GlobalVarSummary>(Flags, std::move(Refs)); | |||
5392 | LastSeenSummary = FS.get(); | |||
5393 | FS->setModulePath(ModuleIdMap[ModuleId]); | |||
5394 | ValueInfo VI = getValueInfoFromValueId(ValueID).first; | |||
5395 | LastSeenGUID = VI.getGUID(); | |||
5396 | TheIndex.addGlobalValueSummary(VI, std::move(FS)); | |||
5397 | break; | |||
5398 | } | |||
5399 | // FS_COMBINED_ORIGINAL_NAME: [original_name] | |||
5400 | case bitc::FS_COMBINED_ORIGINAL_NAME: { | |||
5401 | uint64_t OriginalName = Record[0]; | |||
5402 | if (!LastSeenSummary) | |||
5403 | return error("Name attachment that does not follow a combined record"); | |||
5404 | LastSeenSummary->setOriginalName(OriginalName); | |||
5405 | TheIndex.addOriginalName(LastSeenGUID, OriginalName); | |||
5406 | // Reset the LastSeenSummary | |||
5407 | LastSeenSummary = nullptr; | |||
5408 | LastSeenGUID = 0; | |||
5409 | break; | |||
5410 | } | |||
5411 | case bitc::FS_TYPE_TESTS: | |||
5412 | assert(PendingTypeTests.empty())(static_cast <bool> (PendingTypeTests.empty()) ? void ( 0) : __assert_fail ("PendingTypeTests.empty()", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 5412, __extension__ __PRETTY_FUNCTION__)); | |||
5413 | PendingTypeTests.insert(PendingTypeTests.end(), Record.begin(), | |||
5414 | Record.end()); | |||
5415 | break; | |||
5416 | ||||
5417 | case bitc::FS_TYPE_TEST_ASSUME_VCALLS: | |||
5418 | assert(PendingTypeTestAssumeVCalls.empty())(static_cast <bool> (PendingTypeTestAssumeVCalls.empty( )) ? void (0) : __assert_fail ("PendingTypeTestAssumeVCalls.empty()" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 5418, __extension__ __PRETTY_FUNCTION__)); | |||
5419 | for (unsigned I = 0; I != Record.size(); I += 2) | |||
5420 | PendingTypeTestAssumeVCalls.push_back({Record[I], Record[I+1]}); | |||
5421 | break; | |||
5422 | ||||
5423 | case bitc::FS_TYPE_CHECKED_LOAD_VCALLS: | |||
5424 | assert(PendingTypeCheckedLoadVCalls.empty())(static_cast <bool> (PendingTypeCheckedLoadVCalls.empty ()) ? void (0) : __assert_fail ("PendingTypeCheckedLoadVCalls.empty()" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 5424, __extension__ __PRETTY_FUNCTION__)); | |||
5425 | for (unsigned I = 0; I != Record.size(); I += 2) | |||
5426 | PendingTypeCheckedLoadVCalls.push_back({Record[I], Record[I+1]}); | |||
5427 | break; | |||
5428 | ||||
5429 | case bitc::FS_TYPE_TEST_ASSUME_CONST_VCALL: | |||
5430 | PendingTypeTestAssumeConstVCalls.push_back( | |||
5431 | {{Record[0], Record[1]}, {Record.begin() + 2, Record.end()}}); | |||
5432 | break; | |||
5433 | ||||
5434 | case bitc::FS_TYPE_CHECKED_LOAD_CONST_VCALL: | |||
5435 | PendingTypeCheckedLoadConstVCalls.push_back( | |||
5436 | {{Record[0], Record[1]}, {Record.begin() + 2, Record.end()}}); | |||
5437 | break; | |||
5438 | ||||
5439 | case bitc::FS_CFI_FUNCTION_DEFS: { | |||
5440 | std::set<std::string> &CfiFunctionDefs = TheIndex.cfiFunctionDefs(); | |||
5441 | for (unsigned I = 0; I != Record.size(); I += 2) | |||
5442 | CfiFunctionDefs.insert( | |||
5443 | {Strtab.data() + Record[I], static_cast<size_t>(Record[I + 1])}); | |||
5444 | break; | |||
5445 | } | |||
5446 | ||||
5447 | case bitc::FS_CFI_FUNCTION_DECLS: { | |||
5448 | std::set<std::string> &CfiFunctionDecls = TheIndex.cfiFunctionDecls(); | |||
5449 | for (unsigned I = 0; I != Record.size(); I += 2) | |||
5450 | CfiFunctionDecls.insert( | |||
5451 | {Strtab.data() + Record[I], static_cast<size_t>(Record[I + 1])}); | |||
5452 | break; | |||
5453 | } | |||
5454 | ||||
5455 | case bitc::FS_TYPE_ID: | |||
5456 | parseTypeIdSummaryRecord(Record, Strtab, TheIndex); | |||
5457 | break; | |||
5458 | } | |||
5459 | } | |||
5460 | llvm_unreachable("Exit infinite loop")::llvm::llvm_unreachable_internal("Exit infinite loop", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 5460); | |||
5461 | } | |||
5462 | ||||
5463 | // Parse the module string table block into the Index. | |||
5464 | // This populates the ModulePathStringTable map in the index. | |||
5465 | Error ModuleSummaryIndexBitcodeReader::parseModuleStringTable() { | |||
5466 | if (Stream.EnterSubBlock(bitc::MODULE_STRTAB_BLOCK_ID)) | |||
5467 | return error("Invalid record"); | |||
5468 | ||||
5469 | SmallVector<uint64_t, 64> Record; | |||
5470 | ||||
5471 | SmallString<128> ModulePath; | |||
5472 | ModuleSummaryIndex::ModuleInfo *LastSeenModule = nullptr; | |||
5473 | ||||
5474 | while (true) { | |||
5475 | BitstreamEntry Entry = Stream.advanceSkippingSubblocks(); | |||
5476 | ||||
5477 | switch (Entry.Kind) { | |||
5478 | case BitstreamEntry::SubBlock: // Handled for us already. | |||
5479 | case BitstreamEntry::Error: | |||
5480 | return error("Malformed block"); | |||
5481 | case BitstreamEntry::EndBlock: | |||
5482 | return Error::success(); | |||
5483 | case BitstreamEntry::Record: | |||
5484 | // The interesting case. | |||
5485 | break; | |||
5486 | } | |||
5487 | ||||
5488 | Record.clear(); | |||
5489 | switch (Stream.readRecord(Entry.ID, Record)) { | |||
5490 | default: // Default behavior: ignore. | |||
5491 | break; | |||
5492 | case bitc::MST_CODE_ENTRY: { | |||
5493 | // MST_ENTRY: [modid, namechar x N] | |||
5494 | uint64_t ModuleId = Record[0]; | |||
5495 | ||||
5496 | if (convertToString(Record, 1, ModulePath)) | |||
5497 | return error("Invalid record"); | |||
5498 | ||||
5499 | LastSeenModule = TheIndex.addModule(ModulePath, ModuleId); | |||
5500 | ModuleIdMap[ModuleId] = LastSeenModule->first(); | |||
5501 | ||||
5502 | ModulePath.clear(); | |||
5503 | break; | |||
5504 | } | |||
5505 | /// MST_CODE_HASH: [5*i32] | |||
5506 | case bitc::MST_CODE_HASH: { | |||
5507 | if (Record.size() != 5) | |||
5508 | return error("Invalid hash length " + Twine(Record.size()).str()); | |||
5509 | if (!LastSeenModule) | |||
5510 | return error("Invalid hash that does not follow a module path"); | |||
5511 | int Pos = 0; | |||
5512 | for (auto &Val : Record) { | |||
5513 | assert(!(Val >> 32) && "Unexpected high bits set")(static_cast <bool> (!(Val >> 32) && "Unexpected high bits set" ) ? void (0) : __assert_fail ("!(Val >> 32) && \"Unexpected high bits set\"" , "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 5513, __extension__ __PRETTY_FUNCTION__)); | |||
5514 | LastSeenModule->second.second[Pos++] = Val; | |||
5515 | } | |||
5516 | // Reset LastSeenModule to avoid overriding the hash unexpectedly. | |||
5517 | LastSeenModule = nullptr; | |||
5518 | break; | |||
5519 | } | |||
5520 | } | |||
5521 | } | |||
5522 | llvm_unreachable("Exit infinite loop")::llvm::llvm_unreachable_internal("Exit infinite loop", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 5522); | |||
5523 | } | |||
5524 | ||||
5525 | namespace { | |||
5526 | ||||
5527 | // FIXME: This class is only here to support the transition to llvm::Error. It | |||
5528 | // will be removed once this transition is complete. Clients should prefer to | |||
5529 | // deal with the Error value directly, rather than converting to error_code. | |||
5530 | class BitcodeErrorCategoryType : public std::error_category { | |||
5531 | const char *name() const noexcept override { | |||
5532 | return "llvm.bitcode"; | |||
5533 | } | |||
5534 | ||||
5535 | std::string message(int IE) const override { | |||
5536 | BitcodeError E = static_cast<BitcodeError>(IE); | |||
5537 | switch (E) { | |||
5538 | case BitcodeError::CorruptedBitcode: | |||
5539 | return "Corrupted bitcode"; | |||
5540 | } | |||
5541 | llvm_unreachable("Unknown error type!")::llvm::llvm_unreachable_internal("Unknown error type!", "/build/llvm-toolchain-snapshot-7~svn326246/lib/Bitcode/Reader/BitcodeReader.cpp" , 5541); | |||
5542 | } | |||
5543 | }; | |||
5544 | ||||
5545 | } // end anonymous namespace | |||
5546 | ||||
// Lazily-constructed singleton instance of the bitcode error category.
static ManagedStatic<BitcodeErrorCategoryType> ErrorCategory;

// Return the std::error_category used to classify bitcode reader errors.
const std::error_category &llvm::BitcodeErrorCategory() {
  return *ErrorCategory;
}
5552 | ||||
5553 | static Expected<StringRef> readBlobInRecord(BitstreamCursor &Stream, | |||
5554 | unsigned Block, unsigned RecordID) { | |||
5555 | if (Stream.EnterSubBlock(Block)) | |||
5556 | return error("Invalid record"); | |||
5557 | ||||
5558 | StringRef Strtab; | |||
5559 | while (true) { | |||
5560 | BitstreamEntry Entry = Stream.advance(); | |||
5561 | switch (Entry.Kind) { | |||
5562 | case BitstreamEntry::EndBlock: | |||
5563 | return Strtab; | |||
5564 | ||||
5565 | case BitstreamEntry::Error: | |||
5566 | return error("Malformed block"); | |||
5567 | ||||
5568 | case BitstreamEntry::SubBlock: | |||
5569 | if (Stream.SkipBlock()) | |||
5570 | return error("Malformed block"); | |||
5571 | break; | |||
5572 | ||||
5573 | case BitstreamEntry::Record: | |||
5574 | StringRef Blob; | |||
5575 | SmallVector<uint64_t, 1> Record; | |||
5576 | if (Stream.readRecord(Entry.ID, Record, &Blob) == RecordID) | |||
5577 | Strtab = Blob; | |||
5578 | break; | |||
5579 | } | |||
5580 | } | |||
5581 | } | |||
5582 | ||||
5583 | //===----------------------------------------------------------------------===// | |||
5584 | // External interface | |||
5585 | //===----------------------------------------------------------------------===// | |||
5586 | ||||
5587 | Expected<std::vector<BitcodeModule>> | |||
5588 | llvm::getBitcodeModuleList(MemoryBufferRef Buffer) { | |||
5589 | auto FOrErr = getBitcodeFileContents(Buffer); | |||
5590 | if (!FOrErr) | |||
5591 | return FOrErr.takeError(); | |||
5592 | return std::move(FOrErr->Mods); | |||
5593 | } | |||
5594 | ||||
// Walk the top-level structure of a bitcode buffer, collecting every module,
// the (last-wins) string table(s), and the first symbol table into a
// BitcodeFileContents. Unknown top-level blocks and records are skipped.
Expected<BitcodeFileContents>
llvm::getBitcodeFileContents(MemoryBufferRef Buffer) {
  Expected<BitstreamCursor> StreamOrErr = initStream(Buffer);
  if (!StreamOrErr)
    return StreamOrErr.takeError();
  BitstreamCursor &Stream = *StreamOrErr;

  BitcodeFileContents F;
  while (true) {
    // Byte offset of the module we are about to scan; bit offsets stored in
    // each BitcodeModule below are made relative to this position.
    uint64_t BCBegin = Stream.getCurrentByteNo();

    // We may be consuming bitcode from a client that leaves garbage at the end
    // of the bitcode stream (e.g. Apple's ar tool). If we are close enough to
    // the end that there cannot possibly be another module, stop looking.
    if (BCBegin + 8 >= Stream.getBitcodeBytes().size())
      return F;

    BitstreamEntry Entry = Stream.advance();
    switch (Entry.Kind) {
    case BitstreamEntry::EndBlock:
    case BitstreamEntry::Error:
      return error("Malformed block");

    case BitstreamEntry::SubBlock: {
      // -1 means "no identification block preceding this module".
      uint64_t IdentificationBit = -1ull;
      if (Entry.ID == bitc::IDENTIFICATION_BLOCK_ID) {
        IdentificationBit = Stream.GetCurrentBitNo() - BCBegin * 8;
        if (Stream.SkipBlock())
          return error("Malformed block");

        // An identification block must be immediately followed by its
        // module block.
        Entry = Stream.advance();
        if (Entry.Kind != BitstreamEntry::SubBlock ||
            Entry.ID != bitc::MODULE_BLOCK_ID)
          return error("Malformed block");
      }

      if (Entry.ID == bitc::MODULE_BLOCK_ID) {
        uint64_t ModuleBit = Stream.GetCurrentBitNo() - BCBegin * 8;
        if (Stream.SkipBlock())
          return error("Malformed block");

        // Record the byte range covering this module (including any
        // identification block) plus the relative bit offsets found above.
        F.Mods.push_back({Stream.getBitcodeBytes().slice(
                              BCBegin, Stream.getCurrentByteNo() - BCBegin),
                          Buffer.getBufferIdentifier(), IdentificationBit,
                          ModuleBit});
        continue;
      }

      if (Entry.ID == bitc::STRTAB_BLOCK_ID) {
        Expected<StringRef> Strtab =
            readBlobInRecord(Stream, bitc::STRTAB_BLOCK_ID, bitc::STRTAB_BLOB);
        if (!Strtab)
          return Strtab.takeError();
        // This string table is used by every preceding bitcode module that does
        // not have its own string table. A bitcode file may have multiple
        // string tables if it was created by binary concatenation, for example
        // with "llvm-cat -b". Walk backwards so we stop at the first module
        // that already received a string table.
        for (auto I = F.Mods.rbegin(), E = F.Mods.rend(); I != E; ++I) {
          if (!I->Strtab.empty())
            break;
          I->Strtab = *Strtab;
        }
        // Similarly, the string table is used by every preceding symbol table;
        // normally there will be just one unless the bitcode file was created
        // by binary concatenation.
        if (!F.Symtab.empty() && F.StrtabForSymtab.empty())
          F.StrtabForSymtab = *Strtab;
        continue;
      }

      if (Entry.ID == bitc::SYMTAB_BLOCK_ID) {
        Expected<StringRef> SymtabOrErr =
            readBlobInRecord(Stream, bitc::SYMTAB_BLOCK_ID, bitc::SYMTAB_BLOB);
        if (!SymtabOrErr)
          return SymtabOrErr.takeError();

        // We can expect the bitcode file to have multiple symbol tables if it
        // was created by binary concatenation. In that case we silently
        // ignore any subsequent symbol tables, which is fine because this is a
        // low level function. The client is expected to notice that the number
        // of modules in the symbol table does not match the number of modules
        // in the input file and regenerate the symbol table.
        if (F.Symtab.empty())
          F.Symtab = *SymtabOrErr;
        continue;
      }

      // Unrecognized top-level block: skip it.
      if (Stream.SkipBlock())
        return error("Malformed block");
      continue;
    }
    case BitstreamEntry::Record:
      Stream.skipRecord(Entry.ID);
      continue;
    }
  }
}
5692 | ||||
/// \brief Get a lazy one-at-time loading module from bitcode.
///
/// This isn't always used in a lazy context. In particular, it's also used by
/// \a parseModule(). If this is truly lazy, then we need to eagerly pull
/// in forward-referenced functions from block address references.
///
/// \param[in] MaterializeAll Set to \c true if we should materialize
/// everything.
Expected<std::unique_ptr<Module>>
BitcodeModule::getModuleImpl(LLVMContext &Context, bool MaterializeAll,
                             bool ShouldLazyLoadMetadata, bool IsImporting) {
  BitstreamCursor Stream(Buffer);

  // If this module was preceded by an identification block, decode the
  // producer string from it first; it is handed to the BitcodeReader below.
  std::string ProducerIdentification;
  if (IdentificationBit != -1ull) {
    Stream.JumpToBit(IdentificationBit);
    Expected<std::string> ProducerIdentificationOrErr =
        readIdentificationBlock(Stream);
    if (!ProducerIdentificationOrErr)
      return ProducerIdentificationOrErr.takeError();

    ProducerIdentification = *ProducerIdentificationOrErr;
  }

  Stream.JumpToBit(ModuleBit);
  // Raw new is deliberate: R is installed as M's materializer below, and the
  // Module manages its lifetime from then on (see Module::setMaterializer).
  auto *R = new BitcodeReader(std::move(Stream), Strtab, ProducerIdentification,
                              Context);

  std::unique_ptr<Module> M =
      llvm::make_unique<Module>(ModuleIdentifier, Context);
  M->setMaterializer(R);

  // Delay parsing Metadata if ShouldLazyLoadMetadata is true.
  if (Error Err =
          R->parseBitcodeInto(M.get(), ShouldLazyLoadMetadata, IsImporting))
    return std::move(Err);

  if (MaterializeAll) {
    // Read in the entire module, and destroy the BitcodeReader.
    if (Error Err = M->materializeAll())
      return std::move(Err);
  } else {
    // Resolve forward references from blockaddresses.
    if (Error Err = R->materializeForwardReferencedFunctions())
      return std::move(Err);
  }
  return std::move(M);
}
5741 | ||||
5742 | Expected<std::unique_ptr<Module>> | |||
5743 | BitcodeModule::getLazyModule(LLVMContext &Context, bool ShouldLazyLoadMetadata, | |||
5744 | bool IsImporting) { | |||
5745 | return getModuleImpl(Context, false, ShouldLazyLoadMetadata, IsImporting); | |||
5746 | } | |||
5747 | ||||
5748 | // Parse the specified bitcode buffer and merge the index into CombinedIndex. | |||
5749 | // We don't use ModuleIdentifier here because the client may need to control the | |||
5750 | // module path used in the combined summary (e.g. when reading summaries for | |||
5751 | // regular LTO modules). | |||
5752 | Error BitcodeModule::readSummary(ModuleSummaryIndex &CombinedIndex, | |||
5753 | StringRef ModulePath, uint64_t ModuleId) { | |||
5754 | BitstreamCursor Stream(Buffer); | |||
5755 | Stream.JumpToBit(ModuleBit); | |||
5756 | ||||
5757 | ModuleSummaryIndexBitcodeReader R(std::move(Stream), Strtab, CombinedIndex, | |||
5758 | ModulePath, ModuleId); | |||
5759 | return R.parseModule(); | |||
5760 | } | |||
5761 | ||||
5762 | // Parse the specified bitcode buffer, returning the function info index. | |||
5763 | Expected<std::unique_ptr<ModuleSummaryIndex>> BitcodeModule::getSummary() { | |||
5764 | BitstreamCursor Stream(Buffer); | |||
5765 | Stream.JumpToBit(ModuleBit); | |||
5766 | ||||
5767 | auto Index = | |||
5768 | llvm::make_unique<ModuleSummaryIndex>(/*IsPerformingAnalysis=*/false); | |||
5769 | ModuleSummaryIndexBitcodeReader R(std::move(Stream), Strtab, *Index, | |||
5770 | ModuleIdentifier, 0); | |||
5771 | ||||
5772 | if (Error Err = R.parseModule()) | |||
5773 | return std::move(Err); | |||
5774 | ||||
5775 | return std::move(Index); | |||
5776 | } | |||
5777 | ||||
5778 | // Check if the given bitcode buffer contains a global value summary block. | |||
5779 | Expected<BitcodeLTOInfo> BitcodeModule::getLTOInfo() { | |||
5780 | BitstreamCursor Stream(Buffer); | |||
5781 | Stream.JumpToBit(ModuleBit); | |||
5782 | ||||
5783 | if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID)) | |||
5784 | return error("Invalid record"); | |||
5785 | ||||
5786 | while (true) { | |||
5787 | BitstreamEntry Entry = Stream.advance(); | |||
5788 | ||||
5789 | switch (Entry.Kind) { | |||
5790 | case BitstreamEntry::Error: | |||
5791 | return error("Malformed block"); | |||
5792 | case BitstreamEntry::EndBlock: | |||
5793 | return BitcodeLTOInfo{/*IsThinLTO=*/false, /*HasSummary=*/false}; | |||
5794 | ||||
5795 | case BitstreamEntry::SubBlock: | |||
5796 | if (Entry.ID == bitc::GLOBALVAL_SUMMARY_BLOCK_ID) | |||
5797 | return BitcodeLTOInfo{/*IsThinLTO=*/true, /*HasSummary=*/true}; | |||
5798 | ||||
5799 | if (Entry.ID == bitc::FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID) | |||
5800 | return BitcodeLTOInfo{/*IsThinLTO=*/false, /*HasSummary=*/true}; | |||
5801 | ||||
5802 | // Ignore other sub-blocks. | |||
5803 | if (Stream.SkipBlock()) | |||
5804 | return error("Malformed block"); | |||
5805 | continue; | |||
5806 | ||||
5807 | case BitstreamEntry::Record: | |||
5808 | Stream.skipRecord(Entry.ID); | |||
5809 | continue; | |||
5810 | } | |||
5811 | } | |||
5812 | } | |||
5813 | ||||
5814 | static Expected<BitcodeModule> getSingleModule(MemoryBufferRef Buffer) { | |||
5815 | Expected<std::vector<BitcodeModule>> MsOrErr = getBitcodeModuleList(Buffer); | |||
5816 | if (!MsOrErr) | |||
5817 | return MsOrErr.takeError(); | |||
5818 | ||||
5819 | if (MsOrErr->size() != 1) | |||
5820 | return error("Expected a single module"); | |||
5821 | ||||
5822 | return (*MsOrErr)[0]; | |||
5823 | } | |||
5824 | ||||
5825 | Expected<std::unique_ptr<Module>> | |||
5826 | llvm::getLazyBitcodeModule(MemoryBufferRef Buffer, LLVMContext &Context, | |||
5827 | bool ShouldLazyLoadMetadata, bool IsImporting) { | |||
5828 | Expected<BitcodeModule> BM = getSingleModule(Buffer); | |||
5829 | if (!BM) | |||
5830 | return BM.takeError(); | |||
5831 | ||||
5832 | return BM->getLazyModule(Context, ShouldLazyLoadMetadata, IsImporting); | |||
5833 | } | |||
5834 | ||||
5835 | Expected<std::unique_ptr<Module>> llvm::getOwningLazyBitcodeModule( | |||
5836 | std::unique_ptr<MemoryBuffer> &&Buffer, LLVMContext &Context, | |||
5837 | bool ShouldLazyLoadMetadata, bool IsImporting) { | |||
5838 | auto MOrErr = getLazyBitcodeModule(*Buffer, Context, ShouldLazyLoadMetadata, | |||
5839 | IsImporting); | |||
5840 | if (MOrErr) | |||
5841 | (*MOrErr)->setOwnedMemoryBuffer(std::move(Buffer)); | |||
5842 | return MOrErr; | |||
5843 | } | |||
5844 | ||||
5845 | Expected<std::unique_ptr<Module>> | |||
5846 | BitcodeModule::parseModule(LLVMContext &Context) { | |||
5847 | return getModuleImpl(Context, true, false, false); | |||
5848 | // TODO: Restore the use-lists to the in-memory state when the bitcode was | |||
5849 | // written. We must defer until the Module has been fully materialized. | |||
5850 | } | |||
5851 | ||||
5852 | Expected<std::unique_ptr<Module>> llvm::parseBitcodeFile(MemoryBufferRef Buffer, | |||
5853 | LLVMContext &Context) { | |||
5854 | Expected<BitcodeModule> BM = getSingleModule(Buffer); | |||
5855 | if (!BM) | |||
5856 | return BM.takeError(); | |||
5857 | ||||
5858 | return BM->parseModule(Context); | |||
5859 | } | |||
5860 | ||||
5861 | Expected<std::string> llvm::getBitcodeTargetTriple(MemoryBufferRef Buffer) { | |||
5862 | Expected<BitstreamCursor> StreamOrErr = initStream(Buffer); | |||
5863 | if (!StreamOrErr) | |||
5864 | return StreamOrErr.takeError(); | |||
5865 | ||||
5866 | return readTriple(*StreamOrErr); | |||
5867 | } | |||
5868 | ||||
5869 | Expected<bool> llvm::isBitcodeContainingObjCCategory(MemoryBufferRef Buffer) { | |||
5870 | Expected<BitstreamCursor> StreamOrErr = initStream(Buffer); | |||
5871 | if (!StreamOrErr) | |||
5872 | return StreamOrErr.takeError(); | |||
5873 | ||||
5874 | return hasObjCCategory(*StreamOrErr); | |||
5875 | } | |||
5876 | ||||
5877 | Expected<std::string> llvm::getBitcodeProducerString(MemoryBufferRef Buffer) { | |||
5878 | Expected<BitstreamCursor> StreamOrErr = initStream(Buffer); | |||
5879 | if (!StreamOrErr) | |||
5880 | return StreamOrErr.takeError(); | |||
5881 | ||||
5882 | return readIdentificationCode(*StreamOrErr); | |||
5883 | } | |||
5884 | ||||
5885 | Error llvm::readModuleSummaryIndex(MemoryBufferRef Buffer, | |||
5886 | ModuleSummaryIndex &CombinedIndex, | |||
5887 | uint64_t ModuleId) { | |||
5888 | Expected<BitcodeModule> BM = getSingleModule(Buffer); | |||
5889 | if (!BM) | |||
5890 | return BM.takeError(); | |||
5891 | ||||
5892 | return BM->readSummary(CombinedIndex, BM->getModuleIdentifier(), ModuleId); | |||
5893 | } | |||
5894 | ||||
5895 | Expected<std::unique_ptr<ModuleSummaryIndex>> | |||
5896 | llvm::getModuleSummaryIndex(MemoryBufferRef Buffer) { | |||
5897 | Expected<BitcodeModule> BM = getSingleModule(Buffer); | |||
5898 | if (!BM) | |||
5899 | return BM.takeError(); | |||
5900 | ||||
5901 | return BM->getSummary(); | |||
5902 | } | |||
5903 | ||||
5904 | Expected<BitcodeLTOInfo> llvm::getBitcodeLTOInfo(MemoryBufferRef Buffer) { | |||
5905 | Expected<BitcodeModule> BM = getSingleModule(Buffer); | |||
5906 | if (!BM) | |||
5907 | return BM.takeError(); | |||
5908 | ||||
5909 | return BM->getLTOInfo(); | |||
5910 | } | |||
5911 | ||||
5912 | Expected<std::unique_ptr<ModuleSummaryIndex>> | |||
5913 | llvm::getModuleSummaryIndexForFile(StringRef Path, | |||
5914 | bool IgnoreEmptyThinLTOIndexFile) { | |||
5915 | ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr = | |||
5916 | MemoryBuffer::getFileOrSTDIN(Path); | |||
5917 | if (!FileOrErr) | |||
5918 | return errorCodeToError(FileOrErr.getError()); | |||
5919 | if (IgnoreEmptyThinLTOIndexFile && !(*FileOrErr)->getBufferSize()) | |||
5920 | return nullptr; | |||
5921 | return getModuleSummaryIndex(**FileOrErr); | |||
5922 | } |
1 | //===- Twine.h - Fast Temporary String Concatenation ------------*- C++ -*-===// |
2 | // |
3 | // The LLVM Compiler Infrastructure |
4 | // |
5 | // This file is distributed under the University of Illinois Open Source |
6 | // License. See LICENSE.TXT for details. |
7 | // |
8 | //===----------------------------------------------------------------------===// |
9 | |
10 | #ifndef LLVM_ADT_TWINE_H |
11 | #define LLVM_ADT_TWINE_H |
12 | |
13 | #include "llvm/ADT/SmallVector.h" |
14 | #include "llvm/ADT/StringRef.h" |
15 | #include "llvm/Support/ErrorHandling.h" |
16 | #include <cassert> |
17 | #include <cstdint> |
18 | #include <string> |
19 | |
20 | namespace llvm { |
21 | |
22 | class formatv_object_base; |
23 | class raw_ostream; |
24 | |
25 | /// Twine - A lightweight data structure for efficiently representing the |
26 | /// concatenation of temporary values as strings. |
27 | /// |
28 | /// A Twine is a kind of rope, it represents a concatenated string using a |
29 | /// binary-tree, where the string is the preorder of the nodes. Since the |
30 | /// Twine can be efficiently rendered into a buffer when its result is used, |
31 | /// it avoids the cost of generating temporary values for intermediate string |
32 | /// results -- particularly in cases when the Twine result is never |
33 | /// required. By explicitly tracking the type of leaf nodes, we can also avoid |
34 | /// the creation of temporary strings for conversion operations (such as |
35 | /// appending an integer to a string). |
36 | /// |
37 | /// A Twine is not intended for use directly and should not be stored, its |
38 | /// implementation relies on the ability to store pointers to temporary stack |
39 | /// objects which may be deallocated at the end of a statement. Twines should |
40 | /// only be used as const references in arguments, when an API wishes |
41 | /// to accept possibly-concatenated strings. |
42 | /// |
43 | /// Twines support a special 'null' value, which always concatenates to form |
44 | /// itself, and renders as an empty string. This can be returned from APIs to |
45 | /// effectively nullify any concatenations performed on the result. |
46 | /// |
47 | /// \b Implementation |
48 | /// |
49 | /// Given the nature of a Twine, it is not possible for the Twine's |
50 | /// concatenation method to construct interior nodes; the result must be |
51 | /// represented inside the returned value. For this reason a Twine object |
52 | /// actually holds two values, the left- and right-hand sides of a |
53 | /// concatenation. We also have nullary Twine objects, which are effectively |
54 | /// sentinel values that represent empty strings. |
55 | /// |
56 | /// Thus, a Twine can effectively have zero, one, or two children. The \see |
57 | /// isNullary(), \see isUnary(), and \see isBinary() predicates exist for |
58 | /// testing the number of children. |
59 | /// |
60 | /// We maintain a number of invariants on Twine objects (FIXME: Why): |
61 | /// - Nullary twines are always represented with their Kind on the left-hand |
62 | /// side, and the Empty kind on the right-hand side. |
63 | /// - Unary twines are always represented with the value on the left-hand |
64 | /// side, and the Empty kind on the right-hand side. |
65 | /// - If a Twine has another Twine as a child, that child should always be |
66 | /// binary (otherwise it could have been folded into the parent). |
67 | /// |
68 | /// These invariants are checked by \see isValid(). |
69 | /// |
70 | /// \b Efficiency Considerations |
71 | /// |
72 | /// The Twine is designed to yield efficient and small code for common |
73 | /// situations. For this reason, the concat() method is inlined so that |
74 | /// concatenations of leaf nodes can be optimized into stores directly into a |
75 | /// single stack allocated object. |
76 | /// |
77 | /// In practice, not all compilers can be trusted to optimize concat() fully, |
78 | /// so we provide two additional methods (and accompanying operator+ |
79 | /// overloads) to guarantee that particularly important cases (cstring plus |
80 | /// StringRef) codegen as desired. |
81 | class Twine { |
82 | /// NodeKind - Represent the type of an argument. |
83 | enum NodeKind : unsigned char { |
84 | /// An empty string; the result of concatenating anything with it is also |
85 | /// empty. |
86 | NullKind, |
87 | |
88 | /// The empty string. |
89 | EmptyKind, |
90 | |
91 | /// A pointer to a Twine instance. |
92 | TwineKind, |
93 | |
94 | /// A pointer to a C string instance. |
95 | CStringKind, |
96 | |
97 | /// A pointer to an std::string instance. |
98 | StdStringKind, |
99 | |
100 | /// A pointer to a StringRef instance. |
101 | StringRefKind, |
102 | |
103 | /// A pointer to a SmallString instance. |
104 | SmallStringKind, |
105 | |
106 | /// A pointer to a formatv_object_base instance. |
107 | FormatvObjectKind, |
108 | |
109 | /// A char value, to render as a character. |
110 | CharKind, |
111 | |
112 | /// An unsigned int value, to render as an unsigned decimal integer. |
113 | DecUIKind, |
114 | |
115 | /// An int value, to render as a signed decimal integer. |
116 | DecIKind, |
117 | |
118 | /// A pointer to an unsigned long value, to render as an unsigned decimal |
119 | /// integer. |
120 | DecULKind, |
121 | |
122 | /// A pointer to a long value, to render as a signed decimal integer. |
123 | DecLKind, |
124 | |
125 | /// A pointer to an unsigned long long value, to render as an unsigned |
126 | /// decimal integer. |
127 | DecULLKind, |
128 | |
129 | /// A pointer to a long long value, to render as a signed decimal integer. |
130 | DecLLKind, |
131 | |
132 | /// A pointer to a uint64_t value, to render as an unsigned hexadecimal |
133 | /// integer. |
134 | UHexKind |
135 | }; |
136 | |
137 | union Child |
138 | { |
139 | const Twine *twine; |
140 | const char *cString; |
141 | const std::string *stdString; |
142 | const StringRef *stringRef; |
143 | const SmallVectorImpl<char> *smallString; |
144 | const formatv_object_base *formatvObject; |
145 | char character; |
146 | unsigned int decUI; |
147 | int decI; |
148 | const unsigned long *decUL; |
149 | const long *decL; |
150 | const unsigned long long *decULL; |
151 | const long long *decLL; |
152 | const uint64_t *uHex; |
153 | }; |
154 | |
155 | /// LHS - The prefix in the concatenation, which may be uninitialized for |
156 | /// Null or Empty kinds. |
157 | Child LHS; |
158 | |
159 | /// RHS - The suffix in the concatenation, which may be uninitialized for |
160 | /// Null or Empty kinds. |
161 | Child RHS; |
162 | |
163 | /// LHSKind - The NodeKind of the left hand side, \see getLHSKind(). |
164 | NodeKind LHSKind = EmptyKind; |
165 | |
166 | /// RHSKind - The NodeKind of the right hand side, \see getRHSKind(). |
167 | NodeKind RHSKind = EmptyKind; |
168 | |
169 | /// Construct a nullary twine; the kind must be NullKind or EmptyKind. |
170 | explicit Twine(NodeKind Kind) : LHSKind(Kind) { |
171 | assert(isNullary() && "Invalid kind!")(static_cast <bool> (isNullary() && "Invalid kind!" ) ? void (0) : __assert_fail ("isNullary() && \"Invalid kind!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/Twine.h" , 171, __extension__ __PRETTY_FUNCTION__)); |
172 | } |
173 | |
174 | /// Construct a binary twine. |
175 | explicit Twine(const Twine &LHS, const Twine &RHS) |
176 | : LHSKind(TwineKind), RHSKind(TwineKind) { |
177 | this->LHS.twine = &LHS; |
178 | this->RHS.twine = &RHS; |
179 | assert(isValid() && "Invalid twine!")(static_cast <bool> (isValid() && "Invalid twine!" ) ? void (0) : __assert_fail ("isValid() && \"Invalid twine!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/Twine.h" , 179, __extension__ __PRETTY_FUNCTION__)); |
180 | } |
181 | |
182 | /// Construct a twine from explicit values. |
183 | explicit Twine(Child LHS, NodeKind LHSKind, Child RHS, NodeKind RHSKind) |
184 | : LHS(LHS), RHS(RHS), LHSKind(LHSKind), RHSKind(RHSKind) { |
185 | assert(isValid() && "Invalid twine!")(static_cast <bool> (isValid() && "Invalid twine!" ) ? void (0) : __assert_fail ("isValid() && \"Invalid twine!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/Twine.h" , 185, __extension__ __PRETTY_FUNCTION__)); |
186 | } |
187 | |
188 | /// Check for the null twine. |
189 | bool isNull() const { |
190 | return getLHSKind() == NullKind; |
191 | } |
192 | |
193 | /// Check for the empty twine. |
194 | bool isEmpty() const { |
195 | return getLHSKind() == EmptyKind; |
196 | } |
197 | |
198 | /// Check if this is a nullary twine (null or empty). |
199 | bool isNullary() const { |
200 | return isNull() || isEmpty(); |
201 | } |
202 | |
203 | /// Check if this is a unary twine. |
204 | bool isUnary() const { |
205 | return getRHSKind() == EmptyKind && !isNullary(); |
206 | } |
207 | |
208 | /// Check if this is a binary twine. |
209 | bool isBinary() const { |
210 | return getLHSKind() != NullKind && getRHSKind() != EmptyKind; |
211 | } |
212 | |
213 | /// Check if this is a valid twine (satisfying the invariants on |
214 | /// order and number of arguments). |
215 | bool isValid() const { |
216 | // Nullary twines always have Empty on the RHS. |
217 | if (isNullary() && getRHSKind() != EmptyKind) |
218 | return false; |
219 | |
220 | // Null should never appear on the RHS. |
221 | if (getRHSKind() == NullKind) |
222 | return false; |
223 | |
224 | // The RHS cannot be non-empty if the LHS is empty. |
225 | if (getRHSKind() != EmptyKind && getLHSKind() == EmptyKind) |
226 | return false; |
227 | |
228 | // A twine child should always be binary. |
229 | if (getLHSKind() == TwineKind && |
230 | !LHS.twine->isBinary()) |
231 | return false; |
232 | if (getRHSKind() == TwineKind && |
233 | !RHS.twine->isBinary()) |
234 | return false; |
235 | |
236 | return true; |
237 | } |
238 | |
239 | /// Get the NodeKind of the left-hand side. |
240 | NodeKind getLHSKind() const { return LHSKind; } |
241 | |
242 | /// Get the NodeKind of the right-hand side. |
243 | NodeKind getRHSKind() const { return RHSKind; } |
244 | |
245 | /// Print one child from a twine. |
246 | void printOneChild(raw_ostream &OS, Child Ptr, NodeKind Kind) const; |
247 | |
248 | /// Print the representation of one child from a twine. |
249 | void printOneChildRepr(raw_ostream &OS, Child Ptr, |
250 | NodeKind Kind) const; |
251 | |
252 | public: |
253 | /// @name Constructors |
254 | /// @{ |
255 | |
256 | /// Construct from an empty string. |
257 | /*implicit*/ Twine() { |
258 | assert(isValid() && "Invalid twine!")(static_cast <bool> (isValid() && "Invalid twine!" ) ? void (0) : __assert_fail ("isValid() && \"Invalid twine!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/Twine.h" , 258, __extension__ __PRETTY_FUNCTION__)); |
259 | } |
260 | |
261 | Twine(const Twine &) = default; |
262 | |
263 | /// Construct from a C string. |
264 | /// |
265 | /// We take care here to optimize "" into the empty twine -- this will be |
266 | /// optimized out for string constants. This allows Twine arguments have |
267 | /// default "" values, without introducing unnecessary string constants. |
268 | /*implicit*/ Twine(const char *Str) { |
269 | if (Str[0] != '\0') { |
270 | LHS.cString = Str; |
271 | LHSKind = CStringKind; |
272 | } else |
273 | LHSKind = EmptyKind; |
274 | |
275 | assert(isValid() && "Invalid twine!")(static_cast <bool> (isValid() && "Invalid twine!" ) ? void (0) : __assert_fail ("isValid() && \"Invalid twine!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/Twine.h" , 275, __extension__ __PRETTY_FUNCTION__)); |
276 | } |
277 | |
278 | /// Construct from an std::string. |
279 | /*implicit*/ Twine(const std::string &Str) : LHSKind(StdStringKind) { |
280 | LHS.stdString = &Str; |
281 | assert(isValid() && "Invalid twine!")(static_cast <bool> (isValid() && "Invalid twine!" ) ? void (0) : __assert_fail ("isValid() && \"Invalid twine!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/Twine.h" , 281, __extension__ __PRETTY_FUNCTION__)); |
282 | } |
283 | |
284 | /// Construct from a StringRef. |
285 | /*implicit*/ Twine(const StringRef &Str) : LHSKind(StringRefKind) { |
286 | LHS.stringRef = &Str; |
287 | assert(isValid() && "Invalid twine!")(static_cast <bool> (isValid() && "Invalid twine!" ) ? void (0) : __assert_fail ("isValid() && \"Invalid twine!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/Twine.h" , 287, __extension__ __PRETTY_FUNCTION__)); |
288 | } |
289 | |
290 | /// Construct from a SmallString. |
291 | /*implicit*/ Twine(const SmallVectorImpl<char> &Str) |
292 | : LHSKind(SmallStringKind) { |
293 | LHS.smallString = &Str; |
294 | assert(isValid() && "Invalid twine!")(static_cast <bool> (isValid() && "Invalid twine!" ) ? void (0) : __assert_fail ("isValid() && \"Invalid twine!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/Twine.h" , 294, __extension__ __PRETTY_FUNCTION__)); |
295 | } |
296 | |
297 | /// Construct from a formatv_object_base. |
298 | /*implicit*/ Twine(const formatv_object_base &Fmt) |
299 | : LHSKind(FormatvObjectKind) { |
300 | LHS.formatvObject = &Fmt; |
301 | assert(isValid() && "Invalid twine!")(static_cast <bool> (isValid() && "Invalid twine!" ) ? void (0) : __assert_fail ("isValid() && \"Invalid twine!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/Twine.h" , 301, __extension__ __PRETTY_FUNCTION__)); |
302 | } |
303 | |
304 | /// Construct from a char. |
305 | explicit Twine(char Val) : LHSKind(CharKind) { |
306 | LHS.character = Val; |
307 | } |
308 | |
309 | /// Construct from a signed char. |
310 | explicit Twine(signed char Val) : LHSKind(CharKind) { |
311 | LHS.character = static_cast<char>(Val); |
312 | } |
313 | |
314 | /// Construct from an unsigned char. |
315 | explicit Twine(unsigned char Val) : LHSKind(CharKind) { |
316 | LHS.character = static_cast<char>(Val); |
317 | } |
318 | |
319 | /// Construct a twine to print \p Val as an unsigned decimal integer. |
320 | explicit Twine(unsigned Val) : LHSKind(DecUIKind) { |
321 | LHS.decUI = Val; |
322 | } |
323 | |
324 | /// Construct a twine to print \p Val as a signed decimal integer. |
325 | explicit Twine(int Val) : LHSKind(DecIKind) { |
326 | LHS.decI = Val; |
327 | } |
328 | |
329 | /// Construct a twine to print \p Val as an unsigned decimal integer. |
330 | explicit Twine(const unsigned long &Val) : LHSKind(DecULKind) { |
331 | LHS.decUL = &Val; |
332 | } |
333 | |
334 | /// Construct a twine to print \p Val as a signed decimal integer. |
335 | explicit Twine(const long &Val) : LHSKind(DecLKind) { |
336 | LHS.decL = &Val; |
337 | } |
338 | |
339 | /// Construct a twine to print \p Val as an unsigned decimal integer. |
340 | explicit Twine(const unsigned long long &Val) : LHSKind(DecULLKind) { |
341 | LHS.decULL = &Val; |
342 | } |
343 | |
344 | /// Construct a twine to print \p Val as a signed decimal integer. |
345 | explicit Twine(const long long &Val) : LHSKind(DecLLKind) { |
346 | LHS.decLL = &Val; |
347 | } |
348 | |
349 | // FIXME: Unfortunately, to make sure this is as efficient as possible we |
350 | // need extra binary constructors from particular types. We can't rely on |
351 | // the compiler to be smart enough to fold operator+()/concat() down to the |
352 | // right thing. Yet. |
353 | |
354 | /// Construct as the concatenation of a C string and a StringRef. |
355 | /*implicit*/ Twine(const char *LHS, const StringRef &RHS) |
356 | : LHSKind(CStringKind), RHSKind(StringRefKind) { |
357 | this->LHS.cString = LHS; |
358 | this->RHS.stringRef = &RHS; |
359 | assert(isValid() && "Invalid twine!")(static_cast <bool> (isValid() && "Invalid twine!" ) ? void (0) : __assert_fail ("isValid() && \"Invalid twine!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/Twine.h" , 359, __extension__ __PRETTY_FUNCTION__)); |
360 | } |
361 | |
362 | /// Construct as the concatenation of a StringRef and a C string. |
363 | /*implicit*/ Twine(const StringRef &LHS, const char *RHS) |
364 | : LHSKind(StringRefKind), RHSKind(CStringKind) { |
365 | this->LHS.stringRef = &LHS; |
366 | this->RHS.cString = RHS; |
367 | assert(isValid() && "Invalid twine!")(static_cast <bool> (isValid() && "Invalid twine!" ) ? void (0) : __assert_fail ("isValid() && \"Invalid twine!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/Twine.h" , 367, __extension__ __PRETTY_FUNCTION__)); |
368 | } |
369 | |
370 | /// Since the intended use of twines is as temporary objects, assignments |
371 | /// when concatenating might cause undefined behavior or stack corruptions |
372 | Twine &operator=(const Twine &) = delete; |
373 | |
374 | /// Create a 'null' string, which is an empty string that always |
375 | /// concatenates to form another empty string. |
376 | static Twine createNull() { |
377 | return Twine(NullKind); |
378 | } |
379 | |
380 | /// @} |
381 | /// @name Numeric Conversions |
382 | /// @{ |
383 | |
384 | // Construct a twine to print \p Val as an unsigned hexadecimal integer. |
385 | static Twine utohexstr(const uint64_t &Val) { |
386 | Child LHS, RHS; |
387 | LHS.uHex = &Val; |
388 | RHS.twine = nullptr; |
389 | return Twine(LHS, UHexKind, RHS, EmptyKind); |
390 | } |
391 | |
392 | /// @} |
393 | /// @name Predicate Operations |
394 | /// @{ |
395 | |
396 | /// Check if this twine is trivially empty; a false return value does not |
397 | /// necessarily mean the twine is empty. |
398 | bool isTriviallyEmpty() const { |
399 | return isNullary(); |
400 | } |
401 | |
402 | /// Return true if this twine can be dynamically accessed as a single |
403 | /// StringRef value with getSingleStringRef(). |
404 | bool isSingleStringRef() const { |
405 | if (getRHSKind() != EmptyKind) return false; |
406 | |
407 | switch (getLHSKind()) { |
408 | case EmptyKind: |
409 | case CStringKind: |
410 | case StdStringKind: |
411 | case StringRefKind: |
412 | case SmallStringKind: |
413 | return true; |
414 | default: |
415 | return false; |
416 | } |
417 | } |
418 | |
419 | /// @} |
420 | /// @name String Operations |
421 | /// @{ |
422 | |
423 | Twine concat(const Twine &Suffix) const; |
424 | |
425 | /// @} |
426 | /// @name Output & Conversion. |
427 | /// @{ |
428 | |
429 | /// Return the twine contents as a std::string. |
430 | std::string str() const; |
431 | |
432 | /// Append the concatenated string into the given SmallString or SmallVector. |
433 | void toVector(SmallVectorImpl<char> &Out) const; |
434 | |
435 | /// This returns the twine as a single StringRef. This method is only valid |
436 | /// if isSingleStringRef() is true. |
437 | StringRef getSingleStringRef() const { |
438 | assert(isSingleStringRef() &&"This cannot be had as a single stringref!")(static_cast <bool> (isSingleStringRef() &&"This cannot be had as a single stringref!" ) ? void (0) : __assert_fail ("isSingleStringRef() &&\"This cannot be had as a single stringref!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/Twine.h" , 438, __extension__ __PRETTY_FUNCTION__)); |
439 | switch (getLHSKind()) { |
440 | default: llvm_unreachable("Out of sync with isSingleStringRef")::llvm::llvm_unreachable_internal("Out of sync with isSingleStringRef" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/Twine.h" , 440); |
441 | case EmptyKind: return StringRef(); |
442 | case CStringKind: return StringRef(LHS.cString); |
443 | case StdStringKind: return StringRef(*LHS.stdString); |
444 | case StringRefKind: return *LHS.stringRef; |
445 | case SmallStringKind: |
446 | return StringRef(LHS.smallString->data(), LHS.smallString->size()); |
447 | } |
448 | } |
449 | |
450 | /// This returns the twine as a single StringRef if it can be |
451 | /// represented as such. Otherwise the twine is written into the given |
452 | /// SmallVector and a StringRef to the SmallVector's data is returned. |
453 | StringRef toStringRef(SmallVectorImpl<char> &Out) const { |
454 | if (isSingleStringRef()) |
455 | return getSingleStringRef(); |
456 | toVector(Out); |
457 | return StringRef(Out.data(), Out.size()); |
458 | } |
459 | |
460 | /// This returns the twine as a single null terminated StringRef if it |
461 | /// can be represented as such. Otherwise the twine is written into the |
462 | /// given SmallVector and a StringRef to the SmallVector's data is returned. |
463 | /// |
464 | /// The returned StringRef's size does not include the null terminator. |
465 | StringRef toNullTerminatedStringRef(SmallVectorImpl<char> &Out) const; |
466 | |
467 | /// Write the concatenated string represented by this twine to the |
468 | /// stream \p OS. |
469 | void print(raw_ostream &OS) const; |
470 | |
471 | /// Dump the concatenated string represented by this twine to stderr. |
472 | void dump() const; |
473 | |
474 | /// Write the representation of this twine to the stream \p OS. |
475 | void printRepr(raw_ostream &OS) const; |
476 | |
477 | /// Dump the representation of this twine to stderr. |
478 | void dumpRepr() const; |
479 | |
480 | /// @} |
481 | }; |
482 | |
483 | /// @name Twine Inline Implementations |
484 | /// @{ |
485 | |
486 | inline Twine Twine::concat(const Twine &Suffix) const { |
487 | // Concatenation with null is null. |
488 | if (isNull() || Suffix.isNull()) |
489 | return Twine(NullKind); |
490 | |
491 | // Concatenation with empty yields the other side. |
492 | if (isEmpty()) |
493 | return Suffix; |
494 | if (Suffix.isEmpty()) |
495 | return *this; |
496 | |
497 | // Otherwise we need to create a new node, taking care to fold in unary |
498 | // twines. |
499 | Child NewLHS, NewRHS; |
500 | NewLHS.twine = this; |
501 | NewRHS.twine = &Suffix; |
502 | NodeKind NewLHSKind = TwineKind, NewRHSKind = TwineKind; |
503 | if (isUnary()) { |
504 | NewLHS = LHS; |
505 | NewLHSKind = getLHSKind(); |
506 | } |
507 | if (Suffix.isUnary()) { |
508 | NewRHS = Suffix.LHS; |
509 | NewRHSKind = Suffix.getLHSKind(); |
510 | } |
511 | |
512 | return Twine(NewLHS, NewLHSKind, NewRHS, NewRHSKind); |
513 | } |
514 | |
515 | inline Twine operator+(const Twine &LHS, const Twine &RHS) { |
516 | return LHS.concat(RHS); |
517 | } |
518 | |
519 | /// Additional overload to guarantee simplified codegen; this is equivalent to |
520 | /// concat(). |
521 | |
522 | inline Twine operator+(const char *LHS, const StringRef &RHS) { |
523 | return Twine(LHS, RHS); |
524 | } |
525 | |
526 | /// Additional overload to guarantee simplified codegen; this is equivalent to |
527 | /// concat(). |
528 | |
529 | inline Twine operator+(const StringRef &LHS, const char *RHS) { |
530 | return Twine(LHS, RHS); |
531 | } |
532 | |
533 | inline raw_ostream &operator<<(raw_ostream &OS, const Twine &RHS) { |
534 | RHS.print(OS); |
535 | return OS; |
536 | } |
537 | |
538 | /// @} |
539 | |
540 | } // end namespace llvm |
541 | |
542 | #endif // LLVM_ADT_TWINE_H |
1 | //===- llvm/Bitcode/BitcodeReader.h - Bitcode reader ------------*- C++ -*-===// |
2 | // |
3 | // The LLVM Compiler Infrastructure |
4 | // |
5 | // This file is distributed under the University of Illinois Open Source |
6 | // License. See LICENSE.TXT for details. |
7 | // |
8 | //===----------------------------------------------------------------------===// |
9 | // |
10 | // This header defines interfaces to read LLVM bitcode files/streams. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #ifndef LLVM_BITCODE_BITCODEREADER_H |
15 | #define LLVM_BITCODE_BITCODEREADER_H |
16 | |
17 | #include "llvm/ADT/ArrayRef.h" |
18 | #include "llvm/ADT/StringRef.h" |
19 | #include "llvm/Bitcode/BitCodes.h" |
20 | #include "llvm/IR/ModuleSummaryIndex.h" |
21 | #include "llvm/Support/Endian.h" |
22 | #include "llvm/Support/Error.h" |
23 | #include "llvm/Support/ErrorOr.h" |
24 | #include "llvm/Support/MemoryBuffer.h" |
25 | #include <cstdint> |
26 | #include <memory> |
27 | #include <string> |
28 | #include <system_error> |
29 | #include <vector> |
30 | namespace llvm { |
31 | |
32 | class LLVMContext; |
33 | class Module; |
34 | |
35 | // These functions are for converting Expected/Error values to |
36 | // ErrorOr/std::error_code for compatibility with legacy clients. FIXME: |
37 | // Remove these functions once no longer needed by the C and libLTO APIs. |
38 | |
39 | std::error_code errorToErrorCodeAndEmitErrors(LLVMContext &Ctx, Error Err); |
40 | |
41 | template <typename T> |
42 | ErrorOr<T> expectedToErrorOrAndEmitErrors(LLVMContext &Ctx, Expected<T> Val) { |
43 | if (!Val) |
44 | return errorToErrorCodeAndEmitErrors(Ctx, Val.takeError()); |
45 | return std::move(*Val); |
46 | } |
47 | |
48 | struct BitcodeFileContents; |
49 | |
50 | /// Basic information extracted from a bitcode module to be used for LTO. |
51 | struct BitcodeLTOInfo { |
52 | bool IsThinLTO; |
53 | bool HasSummary; |
54 | }; |
55 | |
56 | /// Represents a module in a bitcode file. |
57 | class BitcodeModule { |
58 | // This covers the identification (if present) and module blocks. |
59 | ArrayRef<uint8_t> Buffer; |
60 | StringRef ModuleIdentifier; |
61 | |
62 | // The string table used to interpret this module. |
63 | StringRef Strtab; |
64 | |
65 | // The bitstream location of the IDENTIFICATION_BLOCK. |
66 | uint64_t IdentificationBit; |
67 | |
68 | // The bitstream location of this module's MODULE_BLOCK. |
69 | uint64_t ModuleBit; |
70 | |
71 | BitcodeModule(ArrayRef<uint8_t> Buffer, StringRef ModuleIdentifier, |
72 | uint64_t IdentificationBit, uint64_t ModuleBit) |
73 | : Buffer(Buffer), ModuleIdentifier(ModuleIdentifier), |
74 | IdentificationBit(IdentificationBit), ModuleBit(ModuleBit) {} |
75 | |
76 | // Calls the ctor. |
77 | friend Expected<BitcodeFileContents> |
78 | getBitcodeFileContents(MemoryBufferRef Buffer); |
79 | |
80 | Expected<std::unique_ptr<Module>> getModuleImpl(LLVMContext &Context, |
81 | bool MaterializeAll, |
82 | bool ShouldLazyLoadMetadata, |
83 | bool IsImporting); |
84 | |
85 | public: |
86 | StringRef getBuffer() const { |
87 | return StringRef((const char *)Buffer.begin(), Buffer.size()); |
88 | } |
89 | |
90 | StringRef getStrtab() const { return Strtab; } |
91 | |
92 | StringRef getModuleIdentifier() const { return ModuleIdentifier; } |
93 | |
94 | /// Read the bitcode module and prepare for lazy deserialization of function |
95 | /// bodies. If ShouldLazyLoadMetadata is true, lazily load metadata as well. |
96 | /// If IsImporting is true, this module is being parsed for ThinLTO |
97 | /// importing into another module. |
98 | Expected<std::unique_ptr<Module>> getLazyModule(LLVMContext &Context, |
99 | bool ShouldLazyLoadMetadata, |
100 | bool IsImporting); |
101 | |
102 | /// Read the entire bitcode module and return it. |
103 | Expected<std::unique_ptr<Module>> parseModule(LLVMContext &Context); |
104 | |
105 | /// Returns information about the module to be used for LTO: whether to |
106 | /// compile with ThinLTO, and whether it has a summary. |
107 | Expected<BitcodeLTOInfo> getLTOInfo(); |
108 | |
109 | /// Parse the specified bitcode buffer, returning the module summary index. |
110 | Expected<std::unique_ptr<ModuleSummaryIndex>> getSummary(); |
111 | |
112 | /// Parse the specified bitcode buffer and merge its module summary index |
113 | /// into CombinedIndex. |
114 | Error readSummary(ModuleSummaryIndex &CombinedIndex, StringRef ModulePath, |
115 | uint64_t ModuleId); |
116 | }; |
117 | |
118 | struct BitcodeFileContents { |
119 | std::vector<BitcodeModule> Mods; |
120 | StringRef Symtab, StrtabForSymtab; |
121 | }; |
122 | |
123 | /// Returns the contents of a bitcode file. This includes the raw contents of |
124 | /// the symbol table embedded in the bitcode file. Clients which require a |
125 | /// symbol table should prefer to use irsymtab::read instead of this function |
126 | /// because it creates a reader for the irsymtab and handles upgrading bitcode |
127 | /// files without a symbol table or with an old symbol table. |
128 | Expected<BitcodeFileContents> getBitcodeFileContents(MemoryBufferRef Buffer); |
129 | |
130 | /// Returns a list of modules in the specified bitcode buffer. |
131 | Expected<std::vector<BitcodeModule>> |
132 | getBitcodeModuleList(MemoryBufferRef Buffer); |
133 | |
134 | /// Read the header of the specified bitcode buffer and prepare for lazy |
135 | /// deserialization of function bodies. If ShouldLazyLoadMetadata is true, |
136 | /// lazily load metadata as well. If IsImporting is true, this module is |
137 | /// being parsed for ThinLTO importing into another module. |
138 | Expected<std::unique_ptr<Module>> |
139 | getLazyBitcodeModule(MemoryBufferRef Buffer, LLVMContext &Context, |
140 | bool ShouldLazyLoadMetadata = false, |
141 | bool IsImporting = false); |
142 | |
143 | /// Like getLazyBitcodeModule, except that the module takes ownership of |
144 | /// the memory buffer if successful. If successful, this moves Buffer. On |
145 | /// error, this *does not* move Buffer. If IsImporting is true, this module is |
146 | /// being parsed for ThinLTO importing into another module. |
147 | Expected<std::unique_ptr<Module>> getOwningLazyBitcodeModule( |
148 | std::unique_ptr<MemoryBuffer> &&Buffer, LLVMContext &Context, |
149 | bool ShouldLazyLoadMetadata = false, bool IsImporting = false); |
150 | |
151 | /// Read the header of the specified bitcode buffer and extract just the |
152 | /// triple information. If successful, this returns a string. On error, this |
153 | /// returns "". |
154 | Expected<std::string> getBitcodeTargetTriple(MemoryBufferRef Buffer); |
155 | |
156 | /// Return true if \p Buffer contains a bitcode file with ObjC code (category |
157 | /// or class) in it. |
158 | Expected<bool> isBitcodeContainingObjCCategory(MemoryBufferRef Buffer); |
159 | |
160 | /// Read the header of the specified bitcode buffer and extract just the |
161 | /// producer string information. If successful, this returns a string. On |
162 | /// error, this returns "". |
163 | Expected<std::string> getBitcodeProducerString(MemoryBufferRef Buffer); |
164 | |
165 | /// Read the specified bitcode file, returning the module. |
166 | Expected<std::unique_ptr<Module>> parseBitcodeFile(MemoryBufferRef Buffer, |
167 | LLVMContext &Context); |
168 | |
169 | /// Returns LTO information for the specified bitcode file. |
170 | Expected<BitcodeLTOInfo> getBitcodeLTOInfo(MemoryBufferRef Buffer); |
171 | |
172 | /// Parse the specified bitcode buffer, returning the module summary index. |
173 | Expected<std::unique_ptr<ModuleSummaryIndex>> |
174 | getModuleSummaryIndex(MemoryBufferRef Buffer); |
175 | |
176 | /// Parse the specified bitcode buffer and merge the index into CombinedIndex. |
177 | Error readModuleSummaryIndex(MemoryBufferRef Buffer, |
178 | ModuleSummaryIndex &CombinedIndex, |
179 | uint64_t ModuleId); |
180 | |
181 | /// Parse the module summary index out of an IR file and return the module |
182 | /// summary index object if found, or an empty summary if not. If Path refers |
183 | /// to an empty file and IgnoreEmptyThinLTOIndexFile is true, then |
184 | /// this function will return nullptr. |
185 | Expected<std::unique_ptr<ModuleSummaryIndex>> |
186 | getModuleSummaryIndexForFile(StringRef Path, |
187 | bool IgnoreEmptyThinLTOIndexFile = false); |
188 | |
189 | /// isBitcodeWrapper - Return true if the given bytes are the magic bytes |
190 | /// for an LLVM IR bitcode wrapper. |
191 | inline bool isBitcodeWrapper(const unsigned char *BufPtr, |
192 | const unsigned char *BufEnd) { |
193 | // See if you can find the hidden message in the magic bytes :-). |
194 | // (Hint: it's a little-endian encoding.) |
195 | return BufPtr != BufEnd && |
196 | BufPtr[0] == 0xDE && |
197 | BufPtr[1] == 0xC0 && |
198 | BufPtr[2] == 0x17 && |
199 | BufPtr[3] == 0x0B; |
200 | } |
201 | |
202 | /// isRawBitcode - Return true if the given bytes are the magic bytes for |
203 | /// raw LLVM IR bitcode (without a wrapper). |
204 | inline bool isRawBitcode(const unsigned char *BufPtr, |
205 | const unsigned char *BufEnd) { |
206 | // These bytes sort of have a hidden message, but it's not in |
207 | // little-endian this time, and it's a little redundant. |
208 | return BufPtr != BufEnd && |
209 | BufPtr[0] == 'B' && |
210 | BufPtr[1] == 'C' && |
211 | BufPtr[2] == 0xc0 && |
212 | BufPtr[3] == 0xde; |
213 | } |
214 | |
215 | /// isBitcode - Return true if the given bytes are the magic bytes for |
216 | /// LLVM IR bitcode, either with or without a wrapper. |
217 | inline bool isBitcode(const unsigned char *BufPtr, |
218 | const unsigned char *BufEnd) { |
219 | return isBitcodeWrapper(BufPtr, BufEnd) || |
220 | isRawBitcode(BufPtr, BufEnd); |
221 | } |
222 | |
223 | /// SkipBitcodeWrapperHeader - Some systems wrap bc files with a special |
224 | /// header for padding or other reasons. The format of this header is: |
225 | /// |
226 | /// struct bc_header { |
227 | /// uint32_t Magic; // 0x0B17C0DE |
228 | /// uint32_t Version; // Version, currently always 0. |
229 | /// uint32_t BitcodeOffset; // Offset to traditional bitcode file. |
230 | /// uint32_t BitcodeSize; // Size of traditional bitcode file. |
231 | /// ... potentially other gunk ... |
232 | /// }; |
233 | /// |
234 | /// This function is called when we find a file with a matching magic number. |
235 | /// In this case, skip down to the subsection of the file that is actually a |
236 | /// BC file. |
237 | /// If 'VerifyBufferSize' is true, check that the buffer is large enough to |
238 | /// contain the whole bitcode file. |
239 | inline bool SkipBitcodeWrapperHeader(const unsigned char *&BufPtr, |
240 | const unsigned char *&BufEnd, |
241 | bool VerifyBufferSize) { |
242 | // Must contain the offset and size field! |
243 | if (unsigned(BufEnd - BufPtr) < BWH_SizeField + 4) |
244 | return true; |
245 | |
246 | unsigned Offset = support::endian::read32le(&BufPtr[BWH_OffsetField]); |
247 | unsigned Size = support::endian::read32le(&BufPtr[BWH_SizeField]); |
248 | uint64_t BitcodeOffsetEnd = (uint64_t)Offset + (uint64_t)Size; |
249 | |
250 | // Verify that Offset+Size fits in the file. |
251 | if (VerifyBufferSize && BitcodeOffsetEnd > uint64_t(BufEnd-BufPtr)) |
252 | return true; |
253 | BufPtr += Offset; |
254 | BufEnd = BufPtr+Size; |
255 | return false; |
256 | } |
257 | |
258 | const std::error_category &BitcodeErrorCategory(); |
259 | enum class BitcodeError { CorruptedBitcode = 1 }; |
260 | inline std::error_code make_error_code(BitcodeError E) { |
261 | return std::error_code(static_cast<int>(E), BitcodeErrorCategory()); |
262 | } |
263 | |
264 | } // end namespace llvm |
265 | |
266 | namespace std { |
267 | |
268 | template <> struct is_error_code_enum<llvm::BitcodeError> : std::true_type {}; |
269 | |
270 | } // end namespace std |
271 | |
272 | #endif // LLVM_BITCODE_BITCODEREADER_H |
1 | //===-- llvm/Support/ManagedStatic.h - Static Global wrapper ----*- C++ -*-===// |
2 | // |
3 | // The LLVM Compiler Infrastructure |
4 | // |
5 | // This file is distributed under the University of Illinois Open Source |
6 | // License. See LICENSE.TXT for details. |
7 | // |
8 | //===----------------------------------------------------------------------===// |
9 | // |
10 | // This file defines the ManagedStatic class and the llvm_shutdown() function. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #ifndef LLVM_SUPPORT_MANAGEDSTATIC_H |
15 | #define LLVM_SUPPORT_MANAGEDSTATIC_H |
16 | |
17 | #include <atomic> |
18 | #include <cstddef> |
19 | |
20 | namespace llvm { |
21 | |
22 | /// object_creator - Helper method for ManagedStatic. |
23 | template <class C> struct object_creator { |
24 | static void *call() { return new C(); } |
25 | }; |
26 | |
27 | /// object_deleter - Helper method for ManagedStatic. |
28 | /// |
29 | template <typename T> struct object_deleter { |
30 | static void call(void *Ptr) { delete (T *)Ptr; } |
31 | }; |
32 | template <typename T, size_t N> struct object_deleter<T[N]> { |
33 | static void call(void *Ptr) { delete[](T *)Ptr; } |
34 | }; |
35 | |
36 | /// ManagedStaticBase - Common base class for ManagedStatic instances. |
37 | class ManagedStaticBase { |
38 | protected: |
39 | // This should only be used as a static variable, which guarantees that this |
40 | // will be zero initialized. |
41 | mutable std::atomic<void *> Ptr; |
42 | mutable void (*DeleterFn)(void*); |
43 | mutable const ManagedStaticBase *Next; |
44 | |
45 | void RegisterManagedStatic(void *(*creator)(), void (*deleter)(void*)) const; |
46 | |
47 | public: |
48 | /// isConstructed - Return true if this object has not been created yet. |
49 | bool isConstructed() const { return Ptr != nullptr; } |
50 | |
51 | void destroy() const; |
52 | }; |
53 | |
54 | /// ManagedStatic - This transparently changes the behavior of global statics to |
55 | /// be lazily constructed on demand (good for reducing startup times of dynamic |
56 | /// libraries that link in LLVM components) and for making destruction be |
57 | /// explicit through the llvm_shutdown() function call. |
58 | /// |
59 | template <class C, class Creator = object_creator<C>, |
60 | class Deleter = object_deleter<C>> |
61 | class ManagedStatic : public ManagedStaticBase { |
62 | public: |
63 | // Accessors. |
64 | C &operator*() { |
65 | void *Tmp = Ptr.load(std::memory_order_acquire); |
66 | if (!Tmp) |
67 | RegisterManagedStatic(Creator::call, Deleter::call); |
68 | |
69 | return *static_cast<C *>(Ptr.load(std::memory_order_relaxed)); |
70 | } |
71 | |
72 | C *operator->() { return &**this; } |
73 | |
74 | const C &operator*() const { |
75 | void *Tmp = Ptr.load(std::memory_order_acquire); |
76 | if (!Tmp) |
77 | RegisterManagedStatic(Creator::call, Deleter::call); |
78 | |
79 | return *static_cast<C *>(Ptr.load(std::memory_order_relaxed)); |
80 | } |
81 | |
82 | const C *operator->() const { return &**this; } |
83 | }; |
84 | |
85 | /// llvm_shutdown - Deallocate and destroy all ManagedStatic variables. |
86 | void llvm_shutdown(); |
87 | |
88 | /// llvm_shutdown_obj - This is a simple helper class that calls |
89 | /// llvm_shutdown() when it is destroyed. |
90 | struct llvm_shutdown_obj { |
91 | llvm_shutdown_obj() = default; |
92 | ~llvm_shutdown_obj() { llvm_shutdown(); } |
93 | }; |
94 | |
95 | } // end namespace llvm |
96 | |
97 | #endif // LLVM_SUPPORT_MANAGEDSTATIC_H |
1 | // -*- C++ -*- header. |
2 | |
3 | // Copyright (C) 2008-2017 Free Software Foundation, Inc. |
4 | // |
5 | // This file is part of the GNU ISO C++ Library. This library is free |
6 | // software; you can redistribute it and/or modify it under the |
7 | // terms of the GNU General Public License as published by the |
8 | // Free Software Foundation; either version 3, or (at your option) |
9 | // any later version. |
10 | |
11 | // This library is distributed in the hope that it will be useful, |
12 | // but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | // GNU General Public License for more details. |
15 | |
16 | // Under Section 7 of GPL version 3, you are granted additional |
17 | // permissions described in the GCC Runtime Library Exception, version |
18 | // 3.1, as published by the Free Software Foundation. |
19 | |
20 | // You should have received a copy of the GNU General Public License and |
21 | // a copy of the GCC Runtime Library Exception along with this program; |
22 | // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see |
23 | // <http://www.gnu.org/licenses/>. |
24 | |
25 | /** @file include/atomic |
26 | * This is a Standard C++ Library header. |
27 | */ |
28 | |
29 | // Based on "C++ Atomic Types and Operations" by Hans Boehm and Lawrence Crowl. |
30 | // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2427.html |
31 | |
32 | #ifndef _GLIBCXX_ATOMIC1 |
33 | #define _GLIBCXX_ATOMIC1 1 |
34 | |
35 | #pragma GCC system_header |
36 | |
37 | #if __cplusplus201103L < 201103L |
38 | # include <bits/c++0x_warning.h> |
39 | #else |
40 | |
41 | #include <bits/atomic_base.h> |
42 | #include <bits/move.h> |
43 | |
44 | namespace std _GLIBCXX_VISIBILITY(default)__attribute__ ((__visibility__ ("default"))) |
45 | { |
46 | _GLIBCXX_BEGIN_NAMESPACE_VERSION |
47 | |
48 | /** |
49 | * @addtogroup atomics |
50 | * @{ |
51 | */ |
52 | |
53 | #if __cplusplus201103L > 201402L |
54 | # define __cpp_lib_atomic_is_always_lock_free 201603 |
55 | #endif |
56 | |
57 | template<typename _Tp> |
58 | struct atomic; |
59 | |
60 | /// atomic<bool> |
61 | // NB: No operators or fetch-operations for this type. |
62 | template<> |
63 | struct atomic<bool> |
64 | { |
65 | private: |
66 | __atomic_base<bool> _M_base; |
67 | |
68 | public: |
69 | atomic() noexcept = default; |
70 | ~atomic() noexcept = default; |
71 | atomic(const atomic&) = delete; |
72 | atomic& operator=(const atomic&) = delete; |
73 | atomic& operator=(const atomic&) volatile = delete; |
74 | |
75 | constexpr atomic(bool __i) noexcept : _M_base(__i) { } |
76 | |
77 | bool |
78 | operator=(bool __i) noexcept |
79 | { return _M_base.operator=(__i); } |
80 | |
81 | bool |
82 | operator=(bool __i) volatile noexcept |
83 | { return _M_base.operator=(__i); } |
84 | |
85 | operator bool() const noexcept |
86 | { return _M_base.load(); } |
87 | |
88 | operator bool() const volatile noexcept |
89 | { return _M_base.load(); } |
90 | |
91 | bool |
92 | is_lock_free() const noexcept { return _M_base.is_lock_free(); } |
93 | |
94 | bool |
95 | is_lock_free() const volatile noexcept { return _M_base.is_lock_free(); } |
96 | |
97 | #if __cplusplus201103L > 201402L |
98 | static constexpr bool is_always_lock_free = ATOMIC_BOOL_LOCK_FREE2 == 2; |
99 | #endif |
100 | |
101 | void |
102 | store(bool __i, memory_order __m = memory_order_seq_cst) noexcept |
103 | { _M_base.store(__i, __m); } |
104 | |
105 | void |
106 | store(bool __i, memory_order __m = memory_order_seq_cst) volatile noexcept |
107 | { _M_base.store(__i, __m); } |
108 | |
109 | bool |
110 | load(memory_order __m = memory_order_seq_cst) const noexcept |
111 | { return _M_base.load(__m); } |
112 | |
113 | bool |
114 | load(memory_order __m = memory_order_seq_cst) const volatile noexcept |
115 | { return _M_base.load(__m); } |
116 | |
117 | bool |
118 | exchange(bool __i, memory_order __m = memory_order_seq_cst) noexcept |
119 | { return _M_base.exchange(__i, __m); } |
120 | |
121 | bool |
122 | exchange(bool __i, |
123 | memory_order __m = memory_order_seq_cst) volatile noexcept |
124 | { return _M_base.exchange(__i, __m); } |
125 | |
126 | bool |
127 | compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1, |
128 | memory_order __m2) noexcept |
129 | { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); } |
130 | |
131 | bool |
132 | compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1, |
133 | memory_order __m2) volatile noexcept |
134 | { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); } |
135 | |
136 | bool |
137 | compare_exchange_weak(bool& __i1, bool __i2, |
138 | memory_order __m = memory_order_seq_cst) noexcept |
139 | { return _M_base.compare_exchange_weak(__i1, __i2, __m); } |
140 | |
141 | bool |
142 | compare_exchange_weak(bool& __i1, bool __i2, |
143 | memory_order __m = memory_order_seq_cst) volatile noexcept |
144 | { return _M_base.compare_exchange_weak(__i1, __i2, __m); } |
145 | |
146 | bool |
147 | compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1, |
148 | memory_order __m2) noexcept |
149 | { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); } |
150 | |
151 | bool |
152 | compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1, |
153 | memory_order __m2) volatile noexcept |
154 | { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); } |
155 | |
156 | bool |
157 | compare_exchange_strong(bool& __i1, bool __i2, |
158 | memory_order __m = memory_order_seq_cst) noexcept |
159 | { return _M_base.compare_exchange_strong(__i1, __i2, __m); } |
160 | |
161 | bool |
162 | compare_exchange_strong(bool& __i1, bool __i2, |
163 | memory_order __m = memory_order_seq_cst) volatile noexcept |
164 | { return _M_base.compare_exchange_strong(__i1, __i2, __m); } |
165 | }; |
166 | |
167 | |
168 | /** |
169 | * @brief Generic atomic type, primary class template. |
170 | * |
171 | * @tparam _Tp Type to be made atomic, must be trivally copyable. |
172 | */ |
173 | template<typename _Tp> |
174 | struct atomic |
175 | { |
176 | private: |
177 | // Align 1/2/4/8/16-byte types to at least their size. |
178 | static constexpr int _S_min_alignment |
179 | = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16 |
180 | ? 0 : sizeof(_Tp); |
181 | |
182 | static constexpr int _S_alignment |
183 | = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp); |
184 | |
185 | alignas(_S_alignment) _Tp _M_i; |
186 | |
187 | static_assert(__is_trivially_copyable(_Tp), |
188 | "std::atomic requires a trivially copyable type"); |
189 | |
190 | static_assert(sizeof(_Tp) > 0, |
191 | "Incomplete or zero-sized types are not supported"); |
192 | |
193 | public: |
194 | atomic() noexcept = default; |
195 | ~atomic() noexcept = default; |
196 | atomic(const atomic&) = delete; |
197 | atomic& operator=(const atomic&) = delete; |
198 | atomic& operator=(const atomic&) volatile = delete; |
199 | |
200 | constexpr atomic(_Tp __i) noexcept : _M_i(__i) { } |
201 | |
202 | operator _Tp() const noexcept |
203 | { return load(); } |
204 | |
205 | operator _Tp() const volatile noexcept |
206 | { return load(); } |
207 | |
208 | _Tp |
209 | operator=(_Tp __i) noexcept |
210 | { store(__i); return __i; } |
211 | |
212 | _Tp |
213 | operator=(_Tp __i) volatile noexcept |
214 | { store(__i); return __i; } |
215 | |
216 | bool |
217 | is_lock_free() const noexcept |
218 | { |
219 | // Produce a fake, minimally aligned pointer. |
220 | return __atomic_is_lock_free(sizeof(_M_i), |
221 | reinterpret_cast<void *>(-__alignof(_M_i))); |
222 | } |
223 | |
224 | bool |
225 | is_lock_free() const volatile noexcept |
226 | { |
227 | // Produce a fake, minimally aligned pointer. |
228 | return __atomic_is_lock_free(sizeof(_M_i), |
229 | reinterpret_cast<void *>(-__alignof(_M_i))); |
230 | } |
231 | |
232 | #if __cplusplus201103L > 201402L |
233 | static constexpr bool is_always_lock_free |
234 | = __atomic_always_lock_free(sizeof(_M_i), 0); |
235 | #endif |
236 | |
237 | void |
238 | store(_Tp __i, memory_order __m = memory_order_seq_cst) noexcept |
239 | { __atomic_store(std::__addressof(_M_i), std::__addressof(__i), __m); } |
240 | |
241 | void |
242 | store(_Tp __i, memory_order __m = memory_order_seq_cst) volatile noexcept |
243 | { __atomic_store(std::__addressof(_M_i), std::__addressof(__i), __m); } |
244 | |
245 | _Tp |
246 | load(memory_order __m = memory_order_seq_cst) const noexcept |
247 | { |
248 | alignas(_Tp) unsigned char __buf[sizeof(_Tp)]; |
249 | _Tp* __ptr = reinterpret_cast<_Tp*>(__buf); |
250 | __atomic_load(std::__addressof(_M_i), __ptr, __m); |
251 | return *__ptr; |
252 | } |
253 | |
254 | _Tp |
255 | load(memory_order __m = memory_order_seq_cst) const volatile noexcept |
256 | { |
257 | alignas(_Tp) unsigned char __buf[sizeof(_Tp)]; |
258 | _Tp* __ptr = reinterpret_cast<_Tp*>(__buf); |
259 | __atomic_load(std::__addressof(_M_i), __ptr, __m); |
260 | return *__ptr; |
261 | } |
262 | |
263 | _Tp |
264 | exchange(_Tp __i, memory_order __m = memory_order_seq_cst) noexcept |
265 | { |
266 | alignas(_Tp) unsigned char __buf[sizeof(_Tp)]; |
267 | _Tp* __ptr = reinterpret_cast<_Tp*>(__buf); |
268 | __atomic_exchange(std::__addressof(_M_i), std::__addressof(__i), |
269 | __ptr, __m); |
270 | return *__ptr; |
271 | } |
272 | |
273 | _Tp |
274 | exchange(_Tp __i, |
275 | memory_order __m = memory_order_seq_cst) volatile noexcept |
276 | { |
277 | alignas(_Tp) unsigned char __buf[sizeof(_Tp)]; |
278 | _Tp* __ptr = reinterpret_cast<_Tp*>(__buf); |
279 | __atomic_exchange(std::__addressof(_M_i), std::__addressof(__i), |
280 | __ptr, __m); |
281 | return *__ptr; |
282 | } |
283 | |
284 | bool |
285 | compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __s, |
286 | memory_order __f) noexcept |
287 | { |
288 | return __atomic_compare_exchange(std::__addressof(_M_i), |
289 | std::__addressof(__e), |
290 | std::__addressof(__i), |
291 | true, __s, __f); |
292 | } |
293 | |
294 | bool |
295 | compare_exchange_weak(_Tp& __e, _Tp __i, memory_order __s, |
296 | memory_order __f) volatile noexcept |
297 | { |
298 | return __atomic_compare_exchange(std::__addressof(_M_i), |
299 | std::__addressof(__e), |
300 | std::__addressof(__i), |
301 | true, __s, __f); |
302 | } |
303 | |
304 | bool |
305 | compare_exchange_weak(_Tp& __e, _Tp __i, |
306 | memory_order __m = memory_order_seq_cst) noexcept |
307 | { return compare_exchange_weak(__e, __i, __m, |
308 | __cmpexch_failure_order(__m)); } |
309 | |
310 | bool |
311 | compare_exchange_weak(_Tp& __e, _Tp __i, |
312 | memory_order __m = memory_order_seq_cst) volatile noexcept |
313 | { return compare_exchange_weak(__e, __i, __m, |
314 | __cmpexch_failure_order(__m)); } |
315 | |
316 | bool |
317 | compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __s, |
318 | memory_order __f) noexcept |
319 | { |
320 | return __atomic_compare_exchange(std::__addressof(_M_i), |
321 | std::__addressof(__e), |
322 | std::__addressof(__i), |
323 | false, __s, __f); |
324 | } |
325 | |
326 | bool |
327 | compare_exchange_strong(_Tp& __e, _Tp __i, memory_order __s, |
328 | memory_order __f) volatile noexcept |
329 | { |
330 | return __atomic_compare_exchange(std::__addressof(_M_i), |
331 | std::__addressof(__e), |
332 | std::__addressof(__i), |
333 | false, __s, __f); |
334 | } |
335 | |
336 | bool |
337 | compare_exchange_strong(_Tp& __e, _Tp __i, |
338 | memory_order __m = memory_order_seq_cst) noexcept |
339 | { return compare_exchange_strong(__e, __i, __m, |
340 | __cmpexch_failure_order(__m)); } |
341 | |
342 | bool |
343 | compare_exchange_strong(_Tp& __e, _Tp __i, |
344 | memory_order __m = memory_order_seq_cst) volatile noexcept |
345 | { return compare_exchange_strong(__e, __i, __m, |
346 | __cmpexch_failure_order(__m)); } |
347 | }; |
348 | |
349 | |
350 | /// Partial specialization for pointer types. |
351 | template<typename _Tp> |
352 | struct atomic<_Tp*> |
353 | { |
354 | typedef _Tp* __pointer_type; |
355 | typedef __atomic_base<_Tp*> __base_type; |
356 | __base_type _M_b; |
357 | |
358 | atomic() noexcept = default; |
359 | ~atomic() noexcept = default; |
360 | atomic(const atomic&) = delete; |
361 | atomic& operator=(const atomic&) = delete; |
362 | atomic& operator=(const atomic&) volatile = delete; |
363 | |
364 | constexpr atomic(__pointer_type __p) noexcept : _M_b(__p) { } |
365 | |
366 | operator __pointer_type() const noexcept |
367 | { return __pointer_type(_M_b); } |
368 | |
369 | operator __pointer_type() const volatile noexcept |
370 | { return __pointer_type(_M_b); } |
371 | |
372 | __pointer_type |
373 | operator=(__pointer_type __p) noexcept |
374 | { return _M_b.operator=(__p); } |
375 | |
376 | __pointer_type |
377 | operator=(__pointer_type __p) volatile noexcept |
378 | { return _M_b.operator=(__p); } |
379 | |
380 | __pointer_type |
381 | operator++(int) noexcept |
382 | { return _M_b++; } |
383 | |
384 | __pointer_type |
385 | operator++(int) volatile noexcept |
386 | { return _M_b++; } |
387 | |
388 | __pointer_type |
389 | operator--(int) noexcept |
390 | { return _M_b--; } |
391 | |
392 | __pointer_type |
393 | operator--(int) volatile noexcept |
394 | { return _M_b--; } |
395 | |
396 | __pointer_type |
397 | operator++() noexcept |
398 | { return ++_M_b; } |
399 | |
400 | __pointer_type |
401 | operator++() volatile noexcept |
402 | { return ++_M_b; } |
403 | |
404 | __pointer_type |
405 | operator--() noexcept |
406 | { return --_M_b; } |
407 | |
408 | __pointer_type |
409 | operator--() volatile noexcept |
410 | { return --_M_b; } |
411 | |
412 | __pointer_type |
413 | operator+=(ptrdiff_t __d) noexcept |
414 | { return _M_b.operator+=(__d); } |
415 | |
416 | __pointer_type |
417 | operator+=(ptrdiff_t __d) volatile noexcept |
418 | { return _M_b.operator+=(__d); } |
419 | |
420 | __pointer_type |
421 | operator-=(ptrdiff_t __d) noexcept |
422 | { return _M_b.operator-=(__d); } |
423 | |
424 | __pointer_type |
425 | operator-=(ptrdiff_t __d) volatile noexcept |
426 | { return _M_b.operator-=(__d); } |
427 | |
428 | bool |
429 | is_lock_free() const noexcept |
430 | { return _M_b.is_lock_free(); } |
431 | |
432 | bool |
433 | is_lock_free() const volatile noexcept |
434 | { return _M_b.is_lock_free(); } |
435 | |
436 | #if __cplusplus201103L > 201402L |
437 | static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE2 == 2; |
438 | #endif |
439 | |
440 | void |
441 | store(__pointer_type __p, |
442 | memory_order __m = memory_order_seq_cst) noexcept |
443 | { return _M_b.store(__p, __m); } |
444 | |
445 | void |
446 | store(__pointer_type __p, |
447 | memory_order __m = memory_order_seq_cst) volatile noexcept |
448 | { return _M_b.store(__p, __m); } |
449 | |
450 | __pointer_type |
451 | load(memory_order __m = memory_order_seq_cst) const noexcept |
452 | { return _M_b.load(__m); } |
453 | |
454 | __pointer_type |
455 | load(memory_order __m = memory_order_seq_cst) const volatile noexcept |
456 | { return _M_b.load(__m); } |
457 | |
458 | __pointer_type |
459 | exchange(__pointer_type __p, |
460 | memory_order __m = memory_order_seq_cst) noexcept |
461 | { return _M_b.exchange(__p, __m); } |
462 | |
463 | __pointer_type |
464 | exchange(__pointer_type __p, |
465 | memory_order __m = memory_order_seq_cst) volatile noexcept |
466 | { return _M_b.exchange(__p, __m); } |
467 | |
468 | bool |
469 | compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, |
470 | memory_order __m1, memory_order __m2) noexcept |
471 | { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } |
472 | |
473 | bool |
474 | compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, |
475 | memory_order __m1, |
476 | memory_order __m2) volatile noexcept |
477 | { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } |
478 | |
479 | bool |
480 | compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, |
481 | memory_order __m = memory_order_seq_cst) noexcept |
482 | { |
483 | return compare_exchange_weak(__p1, __p2, __m, |
484 | __cmpexch_failure_order(__m)); |
485 | } |
486 | |
487 | bool |
488 | compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2, |
489 | memory_order __m = memory_order_seq_cst) volatile noexcept |
490 | { |
491 | return compare_exchange_weak(__p1, __p2, __m, |
492 | __cmpexch_failure_order(__m)); |
493 | } |
494 | |
495 | bool |
496 | compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, |
497 | memory_order __m1, memory_order __m2) noexcept |
498 | { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } |
499 | |
500 | bool |
501 | compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, |
502 | memory_order __m1, |
503 | memory_order __m2) volatile noexcept |
504 | { return _M_b.compare_exchange_strong(__p1, __p2, __m1, __m2); } |
505 | |
506 | bool |
507 | compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, |
508 | memory_order __m = memory_order_seq_cst) noexcept |
509 | { |
510 | return _M_b.compare_exchange_strong(__p1, __p2, __m, |
511 | __cmpexch_failure_order(__m)); |
512 | } |
513 | |
514 | bool |
515 | compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, |
516 | memory_order __m = memory_order_seq_cst) volatile noexcept |
517 | { |
518 | return _M_b.compare_exchange_strong(__p1, __p2, __m, |
519 | __cmpexch_failure_order(__m)); |
520 | } |
521 | |
522 | __pointer_type |
523 | fetch_add(ptrdiff_t __d, |
524 | memory_order __m = memory_order_seq_cst) noexcept |
525 | { return _M_b.fetch_add(__d, __m); } |
526 | |
527 | __pointer_type |
528 | fetch_add(ptrdiff_t __d, |
529 | memory_order __m = memory_order_seq_cst) volatile noexcept |
530 | { return _M_b.fetch_add(__d, __m); } |
531 | |
532 | __pointer_type |
533 | fetch_sub(ptrdiff_t __d, |
534 | memory_order __m = memory_order_seq_cst) noexcept |
535 | { return _M_b.fetch_sub(__d, __m); } |
536 | |
537 | __pointer_type |
538 | fetch_sub(ptrdiff_t __d, |
539 | memory_order __m = memory_order_seq_cst) volatile noexcept |
540 | { return _M_b.fetch_sub(__d, __m); } |
541 | }; |
542 | |
543 | |
544 | /// Explicit specialization for char. |
545 | template<> |
546 | struct atomic<char> : __atomic_base<char> |
547 | { |
548 | typedef char __integral_type; |
549 | typedef __atomic_base<char> __base_type; |
550 | |
551 | atomic() noexcept = default; |
552 | ~atomic() noexcept = default; |
553 | atomic(const atomic&) = delete; |
554 | atomic& operator=(const atomic&) = delete; |
555 | atomic& operator=(const atomic&) volatile = delete; |
556 | |
557 | constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { } |
558 | |
559 | using __base_type::operator __integral_type; |
560 | using __base_type::operator=; |
561 | |
562 | #if __cplusplus201103L > 201402L |
563 | static constexpr bool is_always_lock_free = ATOMIC_CHAR_LOCK_FREE2 == 2; |
564 | #endif |
565 | }; |
566 | |
567 | /// Explicit specialization for signed char. |
568 | template<> |
569 | struct atomic<signed char> : __atomic_base<signed char> |
570 | { |
571 | typedef signed char __integral_type; |
572 | typedef __atomic_base<signed char> __base_type; |
573 | |
574 | atomic() noexcept= default; |
575 | ~atomic() noexcept = default; |
576 | atomic(const atomic&) = delete; |
577 | atomic& operator=(const atomic&) = delete; |
578 | atomic& operator=(const atomic&) volatile = delete; |
579 | |
580 | constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { } |
581 | |
582 | using __base_type::operator __integral_type; |
583 | using __base_type::operator=; |
584 | |
585 | #if __cplusplus201103L > 201402L |
586 | static constexpr bool is_always_lock_free = ATOMIC_CHAR_LOCK_FREE2 == 2; |
587 | #endif |
588 | }; |
589 | |
590 | /// Explicit specialization for unsigned char. |
591 | template<> |
592 | struct atomic<unsigned char> : __atomic_base<unsigned char> |
593 | { |
594 | typedef unsigned char __integral_type; |
595 | typedef __atomic_base<unsigned char> __base_type; |
596 | |
597 | atomic() noexcept= default; |
598 | ~atomic() noexcept = default; |
599 | atomic(const atomic&) = delete; |
600 | atomic& operator=(const atomic&) = delete; |
601 | atomic& operator=(const atomic&) volatile = delete; |
602 | |
603 | constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { } |
604 | |
605 | using __base_type::operator __integral_type; |
606 | using __base_type::operator=; |
607 | |
608 | #if __cplusplus201103L > 201402L |
609 | static constexpr bool is_always_lock_free = ATOMIC_CHAR_LOCK_FREE2 == 2; |
610 | #endif |
611 | }; |
612 | |
613 | /// Explicit specialization for short. |
614 | template<> |
615 | struct atomic<short> : __atomic_base<short> |
616 | { |
617 | typedef short __integral_type; |
618 | typedef __atomic_base<short> __base_type; |
619 | |
620 | atomic() noexcept = default; |
621 | ~atomic() noexcept = default; |
622 | atomic(const atomic&) = delete; |
623 | atomic& operator=(const atomic&) = delete; |
624 | atomic& operator=(const atomic&) volatile = delete; |
625 | |
626 | constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { } |
627 | |
628 | using __base_type::operator __integral_type; |
629 | using __base_type::operator=; |
630 | |
631 | #if __cplusplus201103L > 201402L |
632 | static constexpr bool is_always_lock_free = ATOMIC_SHORT_LOCK_FREE2 == 2; |
633 | #endif |
634 | }; |
635 | |
636 | /// Explicit specialization for unsigned short. |
637 | template<> |
638 | struct atomic<unsigned short> : __atomic_base<unsigned short> |
639 | { |
640 | typedef unsigned short __integral_type; |
641 | typedef __atomic_base<unsigned short> __base_type; |
642 | |
643 | atomic() noexcept = default; |
644 | ~atomic() noexcept = default; |
645 | atomic(const atomic&) = delete; |
646 | atomic& operator=(const atomic&) = delete; |
647 | atomic& operator=(const atomic&) volatile = delete; |
648 | |
649 | constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { } |
650 | |
651 | using __base_type::operator __integral_type; |
652 | using __base_type::operator=; |
653 | |
654 | #if __cplusplus201103L > 201402L |
655 | static constexpr bool is_always_lock_free = ATOMIC_SHORT_LOCK_FREE2 == 2; |
656 | #endif |
657 | }; |
658 | |
659 | /// Explicit specialization for int. |
660 | template<> |
661 | struct atomic<int> : __atomic_base<int> |
662 | { |
663 | typedef int __integral_type; |
664 | typedef __atomic_base<int> __base_type; |
665 | |
666 | atomic() noexcept = default; |
667 | ~atomic() noexcept = default; |
668 | atomic(const atomic&) = delete; |
669 | atomic& operator=(const atomic&) = delete; |
670 | atomic& operator=(const atomic&) volatile = delete; |
671 | |
672 | constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { } |
673 | |
674 | using __base_type::operator __integral_type; |
675 | using __base_type::operator=; |
676 | |
677 | #if __cplusplus201103L > 201402L |
678 | static constexpr bool is_always_lock_free = ATOMIC_INT_LOCK_FREE2 == 2; |
679 | #endif |
680 | }; |
681 | |
682 | /// Explicit specialization for unsigned int. |
683 | template<> |
684 | struct atomic<unsigned int> : __atomic_base<unsigned int> |
685 | { |
686 | typedef unsigned int __integral_type; |
687 | typedef __atomic_base<unsigned int> __base_type; |
688 | |
689 | atomic() noexcept = default; |
690 | ~atomic() noexcept = default; |
691 | atomic(const atomic&) = delete; |
692 | atomic& operator=(const atomic&) = delete; |
693 | atomic& operator=(const atomic&) volatile = delete; |
694 | |
695 | constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { } |
696 | |
697 | using __base_type::operator __integral_type; |
698 | using __base_type::operator=; |
699 | |
700 | #if __cplusplus201103L > 201402L |
701 | static constexpr bool is_always_lock_free = ATOMIC_INT_LOCK_FREE2 == 2; |
702 | #endif |
703 | }; |
704 | |
705 | /// Explicit specialization for long. |
706 | template<> |
707 | struct atomic<long> : __atomic_base<long> |
708 | { |
709 | typedef long __integral_type; |
710 | typedef __atomic_base<long> __base_type; |
711 | |
712 | atomic() noexcept = default; |
713 | ~atomic() noexcept = default; |
714 | atomic(const atomic&) = delete; |
715 | atomic& operator=(const atomic&) = delete; |
716 | atomic& operator=(const atomic&) volatile = delete; |
717 | |
718 | constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { } |
719 | |
720 | using __base_type::operator __integral_type; |
721 | using __base_type::operator=; |
722 | |
723 | #if __cplusplus201103L > 201402L |
724 | static constexpr bool is_always_lock_free = ATOMIC_LONG_LOCK_FREE2 == 2; |
725 | #endif |
726 | }; |
727 | |
728 | /// Explicit specialization for unsigned long. |
729 | template<> |
730 | struct atomic<unsigned long> : __atomic_base<unsigned long> |
731 | { |
732 | typedef unsigned long __integral_type; |
733 | typedef __atomic_base<unsigned long> __base_type; |
734 | |
735 | atomic() noexcept = default; |
736 | ~atomic() noexcept = default; |
737 | atomic(const atomic&) = delete; |
738 | atomic& operator=(const atomic&) = delete; |
739 | atomic& operator=(const atomic&) volatile = delete; |
740 | |
741 | constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { } |
742 | |
743 | using __base_type::operator __integral_type; |
744 | using __base_type::operator=; |
745 | |
746 | #if __cplusplus201103L > 201402L |
747 | static constexpr bool is_always_lock_free = ATOMIC_LONG_LOCK_FREE2 == 2; |
748 | #endif |
749 | }; |
750 | |
751 | /// Explicit specialization for long long. |
752 | template<> |
753 | struct atomic<long long> : __atomic_base<long long> |
754 | { |
755 | typedef long long __integral_type; |
756 | typedef __atomic_base<long long> __base_type; |
757 | |
758 | atomic() noexcept = default; |
759 | ~atomic() noexcept = default; |
760 | atomic(const atomic&) = delete; |
761 | atomic& operator=(const atomic&) = delete; |
762 | atomic& operator=(const atomic&) volatile = delete; |
763 | |
764 | constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { } |
765 | |
766 | using __base_type::operator __integral_type; |
767 | using __base_type::operator=; |
768 | |
769 | #if __cplusplus201103L > 201402L |
770 | static constexpr bool is_always_lock_free = ATOMIC_LLONG_LOCK_FREE2 == 2; |
771 | #endif |
772 | }; |
773 | |
774 | /// Explicit specialization for unsigned long long. |
775 | template<> |
776 | struct atomic<unsigned long long> : __atomic_base<unsigned long long> |
777 | { |
778 | typedef unsigned long long __integral_type; |
779 | typedef __atomic_base<unsigned long long> __base_type; |
780 | |
781 | atomic() noexcept = default; |
782 | ~atomic() noexcept = default; |
783 | atomic(const atomic&) = delete; |
784 | atomic& operator=(const atomic&) = delete; |
785 | atomic& operator=(const atomic&) volatile = delete; |
786 | |
787 | constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { } |
788 | |
789 | using __base_type::operator __integral_type; |
790 | using __base_type::operator=; |
791 | |
792 | #if __cplusplus201103L > 201402L |
793 | static constexpr bool is_always_lock_free = ATOMIC_LLONG_LOCK_FREE2 == 2; |
794 | #endif |
795 | }; |
796 | |
797 | /// Explicit specialization for wchar_t. |
798 | template<> |
799 | struct atomic<wchar_t> : __atomic_base<wchar_t> |
800 | { |
801 | typedef wchar_t __integral_type; |
802 | typedef __atomic_base<wchar_t> __base_type; |
803 | |
804 | atomic() noexcept = default; |
805 | ~atomic() noexcept = default; |
806 | atomic(const atomic&) = delete; |
807 | atomic& operator=(const atomic&) = delete; |
808 | atomic& operator=(const atomic&) volatile = delete; |
809 | |
810 | constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { } |
811 | |
812 | using __base_type::operator __integral_type; |
813 | using __base_type::operator=; |
814 | |
815 | #if __cplusplus201103L > 201402L |
816 | static constexpr bool is_always_lock_free = ATOMIC_WCHAR_T_LOCK_FREE2 == 2; |
817 | #endif |
818 | }; |
819 | |
820 | /// Explicit specialization for char16_t. |
821 | template<> |
822 | struct atomic<char16_t> : __atomic_base<char16_t> |
823 | { |
824 | typedef char16_t __integral_type; |
825 | typedef __atomic_base<char16_t> __base_type; |
826 | |
827 | atomic() noexcept = default; |
828 | ~atomic() noexcept = default; |
829 | atomic(const atomic&) = delete; |
830 | atomic& operator=(const atomic&) = delete; |
831 | atomic& operator=(const atomic&) volatile = delete; |
832 | |
833 | constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { } |
834 | |
835 | using __base_type::operator __integral_type; |
836 | using __base_type::operator=; |
837 | |
838 | #if __cplusplus201103L > 201402L |
839 | static constexpr bool is_always_lock_free = ATOMIC_CHAR16_T_LOCK_FREE2 == 2; |
840 | #endif |
841 | }; |
842 | |
843 | /// Explicit specialization for char32_t. |
844 | template<> |
845 | struct atomic<char32_t> : __atomic_base<char32_t> |
846 | { |
847 | typedef char32_t __integral_type; |
848 | typedef __atomic_base<char32_t> __base_type; |
849 | |
850 | atomic() noexcept = default; |
851 | ~atomic() noexcept = default; |
852 | atomic(const atomic&) = delete; |
853 | atomic& operator=(const atomic&) = delete; |
854 | atomic& operator=(const atomic&) volatile = delete; |
855 | |
856 | constexpr atomic(__integral_type __i) noexcept : __base_type(__i) { } |
857 | |
858 | using __base_type::operator __integral_type; |
859 | using __base_type::operator=; |
860 | |
861 | #if __cplusplus201103L > 201402L |
862 | static constexpr bool is_always_lock_free = ATOMIC_CHAR32_T_LOCK_FREE2 == 2; |
863 | #endif |
864 | }; |
865 | |
866 | |
  // Convenience typedefs for atomics over the standard character, integral
  // and pointer-arithmetic types ([atomics.syn]).

  /// atomic_bool
  typedef atomic<bool>			atomic_bool;

  /// atomic_char
  typedef atomic<char>			atomic_char;

  /// atomic_schar
  typedef atomic<signed char>		atomic_schar;

  /// atomic_uchar
  typedef atomic<unsigned char>		atomic_uchar;

  /// atomic_short
  typedef atomic<short>			atomic_short;

  /// atomic_ushort
  typedef atomic<unsigned short>	atomic_ushort;

  /// atomic_int
  typedef atomic<int>			atomic_int;

  /// atomic_uint
  typedef atomic<unsigned int>		atomic_uint;

  /// atomic_long
  typedef atomic<long>			atomic_long;

  /// atomic_ulong
  typedef atomic<unsigned long>		atomic_ulong;

  /// atomic_llong
  typedef atomic<long long>		atomic_llong;

  /// atomic_ullong
  typedef atomic<unsigned long long>	atomic_ullong;

  /// atomic_wchar_t
  typedef atomic<wchar_t>		atomic_wchar_t;

  /// atomic_char16_t
  typedef atomic<char16_t>		atomic_char16_t;

  /// atomic_char32_t
  typedef atomic<char32_t>		atomic_char32_t;


  // _GLIBCXX_RESOLVE_LIB_DEFECTS
  // 2441. Exact-width atomic typedefs should be provided

  /// atomic_int8_t
  typedef atomic<int8_t>		atomic_int8_t;

  /// atomic_uint8_t
  typedef atomic<uint8_t>		atomic_uint8_t;

  /// atomic_int16_t
  typedef atomic<int16_t>		atomic_int16_t;

  /// atomic_uint16_t
  typedef atomic<uint16_t>		atomic_uint16_t;

  /// atomic_int32_t
  typedef atomic<int32_t>		atomic_int32_t;

  /// atomic_uint32_t
  typedef atomic<uint32_t>		atomic_uint32_t;

  /// atomic_int64_t
  typedef atomic<int64_t>		atomic_int64_t;

  /// atomic_uint64_t
  typedef atomic<uint64_t>		atomic_uint64_t;


  /// atomic_int_least8_t
  typedef atomic<int_least8_t>		atomic_int_least8_t;

  /// atomic_uint_least8_t
  typedef atomic<uint_least8_t>		atomic_uint_least8_t;

  /// atomic_int_least16_t
  typedef atomic<int_least16_t>		atomic_int_least16_t;

  /// atomic_uint_least16_t
  typedef atomic<uint_least16_t>	atomic_uint_least16_t;

  /// atomic_int_least32_t
  typedef atomic<int_least32_t>		atomic_int_least32_t;

  /// atomic_uint_least32_t
  typedef atomic<uint_least32_t>	atomic_uint_least32_t;

  /// atomic_int_least64_t
  typedef atomic<int_least64_t>		atomic_int_least64_t;

  /// atomic_uint_least64_t
  typedef atomic<uint_least64_t>	atomic_uint_least64_t;


  /// atomic_int_fast8_t
  typedef atomic<int_fast8_t>		atomic_int_fast8_t;

  /// atomic_uint_fast8_t
  typedef atomic<uint_fast8_t>		atomic_uint_fast8_t;

  /// atomic_int_fast16_t
  typedef atomic<int_fast16_t>		atomic_int_fast16_t;

  /// atomic_uint_fast16_t
  typedef atomic<uint_fast16_t>		atomic_uint_fast16_t;

  /// atomic_int_fast32_t
  typedef atomic<int_fast32_t>		atomic_int_fast32_t;

  /// atomic_uint_fast32_t
  typedef atomic<uint_fast32_t>		atomic_uint_fast32_t;

  /// atomic_int_fast64_t
  typedef atomic<int_fast64_t>		atomic_int_fast64_t;

  /// atomic_uint_fast64_t
  typedef atomic<uint_fast64_t>		atomic_uint_fast64_t;


  /// atomic_intptr_t
  typedef atomic<intptr_t>		atomic_intptr_t;

  /// atomic_uintptr_t
  typedef atomic<uintptr_t>		atomic_uintptr_t;

  /// atomic_size_t
  typedef atomic<size_t>		atomic_size_t;

  /// atomic_intmax_t
  typedef atomic<intmax_t>		atomic_intmax_t;

  /// atomic_uintmax_t
  typedef atomic<uintmax_t>		atomic_uintmax_t;

  /// atomic_ptrdiff_t
  typedef atomic<ptrdiff_t>		atomic_ptrdiff_t;
1008 | |
1009 | |
  // Function definitions, atomic_flag operations.
  // Free-function interface ([atomics.flag]); each forwards to the
  // corresponding atomic_flag member function.

  /// Atomically set the flag and return its previous value, with
  /// explicit memory ordering.
  inline bool
  atomic_flag_test_and_set_explicit(atomic_flag* __a,
				    memory_order __m) noexcept
  { return __a->test_and_set(__m); }

  inline bool
  atomic_flag_test_and_set_explicit(volatile atomic_flag* __a,
				    memory_order __m) noexcept
  { return __a->test_and_set(__m); }

  /// Atomically clear the flag, with explicit memory ordering.
  inline void
  atomic_flag_clear_explicit(atomic_flag* __a, memory_order __m) noexcept
  { __a->clear(__m); }

  inline void
  atomic_flag_clear_explicit(volatile atomic_flag* __a,
			     memory_order __m) noexcept
  { __a->clear(__m); }

  /// Atomically set the flag and return its previous value
  /// (seq_cst ordering).
  inline bool
  atomic_flag_test_and_set(atomic_flag* __a) noexcept
  { return atomic_flag_test_and_set_explicit(__a, memory_order_seq_cst); }

  inline bool
  atomic_flag_test_and_set(volatile atomic_flag* __a) noexcept
  { return atomic_flag_test_and_set_explicit(__a, memory_order_seq_cst); }

  /// Atomically clear the flag (seq_cst ordering).
  inline void
  atomic_flag_clear(atomic_flag* __a) noexcept
  { atomic_flag_clear_explicit(__a, memory_order_seq_cst); }

  inline void
  atomic_flag_clear(volatile atomic_flag* __a) noexcept
  { atomic_flag_clear_explicit(__a, memory_order_seq_cst); }
1045 | |
1046 | |
  // Function templates generally applicable to atomic types.
  // These are the C-compatible free-function forms; each forwards to
  // the equivalent member function of atomic<_ITp>.
  template<typename _ITp>
    inline bool
    atomic_is_lock_free(const atomic<_ITp>* __a) noexcept
    { return __a->is_lock_free(); }

  template<typename _ITp>
    inline bool
    atomic_is_lock_free(const volatile atomic<_ITp>* __a) noexcept
    { return __a->is_lock_free(); }

  // Non-atomic initialization; relaxed store is sufficient because
  // atomic_init must not be used concurrently with other accesses.
  template<typename _ITp>
    inline void
    atomic_init(atomic<_ITp>* __a, _ITp __i) noexcept
    { __a->store(__i, memory_order_relaxed); }

  template<typename _ITp>
    inline void
    atomic_init(volatile atomic<_ITp>* __a, _ITp __i) noexcept
    { __a->store(__i, memory_order_relaxed); }

  template<typename _ITp>
    inline void
    atomic_store_explicit(atomic<_ITp>* __a, _ITp __i,
                          memory_order __m) noexcept
    { __a->store(__i, __m); }

  template<typename _ITp>
    inline void
    atomic_store_explicit(volatile atomic<_ITp>* __a, _ITp __i,
                          memory_order __m) noexcept
    { __a->store(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_load_explicit(const atomic<_ITp>* __a, memory_order __m) noexcept
    { return __a->load(__m); }

  template<typename _ITp>
    inline _ITp
    atomic_load_explicit(const volatile atomic<_ITp>* __a,
                         memory_order __m) noexcept
    { return __a->load(__m); }

  template<typename _ITp>
    inline _ITp
    atomic_exchange_explicit(atomic<_ITp>* __a, _ITp __i,
                             memory_order __m) noexcept
    { return __a->exchange(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_exchange_explicit(volatile atomic<_ITp>* __a, _ITp __i,
                             memory_order __m) noexcept
    { return __a->exchange(__i, __m); }

  // Compare-exchange forms: *__i1 is the expected value and is updated
  // with the observed value on failure; __i2 is the desired value.
  template<typename _ITp>
    inline bool
    atomic_compare_exchange_weak_explicit(atomic<_ITp>* __a,
                                          _ITp* __i1, _ITp __i2,
                                          memory_order __m1,
                                          memory_order __m2) noexcept
    { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_weak_explicit(volatile atomic<_ITp>* __a,
                                          _ITp* __i1, _ITp __i2,
                                          memory_order __m1,
                                          memory_order __m2) noexcept
    { return __a->compare_exchange_weak(*__i1, __i2, __m1, __m2); }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_strong_explicit(atomic<_ITp>* __a,
                                            _ITp* __i1, _ITp __i2,
                                            memory_order __m1,
                                            memory_order __m2) noexcept
    { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_strong_explicit(volatile atomic<_ITp>* __a,
                                            _ITp* __i1, _ITp __i2,
                                            memory_order __m1,
                                            memory_order __m2) noexcept
    { return __a->compare_exchange_strong(*__i1, __i2, __m1, __m2); }


  // Non-_explicit forms default every ordering to memory_order_seq_cst.
  template<typename _ITp>
    inline void
    atomic_store(atomic<_ITp>* __a, _ITp __i) noexcept
    { atomic_store_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline void
    atomic_store(volatile atomic<_ITp>* __a, _ITp __i) noexcept
    { atomic_store_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_load(const atomic<_ITp>* __a) noexcept
    { return atomic_load_explicit(__a, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_load(const volatile atomic<_ITp>* __a) noexcept
    { return atomic_load_explicit(__a, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_exchange(atomic<_ITp>* __a, _ITp __i) noexcept
    { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_exchange(volatile atomic<_ITp>* __a, _ITp __i) noexcept
    { return atomic_exchange_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_weak(atomic<_ITp>* __a,
                                 _ITp* __i1, _ITp __i2) noexcept
    {
      return atomic_compare_exchange_weak_explicit(__a, __i1, __i2,
                                                   memory_order_seq_cst,
                                                   memory_order_seq_cst);
    }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_weak(volatile atomic<_ITp>* __a,
                                 _ITp* __i1, _ITp __i2) noexcept
    {
      return atomic_compare_exchange_weak_explicit(__a, __i1, __i2,
                                                   memory_order_seq_cst,
                                                   memory_order_seq_cst);
    }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_strong(atomic<_ITp>* __a,
                                   _ITp* __i1, _ITp __i2) noexcept
    {
      return atomic_compare_exchange_strong_explicit(__a, __i1, __i2,
                                                     memory_order_seq_cst,
                                                     memory_order_seq_cst);
    }

  template<typename _ITp>
    inline bool
    atomic_compare_exchange_strong(volatile atomic<_ITp>* __a,
                                   _ITp* __i1, _ITp __i2) noexcept
    {
      return atomic_compare_exchange_strong_explicit(__a, __i1, __i2,
                                                     memory_order_seq_cst,
                                                     memory_order_seq_cst);
    }
1205 | |
  // Function templates for atomic_integral operations only, using
  // __atomic_base.  Template argument should be constrained to
  // integral types as specified in the standard, excluding address
  // types.
  template<typename _ITp>
    inline _ITp
    atomic_fetch_add_explicit(__atomic_base<_ITp>* __a, _ITp __i,
                              memory_order __m) noexcept
    { return __a->fetch_add(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_add_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
                              memory_order __m) noexcept
    { return __a->fetch_add(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_sub_explicit(__atomic_base<_ITp>* __a, _ITp __i,
                              memory_order __m) noexcept
    { return __a->fetch_sub(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_sub_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
                              memory_order __m) noexcept
    { return __a->fetch_sub(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_and_explicit(__atomic_base<_ITp>* __a, _ITp __i,
                              memory_order __m) noexcept
    { return __a->fetch_and(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_and_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
                              memory_order __m) noexcept
    { return __a->fetch_and(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_or_explicit(__atomic_base<_ITp>* __a, _ITp __i,
                             memory_order __m) noexcept
    { return __a->fetch_or(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_or_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
                             memory_order __m) noexcept
    { return __a->fetch_or(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_xor_explicit(__atomic_base<_ITp>* __a, _ITp __i,
                              memory_order __m) noexcept
    { return __a->fetch_xor(__i, __m); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_xor_explicit(volatile __atomic_base<_ITp>* __a, _ITp __i,
                              memory_order __m) noexcept
    { return __a->fetch_xor(__i, __m); }

  // Non-_explicit forms default to sequentially-consistent ordering.
  template<typename _ITp>
    inline _ITp
    atomic_fetch_add(__atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_add(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_add_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_sub(__atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_sub(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_sub_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_and(__atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_and(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_and_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_or(__atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_or(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_or_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_xor(__atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }

  template<typename _ITp>
    inline _ITp
    atomic_fetch_xor(volatile __atomic_base<_ITp>* __a, _ITp __i) noexcept
    { return atomic_fetch_xor_explicit(__a, __i, memory_order_seq_cst); }
1319 | |
1320 | |
  // Partial specializations for pointers.
  // For atomic<_ITp*> the offset __d counts objects, not bytes; the
  // member fetch_add/fetch_sub scale by sizeof(_ITp).
  template<typename _ITp>
    inline _ITp*
    atomic_fetch_add_explicit(atomic<_ITp*>* __a, ptrdiff_t __d,
                              memory_order __m) noexcept
    { return __a->fetch_add(__d, __m); }

  template<typename _ITp>
    inline _ITp*
    atomic_fetch_add_explicit(volatile atomic<_ITp*>* __a, ptrdiff_t __d,
                              memory_order __m) noexcept
    { return __a->fetch_add(__d, __m); }

  template<typename _ITp>
    inline _ITp*
    atomic_fetch_add(volatile atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
    { return __a->fetch_add(__d); }

  template<typename _ITp>
    inline _ITp*
    atomic_fetch_add(atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
    { return __a->fetch_add(__d); }

  template<typename _ITp>
    inline _ITp*
    atomic_fetch_sub_explicit(volatile atomic<_ITp*>* __a,
                              ptrdiff_t __d, memory_order __m) noexcept
    { return __a->fetch_sub(__d, __m); }

  template<typename _ITp>
    inline _ITp*
    atomic_fetch_sub_explicit(atomic<_ITp*>* __a, ptrdiff_t __d,
                              memory_order __m) noexcept
    { return __a->fetch_sub(__d, __m); }

  template<typename _ITp>
    inline _ITp*
    atomic_fetch_sub(volatile atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
    { return __a->fetch_sub(__d); }

  template<typename _ITp>
    inline _ITp*
    atomic_fetch_sub(atomic<_ITp*>* __a, ptrdiff_t __d) noexcept
    { return __a->fetch_sub(__d); }
1365 | // @} group atomics |
1366 | |
1367 | _GLIBCXX_END_NAMESPACE_VERSION |
1368 | } // namespace |
1369 | |
1370 | #endif // C++11 |
1371 | |
1372 | #endif // _GLIBCXX_ATOMIC |
1 | // -*- C++ -*- header. |
2 | |
3 | // Copyright (C) 2008-2017 Free Software Foundation, Inc. |
4 | // |
5 | // This file is part of the GNU ISO C++ Library. This library is free |
6 | // software; you can redistribute it and/or modify it under the |
7 | // terms of the GNU General Public License as published by the |
8 | // Free Software Foundation; either version 3, or (at your option) |
9 | // any later version. |
10 | |
11 | // This library is distributed in the hope that it will be useful, |
12 | // but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | // GNU General Public License for more details. |
15 | |
16 | // Under Section 7 of GPL version 3, you are granted additional |
17 | // permissions described in the GCC Runtime Library Exception, version |
18 | // 3.1, as published by the Free Software Foundation. |
19 | |
20 | // You should have received a copy of the GNU General Public License and |
21 | // a copy of the GCC Runtime Library Exception along with this program; |
22 | // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see |
23 | // <http://www.gnu.org/licenses/>. |
24 | |
25 | /** @file bits/atomic_base.h |
26 | * This is an internal header file, included by other library headers. |
27 | * Do not attempt to use it directly. @headername{atomic} |
28 | */ |
29 | |
30 | #ifndef _GLIBCXX_ATOMIC_BASE_H1 |
31 | #define _GLIBCXX_ATOMIC_BASE_H1 1 |
32 | |
33 | #pragma GCC system_header |
34 | |
35 | #include <bits/c++config.h> |
36 | #include <stdint.h> |
37 | #include <bits/atomic_lockfree_defines.h> |
38 | |
// NOTE(review): the report renderer fused the macro name with its
// expansion; restore the canonical guard + definition.
#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif
42 | |
43 | namespace std _GLIBCXX_VISIBILITY(default)__attribute__ ((__visibility__ ("default"))) |
44 | { |
45 | _GLIBCXX_BEGIN_NAMESPACE_VERSION |
46 | |
47 | /** |
48 | * @defgroup atomics Atomics |
49 | * |
50 | * Components for performing atomic operations. |
51 | * @{ |
52 | */ |
53 | |
54 | /// Enumeration for memory_order |
55 | typedef enum memory_order |
56 | { |
57 | memory_order_relaxed, |
58 | memory_order_consume, |
59 | memory_order_acquire, |
60 | memory_order_release, |
61 | memory_order_acq_rel, |
62 | memory_order_seq_cst |
63 | } memory_order; |
64 | |
65 | enum __memory_order_modifier |
66 | { |
67 | __memory_order_mask = 0x0ffff, |
68 | __memory_order_modifier_mask = 0xffff0000, |
69 | __memory_order_hle_acquire = 0x10000, |
70 | __memory_order_hle_release = 0x20000 |
71 | }; |
72 | |
73 | constexpr memory_order |
74 | operator|(memory_order __m, __memory_order_modifier __mod) |
75 | { |
76 | return memory_order(__m | int(__mod)); |
77 | } |
78 | |
79 | constexpr memory_order |
80 | operator&(memory_order __m, __memory_order_modifier __mod) |
81 | { |
82 | return memory_order(__m & int(__mod)); |
83 | } |
84 | |
  // Drop release ordering as per [atomics.types.operations.req]/21:
  // the failure order of a compare-exchange may not include a release
  // component (there is no store on the failure path).
  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  // Derive the failure order from a success order: strip release
  // semantics from the base order while preserving any
  // implementation-defined modifier bits (e.g. HLE hints).
  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | (__m & __memory_order_modifier_mask));
  }
99 | |
100 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) void |
101 | atomic_thread_fence(memory_order __m) noexcept |
102 | { __atomic_thread_fence(__m); } |
103 | |
104 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) void |
105 | atomic_signal_fence(memory_order __m) noexcept |
106 | { __atomic_signal_fence(__m); } |
107 | |
108 | /// kill_dependency |
109 | template<typename _Tp> |
110 | inline _Tp |
111 | kill_dependency(_Tp __y) noexcept |
112 | { |
113 | _Tp __ret(__y); |
114 | return __ret; |
115 | } |
116 | |
117 | |
118 | // Base types for atomics. |
119 | template<typename _IntTp> |
120 | struct __atomic_base; |
121 | |
122 | |
123 | #define ATOMIC_VAR_INIT(_VI){ _VI } { _VI } |
124 | |
125 | template<typename _Tp> |
126 | struct atomic; |
127 | |
128 | template<typename _Tp> |
129 | struct atomic<_Tp*>; |
130 | |
131 | /* The target's "set" value for test-and-set may not be exactly 1. */ |
132 | #if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL1 == 1 |
133 | typedef bool __atomic_flag_data_type; |
134 | #else |
135 | typedef unsigned char __atomic_flag_data_type; |
136 | #endif |
137 | |
138 | /** |
139 | * @brief Base type for atomic_flag. |
140 | * |
141 | * Base type is POD with data, allowing atomic_flag to derive from |
142 | * it and meet the standard layout type requirement. In addition to |
143 | * compatibility with a C interface, this allows different |
144 | * implementations of atomic_flag to use the same atomic operation |
145 | * functions, via a standard conversion to the __atomic_flag_base |
146 | * argument. |
147 | */ |
148 | _GLIBCXX_BEGIN_EXTERN_Cextern "C" { |
149 | |
150 | struct __atomic_flag_base |
151 | { |
152 | __atomic_flag_data_type _M_i; |
153 | }; |
154 | |
155 | _GLIBCXX_END_EXTERN_C} |
156 | |
157 | #define ATOMIC_FLAG_INIT{ 0 } { 0 } |
158 | |
159 | /// atomic_flag |
160 | struct atomic_flag : public __atomic_flag_base |
161 | { |
162 | atomic_flag() noexcept = default; |
163 | ~atomic_flag() noexcept = default; |
164 | atomic_flag(const atomic_flag&) = delete; |
165 | atomic_flag& operator=(const atomic_flag&) = delete; |
166 | atomic_flag& operator=(const atomic_flag&) volatile = delete; |
167 | |
168 | // Conversion to ATOMIC_FLAG_INIT. |
169 | constexpr atomic_flag(bool __i) noexcept |
170 | : __atomic_flag_base{ _S_init(__i) } |
171 | { } |
172 | |
173 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) bool |
174 | test_and_set(memory_order __m = memory_order_seq_cst) noexcept |
175 | { |
176 | return __atomic_test_and_set (&_M_i, __m); |
177 | } |
178 | |
179 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) bool |
180 | test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept |
181 | { |
182 | return __atomic_test_and_set (&_M_i, __m); |
183 | } |
184 | |
185 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) void |
186 | clear(memory_order __m = memory_order_seq_cst) noexcept |
187 | { |
188 | memory_order __b = __m & __memory_order_mask; |
189 | __glibcxx_assert(__b != memory_order_consume); |
190 | __glibcxx_assert(__b != memory_order_acquire); |
191 | __glibcxx_assert(__b != memory_order_acq_rel); |
192 | |
193 | __atomic_clear (&_M_i, __m); |
194 | } |
195 | |
196 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) void |
197 | clear(memory_order __m = memory_order_seq_cst) volatile noexcept |
198 | { |
199 | memory_order __b = __m & __memory_order_mask; |
200 | __glibcxx_assert(__b != memory_order_consume); |
201 | __glibcxx_assert(__b != memory_order_acquire); |
202 | __glibcxx_assert(__b != memory_order_acq_rel); |
203 | |
204 | __atomic_clear (&_M_i, __m); |
205 | } |
206 | |
207 | private: |
208 | static constexpr __atomic_flag_data_type |
209 | _S_init(bool __i) |
210 | { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL1 : 0; } |
211 | }; |
212 | |
213 | |
214 | /// Base class for atomic integrals. |
215 | // |
216 | // For each of the integral types, define atomic_[integral type] struct |
217 | // |
218 | // atomic_bool bool |
219 | // atomic_char char |
220 | // atomic_schar signed char |
221 | // atomic_uchar unsigned char |
222 | // atomic_short short |
223 | // atomic_ushort unsigned short |
224 | // atomic_int int |
225 | // atomic_uint unsigned int |
226 | // atomic_long long |
227 | // atomic_ulong unsigned long |
228 | // atomic_llong long long |
229 | // atomic_ullong unsigned long long |
230 | // atomic_char16_t char16_t |
231 | // atomic_char32_t char32_t |
232 | // atomic_wchar_t wchar_t |
233 | // |
234 | // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or |
235 | // 8 bytes, since that is what GCC built-in functions for atomic |
236 | // memory access expect. |
237 | template<typename _ITp> |
238 | struct __atomic_base |
239 | { |
240 | private: |
241 | typedef _ITp __int_type; |
242 | |
243 | static constexpr int _S_alignment = |
244 | sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp); |
245 | |
246 | alignas(_S_alignment) __int_type _M_i; |
247 | |
248 | public: |
249 | __atomic_base() noexcept = default; |
250 | ~__atomic_base() noexcept = default; |
251 | __atomic_base(const __atomic_base&) = delete; |
252 | __atomic_base& operator=(const __atomic_base&) = delete; |
253 | __atomic_base& operator=(const __atomic_base&) volatile = delete; |
254 | |
255 | // Requires __int_type convertible to _M_i. |
256 | constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { } |
257 | |
258 | operator __int_type() const noexcept |
259 | { return load(); } |
260 | |
261 | operator __int_type() const volatile noexcept |
262 | { return load(); } |
263 | |
264 | __int_type |
265 | operator=(__int_type __i) noexcept |
266 | { |
267 | store(__i); |
268 | return __i; |
269 | } |
270 | |
271 | __int_type |
272 | operator=(__int_type __i) volatile noexcept |
273 | { |
274 | store(__i); |
275 | return __i; |
276 | } |
277 | |
278 | __int_type |
279 | operator++(int) noexcept |
280 | { return fetch_add(1); } |
281 | |
282 | __int_type |
283 | operator++(int) volatile noexcept |
284 | { return fetch_add(1); } |
285 | |
286 | __int_type |
287 | operator--(int) noexcept |
288 | { return fetch_sub(1); } |
289 | |
290 | __int_type |
291 | operator--(int) volatile noexcept |
292 | { return fetch_sub(1); } |
293 | |
294 | __int_type |
295 | operator++() noexcept |
296 | { return __atomic_add_fetch(&_M_i, 1, memory_order_seq_cst); } |
297 | |
298 | __int_type |
299 | operator++() volatile noexcept |
300 | { return __atomic_add_fetch(&_M_i, 1, memory_order_seq_cst); } |
301 | |
302 | __int_type |
303 | operator--() noexcept |
304 | { return __atomic_sub_fetch(&_M_i, 1, memory_order_seq_cst); } |
305 | |
306 | __int_type |
307 | operator--() volatile noexcept |
308 | { return __atomic_sub_fetch(&_M_i, 1, memory_order_seq_cst); } |
309 | |
310 | __int_type |
311 | operator+=(__int_type __i) noexcept |
312 | { return __atomic_add_fetch(&_M_i, __i, memory_order_seq_cst); } |
313 | |
314 | __int_type |
315 | operator+=(__int_type __i) volatile noexcept |
316 | { return __atomic_add_fetch(&_M_i, __i, memory_order_seq_cst); } |
317 | |
318 | __int_type |
319 | operator-=(__int_type __i) noexcept |
320 | { return __atomic_sub_fetch(&_M_i, __i, memory_order_seq_cst); } |
321 | |
322 | __int_type |
323 | operator-=(__int_type __i) volatile noexcept |
324 | { return __atomic_sub_fetch(&_M_i, __i, memory_order_seq_cst); } |
325 | |
326 | __int_type |
327 | operator&=(__int_type __i) noexcept |
328 | { return __atomic_and_fetch(&_M_i, __i, memory_order_seq_cst); } |
329 | |
330 | __int_type |
331 | operator&=(__int_type __i) volatile noexcept |
332 | { return __atomic_and_fetch(&_M_i, __i, memory_order_seq_cst); } |
333 | |
334 | __int_type |
335 | operator|=(__int_type __i) noexcept |
336 | { return __atomic_or_fetch(&_M_i, __i, memory_order_seq_cst); } |
337 | |
338 | __int_type |
339 | operator|=(__int_type __i) volatile noexcept |
340 | { return __atomic_or_fetch(&_M_i, __i, memory_order_seq_cst); } |
341 | |
342 | __int_type |
343 | operator^=(__int_type __i) noexcept |
344 | { return __atomic_xor_fetch(&_M_i, __i, memory_order_seq_cst); } |
345 | |
346 | __int_type |
347 | operator^=(__int_type __i) volatile noexcept |
348 | { return __atomic_xor_fetch(&_M_i, __i, memory_order_seq_cst); } |
349 | |
350 | bool |
351 | is_lock_free() const noexcept |
352 | { |
353 | // Use a fake, minimally aligned pointer. |
354 | return __atomic_is_lock_free(sizeof(_M_i), |
355 | reinterpret_cast<void *>(-__alignof(_M_i))); |
356 | } |
357 | |
358 | bool |
359 | is_lock_free() const volatile noexcept |
360 | { |
361 | // Use a fake, minimally aligned pointer. |
362 | return __atomic_is_lock_free(sizeof(_M_i), |
363 | reinterpret_cast<void *>(-__alignof(_M_i))); |
364 | } |
365 | |
366 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) void |
367 | store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept |
368 | { |
369 | memory_order __b = __m & __memory_order_mask; |
370 | __glibcxx_assert(__b != memory_order_acquire); |
371 | __glibcxx_assert(__b != memory_order_acq_rel); |
372 | __glibcxx_assert(__b != memory_order_consume); |
373 | |
374 | __atomic_store_n(&_M_i, __i, __m); |
375 | } |
376 | |
377 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) void |
378 | store(__int_type __i, |
379 | memory_order __m = memory_order_seq_cst) volatile noexcept |
380 | { |
381 | memory_order __b = __m & __memory_order_mask; |
382 | __glibcxx_assert(__b != memory_order_acquire); |
383 | __glibcxx_assert(__b != memory_order_acq_rel); |
384 | __glibcxx_assert(__b != memory_order_consume); |
385 | |
386 | __atomic_store_n(&_M_i, __i, __m); |
387 | } |
388 | |
389 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __int_type |
390 | load(memory_order __m = memory_order_seq_cst) const noexcept |
391 | { |
392 | memory_order __b = __m & __memory_order_mask; |
393 | __glibcxx_assert(__b != memory_order_release); |
394 | __glibcxx_assert(__b != memory_order_acq_rel); |
395 | |
396 | return __atomic_load_n(&_M_i, __m); |
397 | } |
398 | |
399 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __int_type |
400 | load(memory_order __m = memory_order_seq_cst) const volatile noexcept |
401 | { |
402 | memory_order __b = __m & __memory_order_mask; |
403 | __glibcxx_assert(__b != memory_order_release); |
404 | __glibcxx_assert(__b != memory_order_acq_rel); |
405 | |
406 | return __atomic_load_n(&_M_i, __m); |
407 | } |
408 | |
409 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __int_type |
410 | exchange(__int_type __i, |
411 | memory_order __m = memory_order_seq_cst) noexcept |
412 | { |
413 | return __atomic_exchange_n(&_M_i, __i, __m); |
414 | } |
415 | |
416 | |
417 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __int_type |
418 | exchange(__int_type __i, |
419 | memory_order __m = memory_order_seq_cst) volatile noexcept |
420 | { |
421 | return __atomic_exchange_n(&_M_i, __i, __m); |
422 | } |
423 | |
424 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) bool |
425 | compare_exchange_weak(__int_type& __i1, __int_type __i2, |
426 | memory_order __m1, memory_order __m2) noexcept |
427 | { |
428 | memory_order __b2 = __m2 & __memory_order_mask; |
429 | memory_order __b1 = __m1 & __memory_order_mask; |
430 | __glibcxx_assert(__b2 != memory_order_release); |
431 | __glibcxx_assert(__b2 != memory_order_acq_rel); |
432 | __glibcxx_assert(__b2 <= __b1); |
433 | |
434 | return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2); |
435 | } |
436 | |
437 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) bool |
438 | compare_exchange_weak(__int_type& __i1, __int_type __i2, |
439 | memory_order __m1, |
440 | memory_order __m2) volatile noexcept |
441 | { |
442 | memory_order __b2 = __m2 & __memory_order_mask; |
443 | memory_order __b1 = __m1 & __memory_order_mask; |
444 | __glibcxx_assert(__b2 != memory_order_release); |
445 | __glibcxx_assert(__b2 != memory_order_acq_rel); |
446 | __glibcxx_assert(__b2 <= __b1); |
447 | |
448 | return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1, __m1, __m2); |
449 | } |
450 | |
451 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) bool |
452 | compare_exchange_weak(__int_type& __i1, __int_type __i2, |
453 | memory_order __m = memory_order_seq_cst) noexcept |
454 | { |
455 | return compare_exchange_weak(__i1, __i2, __m, |
456 | __cmpexch_failure_order(__m)); |
457 | } |
458 | |
459 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) bool |
460 | compare_exchange_weak(__int_type& __i1, __int_type __i2, |
461 | memory_order __m = memory_order_seq_cst) volatile noexcept |
462 | { |
463 | return compare_exchange_weak(__i1, __i2, __m, |
464 | __cmpexch_failure_order(__m)); |
465 | } |
466 | |
467 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) bool |
468 | compare_exchange_strong(__int_type& __i1, __int_type __i2, |
469 | memory_order __m1, memory_order __m2) noexcept |
470 | { |
471 | memory_order __b2 = __m2 & __memory_order_mask; |
472 | memory_order __b1 = __m1 & __memory_order_mask; |
473 | __glibcxx_assert(__b2 != memory_order_release); |
474 | __glibcxx_assert(__b2 != memory_order_acq_rel); |
475 | __glibcxx_assert(__b2 <= __b1); |
476 | |
477 | return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2); |
478 | } |
479 | |
480 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) bool |
481 | compare_exchange_strong(__int_type& __i1, __int_type __i2, |
482 | memory_order __m1, |
483 | memory_order __m2) volatile noexcept |
484 | { |
485 | memory_order __b2 = __m2 & __memory_order_mask; |
486 | memory_order __b1 = __m1 & __memory_order_mask; |
487 | |
488 | __glibcxx_assert(__b2 != memory_order_release); |
489 | __glibcxx_assert(__b2 != memory_order_acq_rel); |
490 | __glibcxx_assert(__b2 <= __b1); |
491 | |
492 | return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0, __m1, __m2); |
493 | } |
494 | |
495 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) bool |
496 | compare_exchange_strong(__int_type& __i1, __int_type __i2, |
497 | memory_order __m = memory_order_seq_cst) noexcept |
498 | { |
499 | return compare_exchange_strong(__i1, __i2, __m, |
500 | __cmpexch_failure_order(__m)); |
501 | } |
502 | |
503 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) bool |
504 | compare_exchange_strong(__int_type& __i1, __int_type __i2, |
505 | memory_order __m = memory_order_seq_cst) volatile noexcept |
506 | { |
507 | return compare_exchange_strong(__i1, __i2, __m, |
508 | __cmpexch_failure_order(__m)); |
509 | } |
510 | |
511 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __int_type |
512 | fetch_add(__int_type __i, |
513 | memory_order __m = memory_order_seq_cst) noexcept |
514 | { return __atomic_fetch_add(&_M_i, __i, __m); } |
515 | |
516 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __int_type |
517 | fetch_add(__int_type __i, |
518 | memory_order __m = memory_order_seq_cst) volatile noexcept |
519 | { return __atomic_fetch_add(&_M_i, __i, __m); } |
520 | |
521 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __int_type |
522 | fetch_sub(__int_type __i, |
523 | memory_order __m = memory_order_seq_cst) noexcept |
524 | { return __atomic_fetch_sub(&_M_i, __i, __m); } |
525 | |
526 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __int_type |
527 | fetch_sub(__int_type __i, |
528 | memory_order __m = memory_order_seq_cst) volatile noexcept |
529 | { return __atomic_fetch_sub(&_M_i, __i, __m); } |
530 | |
531 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __int_type |
532 | fetch_and(__int_type __i, |
533 | memory_order __m = memory_order_seq_cst) noexcept |
534 | { return __atomic_fetch_and(&_M_i, __i, __m); } |
535 | |
536 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __int_type |
537 | fetch_and(__int_type __i, |
538 | memory_order __m = memory_order_seq_cst) volatile noexcept |
539 | { return __atomic_fetch_and(&_M_i, __i, __m); } |
540 | |
541 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __int_type |
542 | fetch_or(__int_type __i, |
543 | memory_order __m = memory_order_seq_cst) noexcept |
544 | { return __atomic_fetch_or(&_M_i, __i, __m); } |
545 | |
546 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __int_type |
547 | fetch_or(__int_type __i, |
548 | memory_order __m = memory_order_seq_cst) volatile noexcept |
549 | { return __atomic_fetch_or(&_M_i, __i, __m); } |
550 | |
551 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __int_type |
552 | fetch_xor(__int_type __i, |
553 | memory_order __m = memory_order_seq_cst) noexcept |
554 | { return __atomic_fetch_xor(&_M_i, __i, __m); } |
555 | |
556 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __int_type |
557 | fetch_xor(__int_type __i, |
558 | memory_order __m = memory_order_seq_cst) volatile noexcept |
559 | { return __atomic_fetch_xor(&_M_i, __i, __m); } |
560 | }; |
561 | |
562 | |
563 | /// Partial specialization for pointer types. |
564 | template<typename _PTp> |
565 | struct __atomic_base<_PTp*> |
566 | { |
567 | private: |
568 | typedef _PTp* __pointer_type; |
569 | |
570 | __pointer_type _M_p; |
571 | |
572 | // Factored out to facilitate explicit specialization. |
573 | constexpr ptrdiff_t |
574 | _M_type_size(ptrdiff_t __d) const { return __d * sizeof(_PTp); } |
575 | |
576 | constexpr ptrdiff_t |
577 | _M_type_size(ptrdiff_t __d) const volatile { return __d * sizeof(_PTp); } |
578 | |
579 | public: |
580 | __atomic_base() noexcept = default; |
581 | ~__atomic_base() noexcept = default; |
582 | __atomic_base(const __atomic_base&) = delete; |
583 | __atomic_base& operator=(const __atomic_base&) = delete; |
584 | __atomic_base& operator=(const __atomic_base&) volatile = delete; |
585 | |
586 | // Requires __pointer_type convertible to _M_p. |
587 | constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { } |
588 | |
589 | operator __pointer_type() const noexcept |
590 | { return load(); } |
591 | |
592 | operator __pointer_type() const volatile noexcept |
593 | { return load(); } |
594 | |
595 | __pointer_type |
596 | operator=(__pointer_type __p) noexcept |
597 | { |
598 | store(__p); |
599 | return __p; |
600 | } |
601 | |
602 | __pointer_type |
603 | operator=(__pointer_type __p) volatile noexcept |
604 | { |
605 | store(__p); |
606 | return __p; |
607 | } |
608 | |
609 | __pointer_type |
610 | operator++(int) noexcept |
611 | { return fetch_add(1); } |
612 | |
613 | __pointer_type |
614 | operator++(int) volatile noexcept |
615 | { return fetch_add(1); } |
616 | |
617 | __pointer_type |
618 | operator--(int) noexcept |
619 | { return fetch_sub(1); } |
620 | |
621 | __pointer_type |
622 | operator--(int) volatile noexcept |
623 | { return fetch_sub(1); } |
624 | |
625 | __pointer_type |
626 | operator++() noexcept |
627 | { return __atomic_add_fetch(&_M_p, _M_type_size(1), |
628 | memory_order_seq_cst); } |
629 | |
630 | __pointer_type |
631 | operator++() volatile noexcept |
632 | { return __atomic_add_fetch(&_M_p, _M_type_size(1), |
633 | memory_order_seq_cst); } |
634 | |
635 | __pointer_type |
636 | operator--() noexcept |
637 | { return __atomic_sub_fetch(&_M_p, _M_type_size(1), |
638 | memory_order_seq_cst); } |
639 | |
640 | __pointer_type |
641 | operator--() volatile noexcept |
642 | { return __atomic_sub_fetch(&_M_p, _M_type_size(1), |
643 | memory_order_seq_cst); } |
644 | |
645 | __pointer_type |
646 | operator+=(ptrdiff_t __d) noexcept |
647 | { return __atomic_add_fetch(&_M_p, _M_type_size(__d), |
648 | memory_order_seq_cst); } |
649 | |
650 | __pointer_type |
651 | operator+=(ptrdiff_t __d) volatile noexcept |
652 | { return __atomic_add_fetch(&_M_p, _M_type_size(__d), |
653 | memory_order_seq_cst); } |
654 | |
655 | __pointer_type |
656 | operator-=(ptrdiff_t __d) noexcept |
657 | { return __atomic_sub_fetch(&_M_p, _M_type_size(__d), |
658 | memory_order_seq_cst); } |
659 | |
660 | __pointer_type |
661 | operator-=(ptrdiff_t __d) volatile noexcept |
662 | { return __atomic_sub_fetch(&_M_p, _M_type_size(__d), |
663 | memory_order_seq_cst); } |
664 | |
665 | bool |
666 | is_lock_free() const noexcept |
667 | { |
668 | // Produce a fake, minimally aligned pointer. |
669 | return __atomic_is_lock_free(sizeof(_M_p), |
670 | reinterpret_cast<void *>(-__alignof(_M_p))); |
671 | } |
672 | |
673 | bool |
674 | is_lock_free() const volatile noexcept |
675 | { |
676 | // Produce a fake, minimally aligned pointer. |
677 | return __atomic_is_lock_free(sizeof(_M_p), |
678 | reinterpret_cast<void *>(-__alignof(_M_p))); |
679 | } |
680 | |
681 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) void |
682 | store(__pointer_type __p, |
683 | memory_order __m = memory_order_seq_cst) noexcept |
684 | { |
685 | memory_order __b = __m & __memory_order_mask; |
686 | |
687 | __glibcxx_assert(__b != memory_order_acquire); |
688 | __glibcxx_assert(__b != memory_order_acq_rel); |
689 | __glibcxx_assert(__b != memory_order_consume); |
690 | |
691 | __atomic_store_n(&_M_p, __p, __m); |
692 | } |
693 | |
694 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) void |
695 | store(__pointer_type __p, |
696 | memory_order __m = memory_order_seq_cst) volatile noexcept |
697 | { |
698 | memory_order __b = __m & __memory_order_mask; |
699 | __glibcxx_assert(__b != memory_order_acquire); |
700 | __glibcxx_assert(__b != memory_order_acq_rel); |
701 | __glibcxx_assert(__b != memory_order_consume); |
702 | |
703 | __atomic_store_n(&_M_p, __p, __m); |
704 | } |
705 | |
706 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __pointer_type |
707 | load(memory_order __m = memory_order_seq_cst) const noexcept |
708 | { |
709 | memory_order __b = __m & __memory_order_mask; |
710 | __glibcxx_assert(__b != memory_order_release); |
711 | __glibcxx_assert(__b != memory_order_acq_rel); |
712 | |
713 | return __atomic_load_n(&_M_p, __m); |
714 | } |
715 | |
716 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __pointer_type |
717 | load(memory_order __m = memory_order_seq_cst) const volatile noexcept |
718 | { |
719 | memory_order __b = __m & __memory_order_mask; |
720 | __glibcxx_assert(__b != memory_order_release); |
721 | __glibcxx_assert(__b != memory_order_acq_rel); |
722 | |
723 | return __atomic_load_n(&_M_p, __m); |
724 | } |
725 | |
726 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __pointer_type |
727 | exchange(__pointer_type __p, |
728 | memory_order __m = memory_order_seq_cst) noexcept |
729 | { |
730 | return __atomic_exchange_n(&_M_p, __p, __m); |
731 | } |
732 | |
733 | |
734 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __pointer_type |
735 | exchange(__pointer_type __p, |
736 | memory_order __m = memory_order_seq_cst) volatile noexcept |
737 | { |
738 | return __atomic_exchange_n(&_M_p, __p, __m); |
739 | } |
740 | |
741 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) bool |
742 | compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, |
743 | memory_order __m1, |
744 | memory_order __m2) noexcept |
745 | { |
746 | memory_order __b2 = __m2 & __memory_order_mask; |
747 | memory_order __b1 = __m1 & __memory_order_mask; |
748 | __glibcxx_assert(__b2 != memory_order_release); |
749 | __glibcxx_assert(__b2 != memory_order_acq_rel); |
750 | __glibcxx_assert(__b2 <= __b1); |
751 | |
752 | return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2); |
753 | } |
754 | |
755 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) bool |
756 | compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2, |
757 | memory_order __m1, |
758 | memory_order __m2) volatile noexcept |
759 | { |
760 | memory_order __b2 = __m2 & __memory_order_mask; |
761 | memory_order __b1 = __m1 & __memory_order_mask; |
762 | |
763 | __glibcxx_assert(__b2 != memory_order_release); |
764 | __glibcxx_assert(__b2 != memory_order_acq_rel); |
765 | __glibcxx_assert(__b2 <= __b1); |
766 | |
767 | return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0, __m1, __m2); |
768 | } |
769 | |
770 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __pointer_type |
771 | fetch_add(ptrdiff_t __d, |
772 | memory_order __m = memory_order_seq_cst) noexcept |
773 | { return __atomic_fetch_add(&_M_p, _M_type_size(__d), __m); } |
774 | |
775 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __pointer_type |
776 | fetch_add(ptrdiff_t __d, |
777 | memory_order __m = memory_order_seq_cst) volatile noexcept |
778 | { return __atomic_fetch_add(&_M_p, _M_type_size(__d), __m); } |
779 | |
780 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __pointer_type |
781 | fetch_sub(ptrdiff_t __d, |
782 | memory_order __m = memory_order_seq_cst) noexcept |
783 | { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), __m); } |
784 | |
785 | _GLIBCXX_ALWAYS_INLINEinline __attribute__((__always_inline__)) __pointer_type |
786 | fetch_sub(ptrdiff_t __d, |
787 | memory_order __m = memory_order_seq_cst) volatile noexcept |
788 | { return __atomic_fetch_sub(&_M_p, _M_type_size(__d), __m); } |
789 | }; |
790 | |
791 | // @} group atomics |
792 | |
793 | _GLIBCXX_END_NAMESPACE_VERSION |
794 | } // namespace std |
795 | |
796 | #endif |
1 | //===- llvm/Support/Error.h - Recoverable error handling --------*- C++ -*-===// |
2 | // |
3 | // The LLVM Compiler Infrastructure |
4 | // |
5 | // This file is distributed under the University of Illinois Open Source |
6 | // License. See LICENSE.TXT for details. |
7 | // |
8 | //===----------------------------------------------------------------------===// |
9 | // |
10 | // This file defines an API used to report recoverable errors. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #ifndef LLVM_SUPPORT_ERROR_H |
15 | #define LLVM_SUPPORT_ERROR_H |
16 | |
17 | #include "llvm/ADT/SmallVector.h" |
18 | #include "llvm/ADT/STLExtras.h" |
19 | #include "llvm/ADT/StringExtras.h" |
20 | #include "llvm/ADT/Twine.h" |
21 | #include "llvm/Config/abi-breaking.h" |
22 | #include "llvm/Support/AlignOf.h" |
23 | #include "llvm/Support/Compiler.h" |
24 | #include "llvm/Support/Debug.h" |
25 | #include "llvm/Support/ErrorHandling.h" |
26 | #include "llvm/Support/ErrorOr.h" |
27 | #include "llvm/Support/raw_ostream.h" |
28 | #include <algorithm> |
29 | #include <cassert> |
30 | #include <cstdint> |
31 | #include <cstdlib> |
32 | #include <functional> |
33 | #include <memory> |
34 | #include <new> |
35 | #include <string> |
36 | #include <system_error> |
37 | #include <type_traits> |
38 | #include <utility> |
39 | #include <vector> |
40 | |
41 | namespace llvm { |
42 | |
43 | class ErrorSuccess; |
44 | |
45 | /// Base class for error info classes. Do not extend this directly: Extend |
46 | /// the ErrorInfo template subclass instead. |
47 | class ErrorInfoBase { |
48 | public: |
49 | virtual ~ErrorInfoBase() = default; |
50 | |
51 | /// Print an error message to an output stream. |
52 | virtual void log(raw_ostream &OS) const = 0; |
53 | |
54 | /// Return the error message as a string. |
55 | virtual std::string message() const { |
56 | std::string Msg; |
57 | raw_string_ostream OS(Msg); |
58 | log(OS); |
59 | return OS.str(); |
60 | } |
61 | |
62 | /// Convert this error to a std::error_code. |
63 | /// |
64 | /// This is a temporary crutch to enable interaction with code still |
65 | /// using std::error_code. It will be removed in the future. |
66 | virtual std::error_code convertToErrorCode() const = 0; |
67 | |
68 | // Returns the class ID for this type. |
69 | static const void *classID() { return &ID; } |
70 | |
71 | // Returns the class ID for the dynamic type of this ErrorInfoBase instance. |
72 | virtual const void *dynamicClassID() const = 0; |
73 | |
74 | // Check whether this instance is a subclass of the class identified by |
75 | // ClassID. |
76 | virtual bool isA(const void *const ClassID) const { |
77 | return ClassID == classID(); |
78 | } |
79 | |
80 | // Check whether this instance is a subclass of ErrorInfoT. |
81 | template <typename ErrorInfoT> bool isA() const { |
82 | return isA(ErrorInfoT::classID()); |
83 | } |
84 | |
85 | private: |
86 | virtual void anchor(); |
87 | |
88 | static char ID; |
89 | }; |
90 | |
91 | /// Lightweight error class with error context and mandatory checking. |
92 | /// |
93 | /// Instances of this class wrap a ErrorInfoBase pointer. Failure states |
94 | /// are represented by setting the pointer to a ErrorInfoBase subclass |
95 | /// instance containing information describing the failure. Success is |
96 | /// represented by a null pointer value. |
97 | /// |
98 | /// Instances of Error also contains a 'Checked' flag, which must be set |
99 | /// before the destructor is called, otherwise the destructor will trigger a |
100 | /// runtime error. This enforces at runtime the requirement that all Error |
101 | /// instances be checked or returned to the caller. |
102 | /// |
103 | /// There are two ways to set the checked flag, depending on what state the |
104 | /// Error instance is in. For Error instances indicating success, it |
105 | /// is sufficient to invoke the boolean conversion operator. E.g.: |
106 | /// |
107 | /// @code{.cpp} |
108 | /// Error foo(<...>); |
109 | /// |
110 | /// if (auto E = foo(<...>)) |
111 | /// return E; // <- Return E if it is in the error state. |
112 | /// // We have verified that E was in the success state. It can now be safely |
113 | /// // destroyed. |
114 | /// @endcode |
115 | /// |
116 | /// A success value *can not* be dropped. For example, just calling 'foo(<...>)' |
117 | /// without testing the return value will raise a runtime error, even if foo |
118 | /// returns success. |
119 | /// |
120 | /// For Error instances representing failure, you must use either the |
121 | /// handleErrors or handleAllErrors function with a typed handler. E.g.: |
122 | /// |
123 | /// @code{.cpp} |
124 | /// class MyErrorInfo : public ErrorInfo<MyErrorInfo> { |
125 | /// // Custom error info. |
126 | /// }; |
127 | /// |
128 | /// Error foo(<...>) { return make_error<MyErrorInfo>(...); } |
129 | /// |
130 | /// auto E = foo(<...>); // <- foo returns failure with MyErrorInfo. |
131 | /// auto NewE = |
132 | /// handleErrors(E, |
133 | /// [](const MyErrorInfo &M) { |
134 | /// // Deal with the error. |
135 | /// }, |
136 | /// [](std::unique_ptr<OtherError> M) -> Error { |
137 | /// if (canHandle(*M)) { |
138 | /// // handle error. |
139 | /// return Error::success(); |
140 | /// } |
141 | /// // Couldn't handle this error instance. Pass it up the stack. |
142 | /// return Error(std::move(M)); |
143 | /// ); |
144 | /// // Note - we must check or return NewE in case any of the handlers |
145 | /// // returned a new error. |
146 | /// @endcode |
147 | /// |
148 | /// The handleAllErrors function is identical to handleErrors, except |
149 | /// that it has a void return type, and requires all errors to be handled and |
150 | /// no new errors be returned. It prevents errors (assuming they can all be |
151 | /// handled) from having to be bubbled all the way to the top-level. |
152 | /// |
153 | /// *All* Error instances must be checked before destruction, even if |
154 | /// they're moved-assigned or constructed from Success values that have already |
155 | /// been checked. This enforces checking through all levels of the call stack. |
156 | class LLVM_NODISCARD[[clang::warn_unused_result]] Error { |
157 | // ErrorList needs to be able to yank ErrorInfoBase pointers out of this |
158 | // class to add to the error list. |
159 | friend class ErrorList; |
160 | |
161 | // handleErrors needs to be able to set the Checked flag. |
162 | template <typename... HandlerTs> |
163 | friend Error handleErrors(Error E, HandlerTs &&... Handlers); |
164 | |
165 | // Expected<T> needs to be able to steal the payload when constructed from an |
166 | // error. |
167 | template <typename T> friend class Expected; |
168 | |
169 | protected: |
170 | /// Create a success value. Prefer using 'Error::success()' for readability |
171 | Error() { |
172 | setPtr(nullptr); |
173 | setChecked(false); |
174 | } |
175 | |
176 | public: |
177 | /// Create a success value. |
178 | static ErrorSuccess success(); |
179 | |
180 | // Errors are not copy-constructable. |
181 | Error(const Error &Other) = delete; |
182 | |
183 | /// Move-construct an error value. The newly constructed error is considered |
184 | /// unchecked, even if the source error had been checked. The original error |
185 | /// becomes a checked Success value, regardless of its original state. |
186 | Error(Error &&Other) { |
187 | setChecked(true); |
188 | *this = std::move(Other); |
189 | } |
190 | |
191 | /// Create an error value. Prefer using the 'make_error' function, but |
192 | /// this constructor can be useful when "re-throwing" errors from handlers. |
193 | Error(std::unique_ptr<ErrorInfoBase> Payload) { |
194 | setPtr(Payload.release()); |
195 | setChecked(false); |
196 | } |
197 | |
198 | // Errors are not copy-assignable. |
199 | Error &operator=(const Error &Other) = delete; |
200 | |
201 | /// Move-assign an error value. The current error must represent success, you |
202 | /// you cannot overwrite an unhandled error. The current error is then |
203 | /// considered unchecked. The source error becomes a checked success value, |
204 | /// regardless of its original state. |
205 | Error &operator=(Error &&Other) { |
206 | // Don't allow overwriting of unchecked values. |
207 | assertIsChecked(); |
208 | setPtr(Other.getPtr()); |
209 | |
210 | // This Error is unchecked, even if the source error was checked. |
211 | setChecked(false); |
212 | |
213 | // Null out Other's payload and set its checked bit. |
214 | Other.setPtr(nullptr); |
215 | Other.setChecked(true); |
216 | |
217 | return *this; |
218 | } |
219 | |
220 | /// Destroy a Error. Fails with a call to abort() if the error is |
221 | /// unchecked. |
222 | ~Error() { |
223 | assertIsChecked(); |
224 | delete getPtr(); |
225 | } |
226 | |
227 | /// Bool conversion. Returns true if this Error is in a failure state, |
228 | /// and false if it is in an accept state. If the error is in a Success state |
229 | /// it will be considered checked. |
230 | explicit operator bool() { |
231 | setChecked(getPtr() == nullptr); |
232 | return getPtr() != nullptr; |
233 | } |
234 | |
235 | /// Check whether one error is a subclass of another. |
236 | template <typename ErrT> bool isA() const { |
237 | return getPtr() && getPtr()->isA(ErrT::classID()); |
238 | } |
239 | |
240 | /// Returns the dynamic class id of this error, or null if this is a success |
241 | /// value. |
242 | const void* dynamicClassID() const { |
243 | if (!getPtr()) |
244 | return nullptr; |
245 | return getPtr()->dynamicClassID(); |
246 | } |
247 | |
248 | private: |
249 | #if LLVM_ENABLE_ABI_BREAKING_CHECKS1 |
250 | // assertIsChecked() happens very frequently, but under normal circumstances |
251 | // is supposed to be a no-op. So we want it to be inlined, but having a bunch |
252 | // of debug prints can cause the function to be too large for inlining. So |
253 | // it's important that we define this function out of line so that it can't be |
254 | // inlined. |
255 | LLVM_ATTRIBUTE_NORETURN__attribute__((noreturn)) |
256 | void fatalUncheckedError() const; |
257 | #endif |
258 | |
259 | void assertIsChecked() { |
260 | #if LLVM_ENABLE_ABI_BREAKING_CHECKS1 |
261 | if (LLVM_UNLIKELY(!getChecked() || getPtr())__builtin_expect((bool)(!getChecked() || getPtr()), false)) |
262 | fatalUncheckedError(); |
263 | #endif |
264 | } |
265 | |
266 | ErrorInfoBase *getPtr() const { |
267 | return reinterpret_cast<ErrorInfoBase*>( |
268 | reinterpret_cast<uintptr_t>(Payload) & |
269 | ~static_cast<uintptr_t>(0x1)); |
270 | } |
271 | |
272 | void setPtr(ErrorInfoBase *EI) { |
273 | #if LLVM_ENABLE_ABI_BREAKING_CHECKS1 |
274 | Payload = reinterpret_cast<ErrorInfoBase*>( |
275 | (reinterpret_cast<uintptr_t>(EI) & |
276 | ~static_cast<uintptr_t>(0x1)) | |
277 | (reinterpret_cast<uintptr_t>(Payload) & 0x1)); |
278 | #else |
279 | Payload = EI; |
280 | #endif |
281 | } |
282 | |
283 | bool getChecked() const { |
284 | #if LLVM_ENABLE_ABI_BREAKING_CHECKS1 |
285 | return (reinterpret_cast<uintptr_t>(Payload) & 0x1) == 0; |
286 | #else |
287 | return true; |
288 | #endif |
289 | } |
290 | |
291 | void setChecked(bool V) { |
292 | Payload = reinterpret_cast<ErrorInfoBase*>( |
293 | (reinterpret_cast<uintptr_t>(Payload) & |
294 | ~static_cast<uintptr_t>(0x1)) | |
295 | (V ? 0 : 1)); |
296 | } |
297 | |
298 | std::unique_ptr<ErrorInfoBase> takePayload() { |
299 | std::unique_ptr<ErrorInfoBase> Tmp(getPtr()); |
300 | setPtr(nullptr); |
301 | setChecked(true); |
302 | return Tmp; |
303 | } |
304 | |
305 | ErrorInfoBase *Payload = nullptr; |
306 | }; |
307 | |
308 | /// Subclass of Error for the sole purpose of identifying the success path in |
309 | /// the type system. This allows to catch invalid conversion to Expected<T> at |
310 | /// compile time. |
311 | class ErrorSuccess : public Error {}; |
312 | |
313 | inline ErrorSuccess Error::success() { return ErrorSuccess(); } |
314 | |
315 | /// Make a Error instance representing failure using the given error info |
316 | /// type. |
317 | template <typename ErrT, typename... ArgTs> Error make_error(ArgTs &&... Args) { |
318 | return Error(llvm::make_unique<ErrT>(std::forward<ArgTs>(Args)...)); |
319 | } |
320 | |
321 | /// Base class for user error types. Users should declare their error types |
322 | /// like: |
323 | /// |
324 | /// class MyError : public ErrorInfo<MyError> { |
325 | /// .... |
326 | /// }; |
327 | /// |
328 | /// This class provides an implementation of the ErrorInfoBase::kind |
329 | /// method, which is used by the Error RTTI system. |
330 | template <typename ThisErrT, typename ParentErrT = ErrorInfoBase> |
331 | class ErrorInfo : public ParentErrT { |
332 | public: |
333 | static const void *classID() { return &ThisErrT::ID; } |
334 | |
335 | const void *dynamicClassID() const override { return &ThisErrT::ID; } |
336 | |
337 | bool isA(const void *const ClassID) const override { |
338 | return ClassID == classID() || ParentErrT::isA(ClassID); |
339 | } |
340 | }; |
341 | |
342 | /// Special ErrorInfo subclass representing a list of ErrorInfos. |
343 | /// Instances of this class are constructed by joinError. |
344 | class ErrorList final : public ErrorInfo<ErrorList> { |
345 | // handleErrors needs to be able to iterate the payload list of an |
346 | // ErrorList. |
347 | template <typename... HandlerTs> |
348 | friend Error handleErrors(Error E, HandlerTs &&... Handlers); |
349 | |
350 | // joinErrors is implemented in terms of join. |
351 | friend Error joinErrors(Error, Error); |
352 | |
353 | public: |
354 | void log(raw_ostream &OS) const override { |
355 | OS << "Multiple errors:\n"; |
356 | for (auto &ErrPayload : Payloads) { |
357 | ErrPayload->log(OS); |
358 | OS << "\n"; |
359 | } |
360 | } |
361 | |
362 | std::error_code convertToErrorCode() const override; |
363 | |
364 | // Used by ErrorInfo::classID. |
365 | static char ID; |
366 | |
367 | private: |
368 | ErrorList(std::unique_ptr<ErrorInfoBase> Payload1, |
369 | std::unique_ptr<ErrorInfoBase> Payload2) { |
370 | assert(!Payload1->isA<ErrorList>() && !Payload2->isA<ErrorList>() &&(static_cast <bool> (!Payload1->isA<ErrorList> () && !Payload2->isA<ErrorList>() && "ErrorList constructor payloads should be singleton errors") ? void (0) : __assert_fail ("!Payload1->isA<ErrorList>() && !Payload2->isA<ErrorList>() && \"ErrorList constructor payloads should be singleton errors\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/Support/Error.h" , 371, __extension__ __PRETTY_FUNCTION__)) |
371 | "ErrorList constructor payloads should be singleton errors")(static_cast <bool> (!Payload1->isA<ErrorList> () && !Payload2->isA<ErrorList>() && "ErrorList constructor payloads should be singleton errors") ? void (0) : __assert_fail ("!Payload1->isA<ErrorList>() && !Payload2->isA<ErrorList>() && \"ErrorList constructor payloads should be singleton errors\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/Support/Error.h" , 371, __extension__ __PRETTY_FUNCTION__)); |
372 | Payloads.push_back(std::move(Payload1)); |
373 | Payloads.push_back(std::move(Payload2)); |
374 | } |
375 | |
376 | static Error join(Error E1, Error E2) { |
377 | if (!E1) |
378 | return E2; |
379 | if (!E2) |
380 | return E1; |
381 | if (E1.isA<ErrorList>()) { |
382 | auto &E1List = static_cast<ErrorList &>(*E1.getPtr()); |
383 | if (E2.isA<ErrorList>()) { |
384 | auto E2Payload = E2.takePayload(); |
385 | auto &E2List = static_cast<ErrorList &>(*E2Payload); |
386 | for (auto &Payload : E2List.Payloads) |
387 | E1List.Payloads.push_back(std::move(Payload)); |
388 | } else |
389 | E1List.Payloads.push_back(E2.takePayload()); |
390 | |
391 | return E1; |
392 | } |
393 | if (E2.isA<ErrorList>()) { |
394 | auto &E2List = static_cast<ErrorList &>(*E2.getPtr()); |
395 | E2List.Payloads.insert(E2List.Payloads.begin(), E1.takePayload()); |
396 | return E2; |
397 | } |
398 | return Error(std::unique_ptr<ErrorList>( |
399 | new ErrorList(E1.takePayload(), E2.takePayload()))); |
400 | } |
401 | |
402 | std::vector<std::unique_ptr<ErrorInfoBase>> Payloads; |
403 | }; |
404 | |
405 | /// Concatenate errors. The resulting Error is unchecked, and contains the |
406 | /// ErrorInfo(s), if any, contained in E1, followed by the |
407 | /// ErrorInfo(s), if any, contained in E2. |
408 | inline Error joinErrors(Error E1, Error E2) { |
409 | return ErrorList::join(std::move(E1), std::move(E2)); |
410 | } |
411 | |
412 | /// Tagged union holding either a T or a Error. |
413 | /// |
414 | /// This class parallels ErrorOr, but replaces error_code with Error. Since |
415 | /// Error cannot be copied, this class replaces getError() with |
416 | /// takeError(). It also adds an bool errorIsA<ErrT>() method for testing the |
417 | /// error class type. |
418 | template <class T> class LLVM_NODISCARD[[clang::warn_unused_result]] Expected { |
419 | template <class T1> friend class ExpectedAsOutParameter; |
420 | template <class OtherT> friend class Expected; |
421 | |
422 | static const bool isRef = std::is_reference<T>::value; |
423 | |
424 | using wrap = ReferenceStorage<typename std::remove_reference<T>::type>; |
425 | |
426 | using error_type = std::unique_ptr<ErrorInfoBase>; |
427 | |
428 | public: |
429 | using storage_type = typename std::conditional<isRef, wrap, T>::type; |
430 | using value_type = T; |
431 | |
432 | private: |
433 | using reference = typename std::remove_reference<T>::type &; |
434 | using const_reference = const typename std::remove_reference<T>::type &; |
435 | using pointer = typename std::remove_reference<T>::type *; |
436 | using const_pointer = const typename std::remove_reference<T>::type *; |
437 | |
438 | public: |
439 | /// Create an Expected<T> error value from the given Error. |
440 | Expected(Error Err) |
441 | : HasError(true) |
442 | #if LLVM_ENABLE_ABI_BREAKING_CHECKS1 |
443 | // Expected is unchecked upon construction in Debug builds. |
444 | , Unchecked(true) |
445 | #endif |
446 | { |
447 | assert(Err && "Cannot create Expected<T> from Error success value.")(static_cast <bool> (Err && "Cannot create Expected<T> from Error success value." ) ? void (0) : __assert_fail ("Err && \"Cannot create Expected<T> from Error success value.\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/Support/Error.h" , 447, __extension__ __PRETTY_FUNCTION__)); |
448 | new (getErrorStorage()) error_type(Err.takePayload()); |
449 | } |
450 | |
451 | /// Forbid to convert from Error::success() implicitly, this avoids having |
452 | /// Expected<T> foo() { return Error::success(); } which compiles otherwise |
453 | /// but triggers the assertion above. |
454 | Expected(ErrorSuccess) = delete; |
455 | |
456 | /// Create an Expected<T> success value from the given OtherT value, which |
457 | /// must be convertible to T. |
458 | template <typename OtherT> |
459 | Expected(OtherT &&Val, |
460 | typename std::enable_if<std::is_convertible<OtherT, T>::value>::type |
461 | * = nullptr) |
462 | : HasError(false) |
463 | #if LLVM_ENABLE_ABI_BREAKING_CHECKS1 |
464 | // Expected is unchecked upon construction in Debug builds. |
465 | , Unchecked(true) |
466 | #endif |
467 | { |
468 | new (getStorage()) storage_type(std::forward<OtherT>(Val)); |
469 | } |
470 | |
471 | /// Move construct an Expected<T> value. |
472 | Expected(Expected &&Other) { moveConstruct(std::move(Other)); } |
473 | |
474 | /// Move construct an Expected<T> value from an Expected<OtherT>, where OtherT |
475 | /// must be convertible to T. |
476 | template <class OtherT> |
477 | Expected(Expected<OtherT> &&Other, |
478 | typename std::enable_if<std::is_convertible<OtherT, T>::value>::type |
479 | * = nullptr) { |
480 | moveConstruct(std::move(Other)); |
481 | } |
482 | |
483 | /// Move construct an Expected<T> value from an Expected<OtherT>, where OtherT |
484 | /// isn't convertible to T. |
485 | template <class OtherT> |
486 | explicit Expected( |
487 | Expected<OtherT> &&Other, |
488 | typename std::enable_if<!std::is_convertible<OtherT, T>::value>::type * = |
489 | nullptr) { |
490 | moveConstruct(std::move(Other)); |
491 | } |
492 | |
493 | /// Move-assign from another Expected<T>. |
494 | Expected &operator=(Expected &&Other) { |
495 | moveAssign(std::move(Other)); |
496 | return *this; |
497 | } |
498 | |
499 | /// Destroy an Expected<T>. |
500 | ~Expected() { |
501 | assertIsChecked(); |
502 | if (!HasError) |
503 | getStorage()->~storage_type(); |
504 | else |
505 | getErrorStorage()->~error_type(); |
506 | } |
507 | |
508 | /// \brief Return false if there is an error. |
509 | explicit operator bool() { |
510 | #if LLVM_ENABLE_ABI_BREAKING_CHECKS1 |
511 | Unchecked = HasError; |
512 | #endif |
513 | return !HasError; |
514 | } |
515 | |
516 | /// \brief Returns a reference to the stored T value. |
517 | reference get() { |
518 | assertIsChecked(); |
519 | return *getStorage(); |
520 | } |
521 | |
522 | /// \brief Returns a const reference to the stored T value. |
523 | const_reference get() const { |
524 | assertIsChecked(); |
525 | return const_cast<Expected<T> *>(this)->get(); |
526 | } |
527 | |
528 | /// \brief Check that this Expected<T> is an error of type ErrT. |
529 | template <typename ErrT> bool errorIsA() const { |
530 | return HasError && (*getErrorStorage())->template isA<ErrT>(); |
531 | } |
532 | |
533 | /// \brief Take ownership of the stored error. |
534 | /// After calling this the Expected<T> is in an indeterminate state that can |
535 | /// only be safely destructed. No further calls (beside the destructor) should |
536 | /// be made on the Expected<T> vaule. |
537 | Error takeError() { |
538 | #if LLVM_ENABLE_ABI_BREAKING_CHECKS1 |
539 | Unchecked = false; |
540 | #endif |
541 | return HasError ? Error(std::move(*getErrorStorage())) : Error::success(); |
542 | } |
543 | |
544 | /// \brief Returns a pointer to the stored T value. |
545 | pointer operator->() { |
546 | assertIsChecked(); |
547 | return toPointer(getStorage()); |
548 | } |
549 | |
550 | /// \brief Returns a const pointer to the stored T value. |
551 | const_pointer operator->() const { |
552 | assertIsChecked(); |
553 | return toPointer(getStorage()); |
554 | } |
555 | |
556 | /// \brief Returns a reference to the stored T value. |
557 | reference operator*() { |
558 | assertIsChecked(); |
559 | return *getStorage(); |
560 | } |
561 | |
562 | /// \brief Returns a const reference to the stored T value. |
563 | const_reference operator*() const { |
564 | assertIsChecked(); |
565 | return *getStorage(); |
566 | } |
567 | |
568 | private: |
569 | template <class T1> |
570 | static bool compareThisIfSameType(const T1 &a, const T1 &b) { |
571 | return &a == &b; |
572 | } |
573 | |
574 | template <class T1, class T2> |
575 | static bool compareThisIfSameType(const T1 &a, const T2 &b) { |
576 | return false; |
577 | } |
578 | |
579 | template <class OtherT> void moveConstruct(Expected<OtherT> &&Other) { |
580 | HasError = Other.HasError; |
581 | #if LLVM_ENABLE_ABI_BREAKING_CHECKS1 |
582 | Unchecked = true; |
583 | Other.Unchecked = false; |
584 | #endif |
585 | |
586 | if (!HasError) |
587 | new (getStorage()) storage_type(std::move(*Other.getStorage())); |
588 | else |
589 | new (getErrorStorage()) error_type(std::move(*Other.getErrorStorage())); |
590 | } |
591 | |
592 | template <class OtherT> void moveAssign(Expected<OtherT> &&Other) { |
593 | assertIsChecked(); |
594 | |
595 | if (compareThisIfSameType(*this, Other)) |
596 | return; |
597 | |
598 | this->~Expected(); |
599 | new (this) Expected(std::move(Other)); |
600 | } |
601 | |
602 | pointer toPointer(pointer Val) { return Val; } |
603 | |
604 | const_pointer toPointer(const_pointer Val) const { return Val; } |
605 | |
606 | pointer toPointer(wrap *Val) { return &Val->get(); } |
607 | |
608 | const_pointer toPointer(const wrap *Val) const { return &Val->get(); } |
609 | |
610 | storage_type *getStorage() { |
611 | assert(!HasError && "Cannot get value when an error exists!")(static_cast <bool> (!HasError && "Cannot get value when an error exists!" ) ? void (0) : __assert_fail ("!HasError && \"Cannot get value when an error exists!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/Support/Error.h" , 611, __extension__ __PRETTY_FUNCTION__)); |
612 | return reinterpret_cast<storage_type *>(TStorage.buffer); |
613 | } |
614 | |
615 | const storage_type *getStorage() const { |
616 | assert(!HasError && "Cannot get value when an error exists!")(static_cast <bool> (!HasError && "Cannot get value when an error exists!" ) ? void (0) : __assert_fail ("!HasError && \"Cannot get value when an error exists!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/Support/Error.h" , 616, __extension__ __PRETTY_FUNCTION__)); |
617 | return reinterpret_cast<const storage_type *>(TStorage.buffer); |
618 | } |
619 | |
620 | error_type *getErrorStorage() { |
621 | assert(HasError && "Cannot get error when a value exists!")(static_cast <bool> (HasError && "Cannot get error when a value exists!" ) ? void (0) : __assert_fail ("HasError && \"Cannot get error when a value exists!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/Support/Error.h" , 621, __extension__ __PRETTY_FUNCTION__)); |
622 | return reinterpret_cast<error_type *>(ErrorStorage.buffer); |
623 | } |
624 | |
625 | const error_type *getErrorStorage() const { |
626 | assert(HasError && "Cannot get error when a value exists!")(static_cast <bool> (HasError && "Cannot get error when a value exists!" ) ? void (0) : __assert_fail ("HasError && \"Cannot get error when a value exists!\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/Support/Error.h" , 626, __extension__ __PRETTY_FUNCTION__)); |
627 | return reinterpret_cast<const error_type *>(ErrorStorage.buffer); |
628 | } |
629 | |
630 | // Used by ExpectedAsOutParameter to reset the checked flag. |
631 | void setUnchecked() { |
632 | #if LLVM_ENABLE_ABI_BREAKING_CHECKS1 |
633 | Unchecked = true; |
634 | #endif |
635 | } |
636 | |
637 | #if LLVM_ENABLE_ABI_BREAKING_CHECKS1 |
638 | LLVM_ATTRIBUTE_NORETURN__attribute__((noreturn)) |
639 | LLVM_ATTRIBUTE_NOINLINE__attribute__((noinline)) |
640 | void fatalUncheckedExpected() const { |
641 | dbgs() << "Expected<T> must be checked before access or destruction.\n"; |
642 | if (HasError) { |
643 | dbgs() << "Unchecked Expected<T> contained error:\n"; |
644 | (*getErrorStorage())->log(dbgs()); |
645 | } else |
646 | dbgs() << "Expected<T> value was in success state. (Note: Expected<T> " |
647 | "values in success mode must still be checked prior to being " |
648 | "destroyed).\n"; |
649 | abort(); |
650 | } |
651 | #endif |
652 | |
653 | void assertIsChecked() { |
654 | #if LLVM_ENABLE_ABI_BREAKING_CHECKS1 |
655 | if (LLVM_UNLIKELY(Unchecked)__builtin_expect((bool)(Unchecked), false)) |
656 | fatalUncheckedExpected(); |
657 | #endif |
658 | } |
659 | |
660 | union { |
661 | AlignedCharArrayUnion<storage_type> TStorage; |
662 | AlignedCharArrayUnion<error_type> ErrorStorage; |
663 | }; |
664 | bool HasError : 1; |
665 | #if LLVM_ENABLE_ABI_BREAKING_CHECKS1 |
666 | bool Unchecked : 1; |
667 | #endif |
668 | }; |
669 | |
670 | /// Report a serious error, calling any installed error handler. See |
671 | /// ErrorHandling.h. |
672 | LLVM_ATTRIBUTE_NORETURN__attribute__((noreturn)) void report_fatal_error(Error Err, |
673 | bool gen_crash_diag = true); |
674 | |
675 | /// Report a fatal error if Err is a failure value. |
676 | /// |
677 | /// This function can be used to wrap calls to fallible functions ONLY when it |
678 | /// is known that the Error will always be a success value. E.g. |
679 | /// |
680 | /// @code{.cpp} |
681 | /// // foo only attempts the fallible operation if DoFallibleOperation is |
682 | /// // true. If DoFallibleOperation is false then foo always returns |
683 | /// // Error::success(). |
684 | /// Error foo(bool DoFallibleOperation); |
685 | /// |
686 | /// cantFail(foo(false)); |
687 | /// @endcode |
688 | inline void cantFail(Error Err, const char *Msg = nullptr) { |
689 | if (Err) { |
690 | if (!Msg) |
691 | Msg = "Failure value returned from cantFail wrapped call"; |
692 | llvm_unreachable(Msg)::llvm::llvm_unreachable_internal(Msg, "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/Support/Error.h" , 692); |
693 | } |
694 | } |
695 | |
696 | /// Report a fatal error if ValOrErr is a failure value, otherwise unwraps and |
697 | /// returns the contained value. |
698 | /// |
699 | /// This function can be used to wrap calls to fallible functions ONLY when it |
700 | /// is known that the Error will always be a success value. E.g. |
701 | /// |
702 | /// @code{.cpp} |
703 | /// // foo only attempts the fallible operation if DoFallibleOperation is |
704 | /// // true. If DoFallibleOperation is false then foo always returns an int. |
705 | /// Expected<int> foo(bool DoFallibleOperation); |
706 | /// |
707 | /// int X = cantFail(foo(false)); |
708 | /// @endcode |
709 | template <typename T> |
710 | T cantFail(Expected<T> ValOrErr, const char *Msg = nullptr) { |
711 | if (ValOrErr) |
712 | return std::move(*ValOrErr); |
713 | else { |
714 | if (!Msg) |
715 | Msg = "Failure value returned from cantFail wrapped call"; |
716 | llvm_unreachable(Msg)::llvm::llvm_unreachable_internal(Msg, "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/Support/Error.h" , 716); |
717 | } |
718 | } |
719 | |
720 | /// Report a fatal error if ValOrErr is a failure value, otherwise unwraps and |
721 | /// returns the contained reference. |
722 | /// |
723 | /// This function can be used to wrap calls to fallible functions ONLY when it |
724 | /// is known that the Error will always be a success value. E.g. |
725 | /// |
726 | /// @code{.cpp} |
727 | /// // foo only attempts the fallible operation if DoFallibleOperation is |
728 | /// // true. If DoFallibleOperation is false then foo always returns a Bar&. |
729 | /// Expected<Bar&> foo(bool DoFallibleOperation); |
730 | /// |
731 | /// Bar &X = cantFail(foo(false)); |
732 | /// @endcode |
733 | template <typename T> |
734 | T& cantFail(Expected<T&> ValOrErr, const char *Msg = nullptr) { |
735 | if (ValOrErr) |
736 | return *ValOrErr; |
737 | else { |
738 | if (!Msg) |
739 | Msg = "Failure value returned from cantFail wrapped call"; |
740 | llvm_unreachable(Msg)::llvm::llvm_unreachable_internal(Msg, "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/Support/Error.h" , 740); |
741 | } |
742 | } |
743 | |
744 | /// Helper for testing applicability of, and applying, handlers for |
745 | /// ErrorInfo types. |
746 | template <typename HandlerT> |
747 | class ErrorHandlerTraits |
748 | : public ErrorHandlerTraits<decltype( |
749 | &std::remove_reference<HandlerT>::type::operator())> {}; |
750 | |
751 | // Specialization functions of the form 'Error (const ErrT&)'. |
752 | template <typename ErrT> class ErrorHandlerTraits<Error (&)(ErrT &)> { |
753 | public: |
754 | static bool appliesTo(const ErrorInfoBase &E) { |
755 | return E.template isA<ErrT>(); |
756 | } |
757 | |
758 | template <typename HandlerT> |
759 | static Error apply(HandlerT &&H, std::unique_ptr<ErrorInfoBase> E) { |
760 | assert(appliesTo(*E) && "Applying incorrect handler")(static_cast <bool> (appliesTo(*E) && "Applying incorrect handler" ) ? void (0) : __assert_fail ("appliesTo(*E) && \"Applying incorrect handler\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/Support/Error.h" , 760, __extension__ __PRETTY_FUNCTION__)); |
761 | return H(static_cast<ErrT &>(*E)); |
762 | } |
763 | }; |
764 | |
765 | // Specialization functions of the form 'void (const ErrT&)'. |
766 | template <typename ErrT> class ErrorHandlerTraits<void (&)(ErrT &)> { |
767 | public: |
768 | static bool appliesTo(const ErrorInfoBase &E) { |
769 | return E.template isA<ErrT>(); |
770 | } |
771 | |
772 | template <typename HandlerT> |
773 | static Error apply(HandlerT &&H, std::unique_ptr<ErrorInfoBase> E) { |
774 | assert(appliesTo(*E) && "Applying incorrect handler")(static_cast <bool> (appliesTo(*E) && "Applying incorrect handler" ) ? void (0) : __assert_fail ("appliesTo(*E) && \"Applying incorrect handler\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/Support/Error.h" , 774, __extension__ __PRETTY_FUNCTION__)); |
775 | H(static_cast<ErrT &>(*E)); |
776 | return Error::success(); |
777 | } |
778 | }; |
779 | |
780 | /// Specialization for functions of the form 'Error (std::unique_ptr<ErrT>)'. |
781 | template <typename ErrT> |
782 | class ErrorHandlerTraits<Error (&)(std::unique_ptr<ErrT>)> { |
783 | public: |
784 | static bool appliesTo(const ErrorInfoBase &E) { |
785 | return E.template isA<ErrT>(); |
786 | } |
787 | |
788 | template <typename HandlerT> |
789 | static Error apply(HandlerT &&H, std::unique_ptr<ErrorInfoBase> E) { |
790 | assert(appliesTo(*E) && "Applying incorrect handler")(static_cast <bool> (appliesTo(*E) && "Applying incorrect handler" ) ? void (0) : __assert_fail ("appliesTo(*E) && \"Applying incorrect handler\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/Support/Error.h" , 790, __extension__ __PRETTY_FUNCTION__)); |
791 | std::unique_ptr<ErrT> SubE(static_cast<ErrT *>(E.release())); |
792 | return H(std::move(SubE)); |
793 | } |
794 | }; |
795 | |
796 | /// Specialization for functions of the form 'void (std::unique_ptr<ErrT>)'. |
797 | template <typename ErrT> |
798 | class ErrorHandlerTraits<void (&)(std::unique_ptr<ErrT>)> { |
799 | public: |
800 | static bool appliesTo(const ErrorInfoBase &E) { |
801 | return E.template isA<ErrT>(); |
802 | } |
803 | |
804 | template <typename HandlerT> |
805 | static Error apply(HandlerT &&H, std::unique_ptr<ErrorInfoBase> E) { |
806 | assert(appliesTo(*E) && "Applying incorrect handler")(static_cast <bool> (appliesTo(*E) && "Applying incorrect handler" ) ? void (0) : __assert_fail ("appliesTo(*E) && \"Applying incorrect handler\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/Support/Error.h" , 806, __extension__ __PRETTY_FUNCTION__)); |
807 | std::unique_ptr<ErrT> SubE(static_cast<ErrT *>(E.release())); |
808 | H(std::move(SubE)); |
809 | return Error::success(); |
810 | } |
811 | }; |
812 | |
813 | // Specialization for member functions of the form 'RetT (const ErrT&)'. |
814 | template <typename C, typename RetT, typename ErrT> |
815 | class ErrorHandlerTraits<RetT (C::*)(ErrT &)> |
816 | : public ErrorHandlerTraits<RetT (&)(ErrT &)> {}; |
817 | |
818 | // Specialization for member functions of the form 'RetT (const ErrT&) const'. |
819 | template <typename C, typename RetT, typename ErrT> |
820 | class ErrorHandlerTraits<RetT (C::*)(ErrT &) const> |
821 | : public ErrorHandlerTraits<RetT (&)(ErrT &)> {}; |
822 | |
823 | // Specialization for member functions of the form 'RetT (const ErrT&)'. |
824 | template <typename C, typename RetT, typename ErrT> |
825 | class ErrorHandlerTraits<RetT (C::*)(const ErrT &)> |
826 | : public ErrorHandlerTraits<RetT (&)(ErrT &)> {}; |
827 | |
828 | // Specialization for member functions of the form 'RetT (const ErrT&) const'. |
829 | template <typename C, typename RetT, typename ErrT> |
830 | class ErrorHandlerTraits<RetT (C::*)(const ErrT &) const> |
831 | : public ErrorHandlerTraits<RetT (&)(ErrT &)> {}; |
832 | |
833 | /// Specialization for member functions of the form |
834 | /// 'RetT (std::unique_ptr<ErrT>)'. |
835 | template <typename C, typename RetT, typename ErrT> |
836 | class ErrorHandlerTraits<RetT (C::*)(std::unique_ptr<ErrT>)> |
837 | : public ErrorHandlerTraits<RetT (&)(std::unique_ptr<ErrT>)> {}; |
838 | |
839 | /// Specialization for member functions of the form |
840 | /// 'RetT (std::unique_ptr<ErrT>) const'. |
841 | template <typename C, typename RetT, typename ErrT> |
842 | class ErrorHandlerTraits<RetT (C::*)(std::unique_ptr<ErrT>) const> |
843 | : public ErrorHandlerTraits<RetT (&)(std::unique_ptr<ErrT>)> {}; |
844 | |
845 | inline Error handleErrorImpl(std::unique_ptr<ErrorInfoBase> Payload) { |
846 | return Error(std::move(Payload)); |
847 | } |
848 | |
849 | template <typename HandlerT, typename... HandlerTs> |
850 | Error handleErrorImpl(std::unique_ptr<ErrorInfoBase> Payload, |
851 | HandlerT &&Handler, HandlerTs &&... Handlers) { |
852 | if (ErrorHandlerTraits<HandlerT>::appliesTo(*Payload)) |
853 | return ErrorHandlerTraits<HandlerT>::apply(std::forward<HandlerT>(Handler), |
854 | std::move(Payload)); |
855 | return handleErrorImpl(std::move(Payload), |
856 | std::forward<HandlerTs>(Handlers)...); |
857 | } |
858 | |
859 | /// Pass the ErrorInfo(s) contained in E to their respective handlers. Any |
860 | /// unhandled errors (or Errors returned by handlers) are re-concatenated and |
861 | /// returned. |
862 | /// Because this function returns an error, its result must also be checked |
863 | /// or returned. If you intend to handle all errors use handleAllErrors |
864 | /// (which returns void, and will abort() on unhandled errors) instead. |
865 | template <typename... HandlerTs> |
866 | Error handleErrors(Error E, HandlerTs &&... Hs) { |
867 | if (!E) |
868 | return Error::success(); |
869 | |
870 | std::unique_ptr<ErrorInfoBase> Payload = E.takePayload(); |
871 | |
872 | if (Payload->isA<ErrorList>()) { |
873 | ErrorList &List = static_cast<ErrorList &>(*Payload); |
874 | Error R; |
875 | for (auto &P : List.Payloads) |
876 | R = ErrorList::join( |
877 | std::move(R), |
878 | handleErrorImpl(std::move(P), std::forward<HandlerTs>(Hs)...)); |
879 | return R; |
880 | } |
881 | |
882 | return handleErrorImpl(std::move(Payload), std::forward<HandlerTs>(Hs)...); |
883 | } |
884 | |
885 | /// Behaves the same as handleErrors, except that it requires that all |
886 | /// errors be handled by the given handlers. If any unhandled error remains |
887 | /// after the handlers have run, report_fatal_error() will be called. |
888 | template <typename... HandlerTs> |
889 | void handleAllErrors(Error E, HandlerTs &&... Handlers) { |
890 | cantFail(handleErrors(std::move(E), std::forward<HandlerTs>(Handlers)...)); |
891 | } |
892 | |
893 | /// Check that E is a non-error, then drop it. |
894 | /// If E is an error report_fatal_error will be called. |
895 | inline void handleAllErrors(Error E) { |
896 | cantFail(std::move(E)); |
897 | } |
898 | |
899 | /// Handle any errors (if present) in an Expected<T>, then try a recovery path. |
900 | /// |
901 | /// If the incoming value is a success value it is returned unmodified. If it |
902 | /// is a failure value then it the contained error is passed to handleErrors. |
903 | /// If handleErrors is able to handle the error then the RecoveryPath functor |
904 | /// is called to supply the final result. If handleErrors is not able to |
905 | /// handle all errors then the unhandled errors are returned. |
906 | /// |
907 | /// This utility enables the follow pattern: |
908 | /// |
909 | /// @code{.cpp} |
910 | /// enum FooStrategy { Aggressive, Conservative }; |
911 | /// Expected<Foo> foo(FooStrategy S); |
912 | /// |
913 | /// auto ResultOrErr = |
914 | /// handleExpected( |
915 | /// foo(Aggressive), |
916 | /// []() { return foo(Conservative); }, |
917 | /// [](AggressiveStrategyError&) { |
918 | /// // Implicitly conusme this - we'll recover by using a conservative |
919 | /// // strategy. |
920 | /// }); |
921 | /// |
922 | /// @endcode |
923 | template <typename T, typename RecoveryFtor, typename... HandlerTs> |
924 | Expected<T> handleExpected(Expected<T> ValOrErr, RecoveryFtor &&RecoveryPath, |
925 | HandlerTs &&... Handlers) { |
926 | if (ValOrErr) |
927 | return ValOrErr; |
928 | |
929 | if (auto Err = handleErrors(ValOrErr.takeError(), |
930 | std::forward<HandlerTs>(Handlers)...)) |
931 | return std::move(Err); |
932 | |
933 | return RecoveryPath(); |
934 | } |
935 | |
936 | /// Log all errors (if any) in E to OS. If there are any errors, ErrorBanner |
937 | /// will be printed before the first one is logged. A newline will be printed |
938 | /// after each error. |
939 | /// |
940 | /// This is useful in the base level of your program to allow clean termination |
941 | /// (allowing clean deallocation of resources, etc.), while reporting error |
942 | /// information to the user. |
943 | void logAllUnhandledErrors(Error E, raw_ostream &OS, Twine ErrorBanner); |
944 | |
945 | /// Write all error messages (if any) in E to a string. The newline character |
946 | /// is used to separate error messages. |
947 | inline std::string toString(Error E) { |
948 | SmallVector<std::string, 2> Errors; |
949 | handleAllErrors(std::move(E), [&Errors](const ErrorInfoBase &EI) { |
950 | Errors.push_back(EI.message()); |
951 | }); |
952 | return join(Errors.begin(), Errors.end(), "\n"); |
953 | } |
954 | |
955 | /// Consume a Error without doing anything. This method should be used |
956 | /// only where an error can be considered a reasonable and expected return |
957 | /// value. |
958 | /// |
959 | /// Uses of this method are potentially indicative of design problems: If it's |
960 | /// legitimate to do nothing while processing an "error", the error-producer |
961 | /// might be more clearly refactored to return an Optional<T>. |
962 | inline void consumeError(Error Err) { |
963 | handleAllErrors(std::move(Err), [](const ErrorInfoBase &) {}); |
964 | } |
965 | |
966 | /// Helper for converting an Error to a bool. |
967 | /// |
968 | /// This method returns true if Err is in an error state, or false if it is |
969 | /// in a success state. Puts Err in a checked state in both cases (unlike |
970 | /// Error::operator bool(), which only does this for success states). |
971 | inline bool errorToBool(Error Err) { |
972 | bool IsError = static_cast<bool>(Err); |
973 | if (IsError) |
974 | consumeError(std::move(Err)); |
975 | return IsError; |
976 | } |
977 | |
978 | /// Helper for Errors used as out-parameters. |
979 | /// |
980 | /// This helper is for use with the Error-as-out-parameter idiom, where an error |
981 | /// is passed to a function or method by reference, rather than being returned. |
982 | /// In such cases it is helpful to set the checked bit on entry to the function |
983 | /// so that the error can be written to (unchecked Errors abort on assignment) |
984 | /// and clear the checked bit on exit so that clients cannot accidentally forget |
985 | /// to check the result. This helper performs these actions automatically using |
986 | /// RAII: |
987 | /// |
988 | /// @code{.cpp} |
989 | /// Result foo(Error &Err) { |
990 | /// ErrorAsOutParameter ErrAsOutParam(&Err); // 'Checked' flag set |
991 | /// // <body of foo> |
992 | /// // <- 'Checked' flag auto-cleared when ErrAsOutParam is destructed. |
993 | /// } |
994 | /// @endcode |
995 | /// |
996 | /// ErrorAsOutParameter takes an Error* rather than Error& so that it can be |
997 | /// used with optional Errors (Error pointers that are allowed to be null). If |
998 | /// ErrorAsOutParameter took an Error reference, an instance would have to be |
999 | /// created inside every condition that verified that Error was non-null. By |
1000 | /// taking an Error pointer we can just create one instance at the top of the |
1001 | /// function. |
1002 | class ErrorAsOutParameter { |
1003 | public: |
1004 | ErrorAsOutParameter(Error *Err) : Err(Err) { |
1005 | // Raise the checked bit if Err is success. |
1006 | if (Err) |
1007 | (void)!!*Err; |
1008 | } |
1009 | |
1010 | ~ErrorAsOutParameter() { |
1011 | // Clear the checked bit. |
1012 | if (Err && !*Err) |
1013 | *Err = Error::success(); |
1014 | } |
1015 | |
1016 | private: |
1017 | Error *Err; |
1018 | }; |
1019 | |
1020 | /// Helper for Expected<T>s used as out-parameters. |
1021 | /// |
1022 | /// See ErrorAsOutParameter. |
1023 | template <typename T> |
1024 | class ExpectedAsOutParameter { |
1025 | public: |
1026 | ExpectedAsOutParameter(Expected<T> *ValOrErr) |
1027 | : ValOrErr(ValOrErr) { |
1028 | if (ValOrErr) |
1029 | (void)!!*ValOrErr; |
1030 | } |
1031 | |
1032 | ~ExpectedAsOutParameter() { |
1033 | if (ValOrErr) |
1034 | ValOrErr->setUnchecked(); |
1035 | } |
1036 | |
1037 | private: |
1038 | Expected<T> *ValOrErr; |
1039 | }; |
1040 | |
1041 | /// This class wraps a std::error_code in a Error. |
1042 | /// |
1043 | /// This is useful if you're writing an interface that returns a Error |
1044 | /// (or Expected) and you want to call code that still returns |
1045 | /// std::error_codes. |
1046 | class ECError : public ErrorInfo<ECError> { |
1047 | friend Error errorCodeToError(std::error_code); |
1048 | |
1049 | public: |
1050 | void setErrorCode(std::error_code EC) { this->EC = EC; } |
1051 | std::error_code convertToErrorCode() const override { return EC; } |
1052 | void log(raw_ostream &OS) const override { OS << EC.message(); } |
1053 | |
1054 | // Used by ErrorInfo::classID. |
1055 | static char ID; |
1056 | |
1057 | protected: |
1058 | ECError() = default; |
1059 | ECError(std::error_code EC) : EC(EC) {} |
1060 | |
1061 | std::error_code EC; |
1062 | }; |
1063 | |
1064 | /// The value returned by this function can be returned from convertToErrorCode |
1065 | /// for Error values where no sensible translation to std::error_code exists. |
1066 | /// It should only be used in this situation, and should never be used where a |
1067 | /// sensible conversion to std::error_code is available, as attempts to convert |
1068 | /// to/from this error will result in a fatal error. (i.e. it is a programmatic |
1069 | ///error to try to convert such a value). |
1070 | std::error_code inconvertibleErrorCode(); |
1071 | |
1072 | /// Helper for converting an std::error_code to a Error. |
1073 | Error errorCodeToError(std::error_code EC); |
1074 | |
1075 | /// Helper for converting an ECError to a std::error_code. |
1076 | /// |
1077 | /// This method requires that Err be Error() or an ECError, otherwise it |
1078 | /// will trigger a call to abort(). |
1079 | std::error_code errorToErrorCode(Error Err); |
1080 | |
1081 | /// Convert an ErrorOr<T> to an Expected<T>. |
1082 | template <typename T> Expected<T> errorOrToExpected(ErrorOr<T> &&EO) { |
1083 | if (auto EC = EO.getError()) |
1084 | return errorCodeToError(EC); |
1085 | return std::move(*EO); |
1086 | } |
1087 | |
1088 | /// Convert an Expected<T> to an ErrorOr<T>. |
1089 | template <typename T> ErrorOr<T> expectedToErrorOr(Expected<T> &&E) { |
1090 | if (auto Err = E.takeError()) |
1091 | return errorToErrorCode(std::move(Err)); |
1092 | return std::move(*E); |
1093 | } |
1094 | |
1095 | /// This class wraps a string in an Error. |
1096 | /// |
1097 | /// StringError is useful in cases where the client is not expected to be able |
1098 | /// to consume the specific error message programmatically (for example, if the |
1099 | /// error message is to be presented to the user). |
1100 | class StringError : public ErrorInfo<StringError> { |
1101 | public: |
1102 | static char ID; |
1103 | |
1104 | StringError(const Twine &S, std::error_code EC); |
1105 | |
1106 | void log(raw_ostream &OS) const override; |
1107 | std::error_code convertToErrorCode() const override; |
1108 | |
1109 | const std::string &getMessage() const { return Msg; } |
1110 | |
1111 | private: |
1112 | std::string Msg; |
1113 | std::error_code EC; |
1114 | }; |
1115 | |
1116 | /// Helper for check-and-exit error handling. |
1117 | /// |
1118 | /// For tool use only. NOT FOR USE IN LIBRARY CODE. |
1119 | /// |
1120 | class ExitOnError { |
1121 | public: |
1122 | /// Create an error on exit helper. |
1123 | ExitOnError(std::string Banner = "", int DefaultErrorExitCode = 1) |
1124 | : Banner(std::move(Banner)), |
1125 | GetExitCode([=](const Error &) { return DefaultErrorExitCode; }) {} |
1126 | |
1127 | /// Set the banner string for any errors caught by operator(). |
1128 | void setBanner(std::string Banner) { this->Banner = std::move(Banner); } |
1129 | |
1130 | /// Set the exit-code mapper function. |
1131 | void setExitCodeMapper(std::function<int(const Error &)> GetExitCode) { |
1132 | this->GetExitCode = std::move(GetExitCode); |
1133 | } |
1134 | |
1135 | /// Check Err. If it's in a failure state log the error(s) and exit. |
1136 | void operator()(Error Err) const { checkError(std::move(Err)); } |
1137 | |
1138 | /// Check E. If it's in a success state then return the contained value. If |
1139 | /// it's in a failure state log the error(s) and exit. |
1140 | template <typename T> T operator()(Expected<T> &&E) const { |
1141 | checkError(E.takeError()); |
1142 | return std::move(*E); |
1143 | } |
1144 | |
1145 | /// Check E. If it's in a success state then return the contained reference. If |
1146 | /// it's in a failure state log the error(s) and exit. |
1147 | template <typename T> T& operator()(Expected<T&> &&E) const { |
1148 | checkError(E.takeError()); |
1149 | return *E; |
1150 | } |
1151 | |
1152 | private: |
1153 | void checkError(Error Err) const { |
1154 | if (Err) { |
1155 | int ExitCode = GetExitCode(Err); |
1156 | logAllUnhandledErrors(std::move(Err), errs(), Banner); |
1157 | exit(ExitCode); |
1158 | } |
1159 | } |
1160 | |
1161 | std::string Banner; |
1162 | std::function<int(const Error &)> GetExitCode; |
1163 | }; |
1164 | |
1165 | } // end namespace llvm |
1166 | |
1167 | #endif // LLVM_SUPPORT_ERROR_H |
1 | //===- llvm/ADT/STLExtras.h - Useful STL related functions ------*- C++ -*-===// |
2 | // |
3 | // The LLVM Compiler Infrastructure |
4 | // |
5 | // This file is distributed under the University of Illinois Open Source |
6 | // License. See LICENSE.TXT for details. |
7 | // |
8 | //===----------------------------------------------------------------------===// |
9 | // |
10 | // This file contains some templates that are useful if you are working with the |
11 | // STL at all. |
12 | // |
13 | // No library is required when using these functions. |
14 | // |
15 | //===----------------------------------------------------------------------===// |
16 | |
17 | #ifndef LLVM_ADT_STLEXTRAS_H |
18 | #define LLVM_ADT_STLEXTRAS_H |
19 | |
20 | #include "llvm/ADT/Optional.h" |
21 | #include "llvm/ADT/SmallVector.h" |
22 | #include "llvm/ADT/iterator.h" |
23 | #include "llvm/ADT/iterator_range.h" |
24 | #include "llvm/Support/ErrorHandling.h" |
25 | #include <algorithm> |
26 | #include <cassert> |
27 | #include <cstddef> |
28 | #include <cstdint> |
29 | #include <cstdlib> |
30 | #include <functional> |
31 | #include <initializer_list> |
32 | #include <iterator> |
33 | #include <limits> |
34 | #include <memory> |
35 | #include <tuple> |
36 | #include <type_traits> |
37 | #include <utility> |
38 | |
39 | namespace llvm { |
40 | |
// Only used by compiler if both template types are the same. Useful when
// using SFINAE to test for the existence of member functions.
template <typename T, T> struct SameType;

namespace detail {

/// The iterator type obtained by calling std::begin on an lvalue of RangeT.
template <typename RangeT>
using IterOfRange = decltype(std::begin(std::declval<RangeT &>()));

/// The (reference-stripped) element type of RangeT.
template <typename RangeT>
using ValueOfRange = typename std::remove_reference<decltype(
    *std::begin(std::declval<RangeT &>()))>::type;

} // end namespace detail
55 | |
56 | //===----------------------------------------------------------------------===// |
57 | // Extra additions to <functional> |
58 | //===----------------------------------------------------------------------===// |
59 | |
/// Function object that returns its argument unchanged.
template <class Ty> struct identity {
  using argument_type = Ty;

  Ty &operator()(Ty &self) const {
    return self;
  }
  const Ty &operator()(const Ty &self) const {
    return self;
  }
};
70 | |
/// Function object that orders pointers by comparing their pointees with <.
template <class Ty> struct less_ptr {
  bool operator()(const Ty* left, const Ty* right) const {
    return *left < *right;
  }
};

/// Function object that orders pointers by the reverse of <, i.e. descending
/// pointee order.
template <class Ty> struct greater_ptr {
  bool operator()(const Ty* left, const Ty* right) const {
    return *right < *left;
  }
};
82 | |
/// An efficient, type-erasing, non-owning reference to a callable. This is
/// intended for use as the type of a function parameter that is not used
/// after the function in question returns.
///
/// This class does not own the callable, so it is not in general safe to store
/// a function_ref.
template<typename Fn> class function_ref;

template<typename Ret, typename ...Params>
class function_ref<Ret(Params...)> {
  // Trampoline that casts the erased callable back to its concrete type and
  // invokes it. Null means "no callable bound".
  Ret (*callback)(intptr_t callable, Params ...params) = nullptr;
  // Address of the referenced callable, erased to an integer-sized value.
  intptr_t callable;

  template<typename Callable>
  static Ret callback_fn(intptr_t callable, Params ...params) {
    return (*reinterpret_cast<Callable*>(callable))(
        std::forward<Params>(params)...);
  }

public:
  function_ref() = default;
  function_ref(std::nullptr_t) {}

  template <typename Callable>
  function_ref(Callable &&callable,
               // Disable this constructor for function_ref arguments so the
               // copy constructor is used instead of re-erasing.
               typename std::enable_if<
                   !std::is_same<typename std::remove_reference<Callable>::type,
                                 function_ref>::value>::type * = nullptr)
      : callback(callback_fn<typename std::remove_reference<Callable>::type>),
        callable(reinterpret_cast<intptr_t>(&callable)) {}

  Ret operator()(Params ...params) const {
    return callback(callable, std::forward<Params>(params)...);
  }

  /// True if this function_ref is bound to a callable.
  operator bool() const { return callback; }
};
120 | |
// deleter - Very very very simple method that is used to invoke operator
// delete on something. It is used like this:
//
//   for_each(V.begin(), B.end(), deleter<Interval>);
template <class T>
inline void deleter(T *Ptr) {
  delete Ptr;
}
129 | |
130 | //===----------------------------------------------------------------------===// |
131 | // Extra additions to <iterator> |
132 | //===----------------------------------------------------------------------===// |
133 | |
// Implementation detail for the ADL-enabled begin/end/swap wrappers below.
// The "using std::X" declarations make the std overloads visible alongside
// any overloads found by argument-dependent lookup, exactly like the
// canonical "using std::swap; swap(a, b);" two-step idiom.
namespace adl_detail {

using std::begin;

// Calls `begin(container)` with both std::begin and ADL candidates in scope.
template <typename ContainerTy>
auto adl_begin(ContainerTy &&container)
    -> decltype(begin(std::forward<ContainerTy>(container))) {
  return begin(std::forward<ContainerTy>(container));
}

using std::end;

// Calls `end(container)` with both std::end and ADL candidates in scope.
template <typename ContainerTy>
auto adl_end(ContainerTy &&container)
    -> decltype(end(std::forward<ContainerTy>(container))) {
  return end(std::forward<ContainerTy>(container));
}

using std::swap;

// Calls `swap(lhs, rhs)` with both std::swap and ADL candidates in scope,
// propagating the noexcept-ness of the selected overload.
template <typename T>
void adl_swap(T &&lhs, T &&rhs) noexcept(noexcept(swap(std::declval<T>(),
                                                       std::declval<T>()))) {
  swap(std::forward<T>(lhs), std::forward<T>(rhs));
}

} // end namespace adl_detail
161 | |
/// Returns the begin iterator of \p container, found via std::begin or
/// argument-dependent lookup (see adl_detail::adl_begin).
template <typename ContainerTy>
auto adl_begin(ContainerTy &&container)
    -> decltype(adl_detail::adl_begin(std::forward<ContainerTy>(container))) {
  return adl_detail::adl_begin(std::forward<ContainerTy>(container));
}
167 | |
/// Returns the end iterator of \p container, found via std::end or
/// argument-dependent lookup (see adl_detail::adl_end).
template <typename ContainerTy>
auto adl_end(ContainerTy &&container)
    -> decltype(adl_detail::adl_end(std::forward<ContainerTy>(container))) {
  return adl_detail::adl_end(std::forward<ContainerTy>(container));
}
173 | |
/// Swaps \p lhs and \p rhs using std::swap or an ADL-found swap overload
/// (see adl_detail::adl_swap), forwarding the overload's noexcept-ness.
template <typename T>
void adl_swap(T &&lhs, T &&rhs) noexcept(
    noexcept(adl_detail::adl_swap(std::declval<T>(), std::declval<T>()))) {
  adl_detail::adl_swap(std::forward<T>(lhs), std::forward<T>(rhs));
}
179 | |
// mapped_iterator - This is a simple iterator adapter that causes a function to
// be applied whenever operator* is invoked on the iterator.
//
// FuncReturnTy defaults to whatever F returns for a dereferenced ItTy; the
// iterator's value_type strips any reference from that result.
template <typename ItTy, typename FuncTy,
          typename FuncReturnTy =
              decltype(std::declval<FuncTy>()(*std::declval<ItTy>()))>
class mapped_iterator
    : public iterator_adaptor_base<
          mapped_iterator<ItTy, FuncTy>, ItTy,
          typename std::iterator_traits<ItTy>::iterator_category,
          typename std::remove_reference<FuncReturnTy>::type> {
public:
  mapped_iterator(ItTy U, FuncTy F)
    : mapped_iterator::iterator_adaptor_base(std::move(U)), F(std::move(F)) {}

  // Access to the wrapped (unmapped) iterator.
  ItTy getCurrent() { return this->I; }

  // Dereference the wrapped iterator and apply F to the result.
  FuncReturnTy operator*() { return F(*this->I); }

private:
  FuncTy F;
};
202 | |
203 | // map_iterator - Provide a convenient way to create mapped_iterators, just like |
204 | // make_pair is useful for creating pairs... |
205 | template <class ItTy, class FuncTy> |
206 | inline mapped_iterator<ItTy, FuncTy> map_iterator(ItTy I, FuncTy F) { |
207 | return mapped_iterator<ItTy, FuncTy>(std::move(I), std::move(F)); |
208 | } |
209 | |
/// Helper to determine if type T has a member called rbegin().
///
/// Classic sizeof-based SFINAE probe: the return type of whichever test()
/// overload is selected has a distinct size, which `value` inspects.
template <typename Ty> class has_rbegin_impl {
  using yes = char[1];
  using no = char[2];

  // Preferred overload: only viable when I->rbegin() is well-formed.
  template <typename Inner>
  static yes& test(Inner *I, decltype(I->rbegin()) * = nullptr);

  // Fallback overload: matches anything.
  template <typename>
  static no& test(...);

public:
  static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
};
224 | |
/// Metafunction to determine if T& or T has a member called rbegin().
/// Strips the reference so both lvalue-reference and value types are accepted.
template <typename Ty>
struct has_rbegin : has_rbegin_impl<typename std::remove_reference<Ty>::type> {
};
229 | |
// Returns an iterator_range over the given container which iterates in reverse.
// Note that the container must have rbegin()/rend() methods for this to work.
//
// This overload is selected (via has_rbegin) only for containers providing
// native rbegin()/rend(); the overload below handles everything else.
template <typename ContainerTy>
auto reverse(ContainerTy &&C,
             typename std::enable_if<has_rbegin<ContainerTy>::value>::type * =
                 nullptr) -> decltype(make_range(C.rbegin(), C.rend())) {
  return make_range(C.rbegin(), C.rend());
}
238 | |
239 | // Returns a std::reverse_iterator wrapped around the given iterator. |
240 | template <typename IteratorTy> |
241 | std::reverse_iterator<IteratorTy> make_reverse_iterator(IteratorTy It) { |
242 | return std::reverse_iterator<IteratorTy>(It); |
243 | } |
244 | |
// Returns an iterator_range over the given container which iterates in reverse.
// Note that the container must have begin()/end() methods which return
// bidirectional iterators for this to work.
//
// Fallback overload for containers without rbegin()/rend(): builds the
// reversed range from std::reverse_iterator wrappers around begin()/end().
template <typename ContainerTy>
auto reverse(
    ContainerTy &&C,
    typename std::enable_if<!has_rbegin<ContainerTy>::value>::type * = nullptr)
    -> decltype(make_range(llvm::make_reverse_iterator(std::end(C)),
                           llvm::make_reverse_iterator(std::begin(C)))) {
  return make_range(llvm::make_reverse_iterator(std::end(C)),
                    llvm::make_reverse_iterator(std::begin(C)));
}
257 | |
/// An iterator adaptor that filters the elements of given inner iterators.
///
/// The predicate parameter should be a callable object that accepts the wrapped
/// iterator's reference type and returns a bool. When incrementing or
/// decrementing the iterator, it will call the predicate on each element and
/// skip any where it returns false.
///
/// \code
///   int A[] = { 1, 2, 3, 4 };
///   auto R = make_filter_range(A, [](int N) { return N % 2 == 1; });
///   // R contains { 1, 3 }.
/// \endcode
template <typename WrappedIteratorT, typename PredicateT>
class filter_iterator
    : public iterator_adaptor_base<
          filter_iterator<WrappedIteratorT, PredicateT>, WrappedIteratorT,
          typename std::common_type<
              std::forward_iterator_tag,
              typename std::iterator_traits<
                  WrappedIteratorT>::iterator_category>::type> {
  // Category is capped at forward_iterator_tag: going backwards would also
  // have to re-run the predicate, which only the forward path implements.
  using BaseT = iterator_adaptor_base<
      filter_iterator<WrappedIteratorT, PredicateT>, WrappedIteratorT,
      typename std::common_type<
          std::forward_iterator_tag,
          typename std::iterator_traits<WrappedIteratorT>::iterator_category>::
          type>;

  // State a "begin" iterator needs: where the sequence ends and the predicate
  // to apply. An "end" iterator leaves Payload disengaged (it never advances).
  struct PayloadType {
    WrappedIteratorT End;
    PredicateT Pred;
  };

  Optional<PayloadType> Payload;

  // Advance this->I past elements rejected by the predicate, stopping at End.
  void findNextValid() {
    assert(Payload && "Payload should be engaged when findNextValid is called")(static_cast <bool> (Payload && "Payload should be engaged when findNextValid is called" ) ? void (0) : __assert_fail ("Payload && \"Payload should be engaged when findNextValid is called\"" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/STLExtras.h" , 293, __extension__ __PRETTY_FUNCTION__));
    while (this->I != Payload->End && !Payload->Pred(*this->I))
      BaseT::operator++();
  }

  // Construct the begin iterator. The begin iterator requires to know where end
  // is, so that it can properly stop when it hits end. It eagerly skips to the
  // first element satisfying the predicate.
  filter_iterator(WrappedIteratorT Begin, WrappedIteratorT End, PredicateT Pred)
      : BaseT(std::move(Begin)),
        Payload(PayloadType{std::move(End), std::move(Pred)}) {
    findNextValid();
  }

  // Construct the end iterator. It's not incrementable, so Payload doesn't
  // have to be engaged.
  filter_iterator(WrappedIteratorT End) : BaseT(End) {}

public:
  using BaseT::operator++;

  // Step once, then keep stepping until the predicate accepts an element or
  // we reach the end of the underlying range.
  filter_iterator &operator++() {
    BaseT::operator++();
    findNextValid();
    return *this;
  }

  // Only make_filter_range may use the private constructors above.
  template <typename RT, typename PT>
  friend iterator_range<filter_iterator<detail::IterOfRange<RT>, PT>>
  make_filter_range(RT &&, PT);
};
323 | |
/// Convenience function that takes a range of elements and a predicate,
/// and return a new filter_iterator range.
///
/// FIXME: Currently if RangeT && is a rvalue reference to a temporary, the
/// lifetime of that temporary is not kept by the returned range object, and the
/// temporary is going to be dropped on the floor after the make_iterator_range
/// full expression that contains this function call.
template <typename RangeT, typename PredicateT>
iterator_range<filter_iterator<detail::IterOfRange<RangeT>, PredicateT>>
make_filter_range(RangeT &&Range, PredicateT Pred) {
  using FilterIteratorT =
      filter_iterator<detail::IterOfRange<RangeT>, PredicateT>;
  // NOTE(review): Range is named in several std::forward calls, but begin/end
  // never actually move from it, so this is safe in practice.
  return make_range(FilterIteratorT(std::begin(std::forward<RangeT>(Range)),
                                    std::end(std::forward<RangeT>(Range)),
                                    std::move(Pred)),
                    FilterIteratorT(std::end(std::forward<RangeT>(Range))));
}
341 | |
342 | // forward declarations required by zip_shortest/zip_first |
343 | template <typename R, typename UnaryPredicate> |
344 | bool all_of(R &&range, UnaryPredicate P); |
345 | |
346 | template <size_t... I> struct index_sequence; |
347 | |
348 | template <class... Ts> struct index_sequence_for; |
349 | |
350 | namespace detail { |
351 | |
352 | using std::declval; |
353 | |
// We have to alias this since inlining the actual type at the usage site
// in the parameter list of iterator_facade_base<> below ICEs MSVC 2017.
//
// The zipped value type is a tuple of whatever each inner iterator's
// operator* yields (typically references).
template<typename... Iters> struct ZipTupleType {
  using type = std::tuple<decltype(*declval<Iters>())...>;
};
359 | |
// Base-class alias for zip iterators: the category is the weakest of the
// inner iterators' categories, capped at bidirectional.
template <typename ZipType, typename... Iters>
using zip_traits = iterator_facade_base<
    ZipType, typename std::common_type<std::bidirectional_iterator_tag,
                                       typename std::iterator_traits<
                                           Iters>::iterator_category...>::type,
    // ^ TODO: Implement random access methods.
    typename ZipTupleType<Iters...>::type,
    typename std::iterator_traits<typename std::tuple_element<
        0, std::tuple<Iters...>>::type>::difference_type,
    // ^ FIXME: This follows boost::make_zip_iterator's assumption that all
    // inner iterators have the same difference_type. It would fail if, for
    // instance, the second field's difference_type were non-numeric while the
    // first is.
    typename ZipTupleType<Iters...>::type *,
    typename ZipTupleType<Iters...>::type>;
375 | |
376 | template <typename ZipType, typename... Iters> |
377 | struct zip_common : public zip_traits<ZipType, Iters...> { |
378 | using Base = zip_traits<ZipType, Iters...>; |
379 | using value_type = typename Base::value_type; |
380 | |
381 | std::tuple<Iters...> iterators; |
382 | |
383 | protected: |
384 | template <size_t... Ns> value_type deref(index_sequence<Ns...>) const { |
385 | return value_type(*std::get<Ns>(iterators)...); |
386 | } |
387 | |
388 | template <size_t... Ns> |
389 | decltype(iterators) tup_inc(index_sequence<Ns...>) const { |
390 | return std::tuple<Iters...>(std::next(std::get<Ns>(iterators))...); |
391 | } |
392 | |
393 | template <size_t... Ns> |
394 | decltype(iterators) tup_dec(index_sequence<Ns...>) const { |
395 | return std::tuple<Iters...>(std::prev(std::get<Ns>(iterators))...); |
396 | } |
397 | |
398 | public: |
399 | zip_common(Iters &&... ts) : iterators(std::forward<Iters>(ts)...) {} |
400 | |
401 | value_type operator*() { return deref(index_sequence_for<Iters...>{}); } |
402 | |
403 | const value_type operator*() const { |
404 | return deref(index_sequence_for<Iters...>{}); |
405 | } |
406 | |
407 | ZipType &operator++() { |
408 | iterators = tup_inc(index_sequence_for<Iters...>{}); |
409 | return *reinterpret_cast<ZipType *>(this); |
410 | } |
411 | |
412 | ZipType &operator--() { |
413 | static_assert(Base::IsBidirectional, |
414 | "All inner iterators must be at least bidirectional."); |
415 | iterators = tup_dec(index_sequence_for<Iters...>{}); |
416 | return *reinterpret_cast<ZipType *>(this); |
417 | } |
418 | }; |
419 | |
// Zip iterator that compares equal when the *first* inner iterator matches;
// cheaper than zip_shortest but assumes the first sequence is the shortest.
template <typename... Iters>
struct zip_first : public zip_common<zip_first<Iters...>, Iters...> {
  using Base = zip_common<zip_first<Iters...>, Iters...>;

  // Only the first inner iterator participates in the end-of-range test.
  bool operator==(const zip_first<Iters...> &other) const {
    return std::get<0>(this->iterators) == std::get<0>(other.iterators);
  }

  zip_first(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}
};
430 | |
// Zip iterator that stops as soon as *any* inner sequence is exhausted.
template <typename... Iters>
class zip_shortest : public zip_common<zip_shortest<Iters...>, Iters...> {
  // True iff every inner iterator differs from the corresponding one in
  // `other` — i.e. none of the sequences has hit its end yet.
  template <size_t... Ns>
  bool test(const zip_shortest<Iters...> &other, index_sequence<Ns...>) const {
    return all_of(std::initializer_list<bool>{std::get<Ns>(this->iterators) !=
                                              std::get<Ns>(other.iterators)...},
                  identity<bool>{});
  }

public:
  using Base = zip_common<zip_shortest<Iters...>, Iters...>;

  zip_shortest(Iters &&... ts) : Base(std::forward<Iters>(ts)...) {}

  // Equal (== end) when at least one inner iterator has reached its end.
  bool operator==(const zip_shortest<Iters...> &other) const {
    return !test(other, index_sequence_for<Iters...>{});
  }
};
449 | |
// Range returned by zip()/zip_first(): stores the zipped ranges (possibly by
// value, keeping temporaries alive) and hands out ItType iterators over them.
template <template <typename...> class ItType, typename... Args> class zippy {
public:
  using iterator = ItType<decltype(std::begin(std::declval<Args>()))...>;
  using iterator_category = typename iterator::iterator_category;
  using value_type = typename iterator::value_type;
  using difference_type = typename iterator::difference_type;
  using pointer = typename iterator::pointer;
  using reference = typename iterator::reference;

private:
  std::tuple<Args...> ts;

  // Expand the stored ranges into begin()/end() iterator packs.
  template <size_t... Ns> iterator begin_impl(index_sequence<Ns...>) const {
    return iterator(std::begin(std::get<Ns>(ts))...);
  }
  template <size_t... Ns> iterator end_impl(index_sequence<Ns...>) const {
    return iterator(std::end(std::get<Ns>(ts))...);
  }

public:
  zippy(Args &&... ts_) : ts(std::forward<Args>(ts_)...) {}

  iterator begin() const { return begin_impl(index_sequence_for<Args...>{}); }
  iterator end() const { return end_impl(index_sequence_for<Args...>{}); }
};
475 | |
476 | } // end namespace detail |
477 | |
478 | /// zip iterator for two or more iteratable types. |
479 | template <typename T, typename U, typename... Args> |
480 | detail::zippy<detail::zip_shortest, T, U, Args...> zip(T &&t, U &&u, |
481 | Args &&... args) { |
482 | return detail::zippy<detail::zip_shortest, T, U, Args...>( |
483 | std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...); |
484 | } |
485 | |
486 | /// zip iterator that, for the sake of efficiency, assumes the first iteratee to |
487 | /// be the shortest. |
488 | template <typename T, typename U, typename... Args> |
489 | detail::zippy<detail::zip_first, T, U, Args...> zip_first(T &&t, U &&u, |
490 | Args &&... args) { |
491 | return detail::zippy<detail::zip_first, T, U, Args...>( |
492 | std::forward<T>(t), std::forward<U>(u), std::forward<Args>(args)...); |
493 | } |
494 | |
/// Iterator wrapper that concatenates sequences together.
///
/// This can concatenate different iterators, even with different types, into
/// a single iterator provided the value types of all the concatenated
/// iterators expose `reference` and `pointer` types that can be converted to
/// `ValueT &` and `ValueT *` respectively. It doesn't support more
/// interesting/customized pointer or reference types.
///
/// Currently this only supports forward or higher iterator categories as
/// inputs and always exposes a forward iterator interface.
template <typename ValueT, typename... IterTs>
class concat_iterator
    : public iterator_facade_base<concat_iterator<ValueT, IterTs...>,
                                  std::forward_iterator_tag, ValueT> {
  using BaseT = typename concat_iterator::iterator_facade_base;

  /// We store both the current and end iterators for each concatenated
  /// sequence in a tuple of pairs.
  ///
  /// Note that something like iterator_range seems nice at first here, but the
  /// range properties are of little benefit and end up getting in the way
  /// because we need to do mutation on the current iterators.
  std::tuple<std::pair<IterTs, IterTs>...> IterPairs;

  /// Attempts to increment a specific iterator.
  ///
  /// Returns true if it was able to increment the iterator. Returns false if
  /// the iterator is already at the end iterator.
  template <size_t Index> bool incrementHelper() {
    auto &IterPair = std::get<Index>(IterPairs);
    if (IterPair.first == IterPair.second)
      return false;

    ++IterPair.first;
    return true;
  }

  /// Increments the first non-end iterator.
  ///
  /// It is an error to call this with all iterators at the end.
  template <size_t... Ns> void increment(index_sequence<Ns...>) {
    // Build a sequence of functions to increment each iterator if possible.
    bool (concat_iterator::*IncrementHelperFns[])() = {
        &concat_iterator::incrementHelper<Ns>...};

    // Loop over them, and stop as soon as we succeed at incrementing one.
    for (auto &IncrementHelperFn : IncrementHelperFns)
      if ((this->*IncrementHelperFn)())
        return;

    llvm_unreachable("Attempted to increment an end concat iterator!")::llvm::llvm_unreachable_internal("Attempted to increment an end concat iterator!" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/STLExtras.h" , 545);
  }

  /// Returns null if the specified iterator is at the end. Otherwise,
  /// dereferences the iterator and returns the address of the resulting
  /// reference.
  template <size_t Index> ValueT *getHelper() const {
    auto &IterPair = std::get<Index>(IterPairs);
    if (IterPair.first == IterPair.second)
      return nullptr;

    return &*IterPair.first;
  }

  /// Finds the first non-end iterator, dereferences, and returns the resulting
  /// reference.
  ///
  /// It is an error to call this with all iterators at the end.
  template <size_t... Ns> ValueT &get(index_sequence<Ns...>) const {
    // Build a sequence of functions to get from iterator if possible.
    ValueT *(concat_iterator::*GetHelperFns[])() const = {
        &concat_iterator::getHelper<Ns>...};

    // Loop over them, and return the first result we find.
    for (auto &GetHelperFn : GetHelperFns)
      if (ValueT *P = (this->*GetHelperFn)())
        return *P;

    llvm_unreachable("Attempted to get a pointer from an end concat iterator!")::llvm::llvm_unreachable_internal("Attempted to get a pointer from an end concat iterator!" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/STLExtras.h" , 573);
  }

public:
  /// Constructs an iterator from a sequence of ranges.
  ///
  /// We need the full range to know how to switch between each of the
  /// iterators.
  template <typename... RangeTs>
  explicit concat_iterator(RangeTs &&... Ranges)
      : IterPairs({std::begin(Ranges), std::end(Ranges)}...) {}

  using BaseT::operator++;

  concat_iterator &operator++() {
    increment(index_sequence_for<IterTs...>());
    return *this;
  }

  ValueT &operator*() const { return get(index_sequence_for<IterTs...>()); }

  bool operator==(const concat_iterator &RHS) const {
    return IterPairs == RHS.IterPairs;
  }
};
598 | |
599 | namespace detail { |
600 | |
/// Helper to store a sequence of ranges being concatenated and access them.
///
/// This is designed to facilitate providing actual storage when temporaries
/// are passed into the constructor such that we can use it as part of range
/// based for loops.
template <typename ValueT, typename... RangeTs> class concat_range {
public:
  using iterator =
      concat_iterator<ValueT,
                      decltype(std::begin(std::declval<RangeTs &>()))...>;

private:
  // Owns (or references) the concatenated ranges, keeping temporaries alive.
  std::tuple<RangeTs...> Ranges;

  template <size_t... Ns> iterator begin_impl(index_sequence<Ns...>) {
    return iterator(std::get<Ns>(Ranges)...);
  }
  // The end iterator concatenates empty (end, end) sub-ranges so every inner
  // current-iterator already equals its end.
  template <size_t... Ns> iterator end_impl(index_sequence<Ns...>) {
    return iterator(make_range(std::end(std::get<Ns>(Ranges)),
                               std::end(std::get<Ns>(Ranges)))...);
  }

public:
  concat_range(RangeTs &&... Ranges)
      : Ranges(std::forward<RangeTs>(Ranges)...) {}

  iterator begin() { return begin_impl(index_sequence_for<RangeTs...>{}); }
  iterator end() { return end_impl(index_sequence_for<RangeTs...>{}); }
};
630 | |
631 | } // end namespace detail |
632 | |
/// Concatenated range across two or more ranges.
///
/// The desired value type must be explicitly specified (it cannot be deduced
/// because the input ranges may have different element types).
template <typename ValueT, typename... RangeTs>
detail::concat_range<ValueT, RangeTs...> concat(RangeTs &&... Ranges) {
  static_assert(sizeof...(RangeTs) > 1,
                "Need more than one range to concatenate!");
  return detail::concat_range<ValueT, RangeTs...>(
      std::forward<RangeTs>(Ranges)...);
}
643 | |
644 | //===----------------------------------------------------------------------===// |
645 | // Extra additions to <utility> |
646 | //===----------------------------------------------------------------------===// |
647 | |
/// \brief Function object comparing two std::pair-like values by their first
/// components only; the second components are ignored.
struct less_first {
  template <typename T> bool operator()(const T &lhs, const T &rhs) const {
    const auto &LHSKey = lhs.first;
    const auto &RHSKey = rhs.first;
    return LHSKey < RHSKey;
  }
};
655 | |
/// \brief Function object comparing two std::pair-like values by their second
/// components only; the first components are ignored.
struct less_second {
  template <typename T> bool operator()(const T &lhs, const T &rhs) const {
    const auto &LHSVal = lhs.second;
    const auto &RHSVal = rhs.second;
    return LHSVal < RHSVal;
  }
};
663 | |
// A subset of N3658. More stuff can be added as-needed.
// (Local stand-ins for C++14's std::integer_sequence family.)

/// \brief Represents a compile-time sequence of integers.
template <class T, T... I> struct integer_sequence {
  using value_type = T;

  static constexpr size_t size() { return sizeof...(I); }
};
672 | |
/// \brief Alias for the common case of a sequence of size_ts.
template <size_t... I>
struct index_sequence : integer_sequence<std::size_t, I...> {};
676 | |
// Recursively prepends N-1, N-2, ..., 0 until the base case (N == 0) yields
// index_sequence<0, 1, ..., N-1>.
template <std::size_t N, std::size_t... I>
struct build_index_impl : build_index_impl<N - 1, N - 1, I...> {};
template <std::size_t... I>
struct build_index_impl<0, I...> : index_sequence<I...> {};
681 | |
/// \brief Creates a compile-time integer sequence for a parameter pack
/// (index_sequence<0, 1, ..., sizeof...(Ts)-1>).
template <class... Ts>
struct index_sequence_for : build_index_impl<sizeof...(Ts)> {};
685 | |
/// Utility type to build an inheritance chain that makes it easy to rank
/// overload candidates: an argument of type rank<N> prefers an overload
/// taking rank<N> over one taking rank<N-1>, and so on down to rank<0>.
template <int N> struct rank : rank<N - 1> {};
template <> struct rank<0> {};
690 | |
/// \brief traits class for checking whether type T is one of any of the given
/// types in the variadic list.
// Base case: empty list, T matches nothing.
template <typename T, typename... Ts> struct is_one_of {
  static const bool value = false;
};

// Recursive case: T matches the head U, or something in the tail Ts....
template <typename T, typename U, typename... Ts>
struct is_one_of<T, U, Ts...> {
  static const bool value =
      std::is_same<T, U>::value || is_one_of<T, Ts...>::value;
};
702 | |
/// \brief traits class for checking whether type T is a base class for all
///  the given types in the variadic list.
// Base case: vacuously true for an empty list.
template <typename T, typename... Ts> struct are_base_of {
  static const bool value = true;
};

// Recursive case: T must be a base of the head U and of everything in Ts....
template <typename T, typename U, typename... Ts>
struct are_base_of<T, U, Ts...> {
  static const bool value =
      std::is_base_of<T, U>::value && are_base_of<T, Ts...>::value;
};
714 | |
715 | //===----------------------------------------------------------------------===// |
716 | // Extra additions for arrays |
717 | //===----------------------------------------------------------------------===// |
718 | |
/// Find the length of an array at compile time. Only accepts true C arrays
/// (not pointers), so a decayed pointer is a compile error rather than a
/// silently wrong answer.
template <class T, std::size_t N>
constexpr inline size_t array_lengthof(T (&)[N]) {
  return N;
}
724 | |
/// Adapt std::less<T> to the three-way qsort comparator contract used by
/// array_pod_sort: negative / zero / positive for less / equal / greater.
template<typename T>
inline int array_pod_sort_comparator(const void *P1, const void *P2) {
  const T &LHS = *reinterpret_cast<const T *>(P1);
  const T &RHS = *reinterpret_cast<const T *>(P2);
  std::less<T> Less;
  if (Less(LHS, RHS))
    return -1;
  if (Less(RHS, LHS))
    return 1;
  return 0;
}
736 | |
737 | /// get_array_pod_sort_comparator - This is an internal helper function used to |
738 | /// get type deduction of T right. |
739 | template<typename T> |
740 | inline int (*get_array_pod_sort_comparator(const T &)) |
741 | (const void*, const void*) { |
742 | return array_pod_sort_comparator<T>; |
743 | } |
744 | |
/// array_pod_sort - This sorts an array with the specified start and end
/// extent. This is just like std::sort, except that it calls qsort instead of
/// using an inlined template. qsort is slightly slower than std::sort, but
/// most sorts are not performance critical in LLVM and std::sort has to be
/// template instantiated for each type, leading to significant measured code
/// bloat. This function should generally be used instead of std::sort where
/// possible.
///
/// This function assumes that you have simple POD-like types that can be
/// compared with std::less and can be moved with memcpy. If this isn't true,
/// you should use std::sort.
///
/// NOTE: If qsort_r were portable, we could allow a custom comparator and
/// default to std::less.
template<class IteratorTy>
inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
  // qsort with zero elements would dereference an empty range's Start (UB),
  // and with one element it is a pointless call — skip both cases.
  const auto NumElts = End - Start;
  if (NumElts > 1)
    qsort(&*Start, NumElts, sizeof(*Start),
          get_array_pod_sort_comparator(*Start));
}
767 | |
/// Overload of array_pod_sort taking an explicit three-way comparator with
/// typed element pointers; it is reinterpreted to qsort's untyped signature.
template <class IteratorTy>
inline void array_pod_sort(
    IteratorTy Start, IteratorTy End,
    int (*Compare)(
        const typename std::iterator_traits<IteratorTy>::value_type *,
        const typename std::iterator_traits<IteratorTy>::value_type *)) {
  // qsort with zero elements would dereference an empty range's Start (UB),
  // and with one element it is a pointless call — skip both cases.
  const auto NumElts = End - Start;
  if (NumElts > 1)
    qsort(&*Start, NumElts, sizeof(*Start),
          reinterpret_cast<int (*)(const void *, const void *)>(Compare));
}
781 | |
782 | //===----------------------------------------------------------------------===// |
783 | // Extra additions to <algorithm> |
784 | //===----------------------------------------------------------------------===// |
785 | |
/// For a container of pointers, deletes the pointers and then clears the
/// container.
template<typename Container>
void DeleteContainerPointers(Container &C) {
  // Free every pointee first; clearing the container alone would leak them.
  for (auto I = C.begin(), E = C.end(); I != E; ++I)
    delete *I;
  C.clear();
}
794 | |
/// In a container of pairs (usually a map) whose second element is a pointer,
/// deletes the second elements and then clears the container.
template<typename Container>
void DeleteContainerSeconds(Container &C) {
  // Free each mapped pointer before dropping the entries that own them.
  for (auto I = C.begin(), E = C.end(); I != E; ++I)
    delete I->second;
  C.clear();
}
803 | |
/// Range-based wrapper around std::for_each so callers don't have to spell
/// out begin/end explicitly.
template <typename R, typename UnaryPredicate>
UnaryPredicate for_each(R &&Range, UnaryPredicate P) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::for_each(First, Last, P);
}
810 | |
/// Range-based wrapper around std::all_of so callers don't have to spell
/// out begin/end explicitly.
template <typename R, typename UnaryPredicate>
bool all_of(R &&Range, UnaryPredicate P) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::all_of(First, Last, P);
}
817 | |
/// Range-based wrapper around std::any_of so callers don't have to spell
/// out begin/end explicitly.
template <typename R, typename UnaryPredicate>
bool any_of(R &&Range, UnaryPredicate P) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::any_of(First, Last, P);
}
824 | |
/// Range-based wrapper around std::none_of so callers don't have to spell
/// out begin/end explicitly.
template <typename R, typename UnaryPredicate>
bool none_of(R &&Range, UnaryPredicate P) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::none_of(First, Last, P);
}
831 | |
/// Range-based wrapper around std::find so callers don't have to spell
/// out begin/end explicitly. Returns the end iterator when \p Val is absent.
template <typename R, typename T>
auto find(R &&Range, const T &Val) -> decltype(adl_begin(Range)) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::find(First, Last, Val);
}
838 | |
/// Range-based wrapper around std::find_if so callers don't have to spell
/// out begin/end explicitly.
template <typename R, typename UnaryPredicate>
auto find_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::find_if(First, Last, P);
}
845 | |
/// Range-based wrapper around std::find_if_not so callers don't have to spell
/// out begin/end explicitly.
template <typename R, typename UnaryPredicate>
auto find_if_not(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::find_if_not(First, Last, P);
}
850 | |
/// Range-based wrapper around std::remove_if so callers don't have to spell
/// out begin/end explicitly. Returns the new logical end of the range.
template <typename R, typename UnaryPredicate>
auto remove_if(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::remove_if(First, Last, P);
}
857 | |
/// Range-based wrapper around std::copy_if so callers don't have to spell
/// out begin/end explicitly.
template <typename R, typename OutputIt, typename UnaryPredicate>
OutputIt copy_if(R &&Range, OutputIt Out, UnaryPredicate P) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::copy_if(First, Last, Out, P);
}
864 | |
/// Range-based wrapper around std::copy so callers don't have to spell
/// out begin/end explicitly.
template <typename R, typename OutputIt>
OutputIt copy(R &&Range, OutputIt Out) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::copy(First, Last, Out);
}
869 | |
/// Wrapper function around std::find to detect if an element exists
/// in a container.
template <typename R, typename E>
bool is_contained(R &&Range, const E &Element) {
  auto Last = adl_end(Range);
  return std::find(adl_begin(Range), Last, Element) != Last;
}
876 | |
/// Wrapper function around std::count to count the number of times an element
/// \p Element occurs in the given range \p Range.
template <typename R, typename E>
auto count(R &&Range, const E &Element) ->
    typename std::iterator_traits<decltype(adl_begin(Range))>::difference_type {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::count(First, Last, Element);
}
884 | |
/// Wrapper function around std::count_if to count the number of times an
/// element satisfying a given predicate occurs in a range.
template <typename R, typename UnaryPredicate>
auto count_if(R &&Range, UnaryPredicate P) ->
    typename std::iterator_traits<decltype(adl_begin(Range))>::difference_type {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::count_if(First, Last, P);
}
892 | |
/// Range-based wrapper around std::transform: applies \p P to each element of
/// \p Range and writes the results through \p d_first.
template <typename R, typename OutputIt, typename UnaryPredicate>
OutputIt transform(R &&Range, OutputIt d_first, UnaryPredicate P) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::transform(First, Last, d_first, P);
}
899 | |
/// Range-based wrapper around std::partition, saving the caller from spelling
/// out the begin/end iterator pair explicitly.
template <typename R, typename UnaryPredicate>
auto partition(R &&Range, UnaryPredicate P) -> decltype(adl_begin(Range)) {
  auto First = adl_begin(Range);
  auto Last = adl_end(Range);
  return std::partition(First, Last, P);
}
906 | |
/// Provide wrappers to std::lower_bound which take ranges instead of having to
/// pass begin/end explicitly.
///
/// Returns an iterator to the first element of \p Range that does not compare
/// less than \p Value, or the end iterator if no such element exists.
template <typename R, typename T>
auto lower_bound(R &&Range, T &&Value) -> decltype(adl_begin(Range)) {
  // The third argument of std::lower_bound is the searched-for *value*, not
  // an iterator; the previous `ForwardIt I` naming was misleading. Forwarding
  // also avoids copying expensive-to-copy key types.
  return std::lower_bound(adl_begin(Range), adl_end(Range),
                          std::forward<T>(Value));
}
913 | |
/// \brief Given a range of type R, iterate the entire range and return a
/// SmallVector with elements of the vector. This is useful, for example,
/// when you want to iterate a range and then sort the results.
template <unsigned Size, typename R>
SmallVector<typename std::remove_const<detail::ValueOfRange<R>>::type, Size>
to_vector(R &&Range) {
  // Copies every element of the range; remove_const makes the result
  // mutable even when the input yields const values.
  return {adl_begin(Range), adl_end(Range)};
}
922 | |
/// Provide a container algorithm similar to C++ Library Fundamentals v2's
/// `erase_if` which is equivalent to:
///
///   C.erase(remove_if(C, pred), C.end());
///
/// This version works for any container with an erase method call accepting
/// two iterators.
template <typename Container, typename UnaryPredicate>
void erase_if(Container &C, UnaryPredicate P) {
  // Classic erase-remove idiom: compact the kept elements, then drop the
  // trailing removed ones.
  auto NewEnd = remove_if(C, P);
  C.erase(NewEnd, C.end());
}
934 | |
935 | //===----------------------------------------------------------------------===// |
936 | // Extra additions to <memory> |
937 | //===----------------------------------------------------------------------===// |
938 | |
939 | // Implement make_unique according to N3656. |
940 | |
/// \brief Constructs a `new T()` with the given args and returns a
/// `unique_ptr<T>` which owns the object, per proposal N3656.
///
/// Example:
///
///     auto p = make_unique<int>();
///     auto p = make_unique<std::tuple<int, int>>(0, 1);
template <class T, class... Args>
typename std::enable_if<!std::is_array<T>::value, std::unique_ptr<T>>::type
make_unique(Args &&... args) {
  // Only enabled for non-array T; the array form lives in a separate
  // overload below.
  T *Ptr = new T(std::forward<Args>(args)...);
  return std::unique_ptr<T>(Ptr);
}
953 | |
/// \brief Constructs a `new T[n]` with the given args and returns a
/// `unique_ptr<T[]>` which owns the object, per proposal N3656.
///
/// \param n size of the new array.
///
/// Example:
///
///     auto p = make_unique<int[]>(2); // value-initializes the array with 0's.
template <class T>
typename std::enable_if<std::is_array<T>::value && std::extent<T>::value == 0,
                        std::unique_ptr<T>>::type
make_unique(size_t n) {
  // Only enabled for arrays of unknown bound (T[]); the trailing `()`
  // value-initializes every element.
  using ElemT = typename std::remove_extent<T>::type;
  return std::unique_ptr<T>(new ElemT[n]());
}
968 | |
/// This function isn't used and is only here to provide better compile errors.
// Deleted overload for arrays of known bound (T[N]), as specified by N3656:
// make_unique<int[4]>(...) is ill-formed and should fail loudly.
template <class T, class... Args>
typename std::enable_if<std::extent<T>::value != 0>::type
make_unique(Args &&...) = delete;
973 | |
/// Deleter functor that releases storage obtained from malloc-family
/// allocators via ::free; intended for use with std::unique_ptr.
struct FreeDeleter {
  void operator()(void *v) { ::free(v); }
};
979 | |
980 | template<typename First, typename Second> |
981 | struct pair_hash { |
982 | size_t operator()(const std::pair<First, Second> &P) const { |
983 | return std::hash<First>()(P.first) * 31 + std::hash<Second>()(P.second); |
984 | } |
985 | }; |
986 | |
/// A transparent comparison functor, standing in for C++14's std::less<void>
/// where that is unavailable.
struct less {
  template <typename A, typename B>
  bool operator()(A &&Lhs, B &&Rhs) const {
    return std::forward<A>(Lhs) < std::forward<B>(Rhs);
  }
};
993 | |
/// A transparent equality functor, standing in for C++14's
/// std::equal_to<void> where that is unavailable.
struct equal {
  template <typename A, typename B>
  bool operator()(A &&Lhs, B &&Rhs) const {
    return std::forward<A>(Lhs) == std::forward<B>(Rhs);
  }
};
1000 | |
/// Binary functor that adapts to any other binary functor after dereferencing
/// operands. Useful for applying a comparator to ranges of pointers or
/// iterators (e.g. sorting a container of pointers by pointee).
template <typename T> struct deref {
  T func;

  // Could be further improved to cope with non-derivable functors and
  // non-binary functors (should be a variadic template member function
  // operator()).
  template <typename A, typename B>
  auto operator()(A &lhs, B &rhs) const -> decltype(func(*lhs, *rhs)) {
    // Both operands must be dereferenceable (non-null).
    assert(lhs)(static_cast <bool> (lhs) ? void (0) : __assert_fail ("lhs" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/STLExtras.h" , 1011, __extension__ __PRETTY_FUNCTION__));
    assert(rhs)(static_cast <bool> (rhs) ? void (0) : __assert_fail ("rhs" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/STLExtras.h" , 1012, __extension__ __PRETTY_FUNCTION__));
    return func(*lhs, *rhs);
  }
};
1016 | |
1017 | namespace detail { |
1018 | |
1019 | template <typename R> class enumerator_iter; |
1020 | |
/// An (index, iterator) pair yielded by enumerate(): gives access to both an
/// element's zero-based position in the sequence and the element itself.
template <typename R> struct result_pair {
  friend class enumerator_iter<R>;

  result_pair() = default;
  result_pair(std::size_t Index, IterOfRange<R> Iter)
      : Index(Index), Iter(Iter) {}

  // User-provided copy-assignment: assigns index and wrapped iterator
  // member-wise so enumerator_iter can be assigned as a unit.
  result_pair<R> &operator=(const result_pair<R> &Other) {
    Index = Other.Index;
    Iter = Other.Iter;
    return *this;
  }

  /// Zero-based position of the element within the enumerated range.
  std::size_t index() const { return Index; }
  /// The element itself (const overload).
  const ValueOfRange<R> &value() const { return *Iter; }
  /// The element itself (mutable overload).
  ValueOfRange<R> &value() { return *Iter; }

private:
  // size_t max() serves as the sentinel index for end iterators.
  std::size_t Index = std::numeric_limits<std::size_t>::max();
  IterOfRange<R> Iter;
};
1042 | |
/// Forward iterator over an enumerated range. Dereferencing yields a
/// result_pair combining each element with its running zero-based index.
template <typename R>
class enumerator_iter
    : public iterator_facade_base<
          enumerator_iter<R>, std::forward_iterator_tag, result_pair<R>,
          typename std::iterator_traits<IterOfRange<R>>::difference_type,
          typename std::iterator_traits<IterOfRange<R>>::pointer,
          typename std::iterator_traits<IterOfRange<R>>::reference> {
  using result_type = result_pair<R>;

public:
  /// Construct an end iterator; the index is the size_t max() sentinel.
  explicit enumerator_iter(IterOfRange<R> EndIter)
    : Result(std::numeric_limits<size_t>::max(), EndIter) {}

  /// Construct an iterator at position \p Index pointing at \p Iter.
  enumerator_iter(std::size_t Index, IterOfRange<R> Iter)
    : Result(Index, Iter) {}

  result_type &operator*() { return Result; }
  const result_type &operator*() const { return Result; }

  // Pre-increment: advance both the wrapped iterator and the index.
  // Incrementing an end iterator (sentinel index) is a bug.
  enumerator_iter<R> &operator++() {
    assert(Result.Index != std::numeric_limits<size_t>::max())(static_cast <bool> (Result.Index != std::numeric_limits <size_t>::max()) ? void (0) : __assert_fail ("Result.Index != std::numeric_limits<size_t>::max()" , "/build/llvm-toolchain-snapshot-7~svn326246/include/llvm/ADT/STLExtras.h" , 1063, __extension__ __PRETTY_FUNCTION__));
    ++Result.Iter;
    ++Result.Index;
    return *this;
  }

  bool operator==(const enumerator_iter<R> &RHS) const {
    // Don't compare indices here, only iterators. It's possible for an end
    // iterator to have different indices depending on whether it was created
    // by calling std::end() versus incrementing a valid iterator.
    return Result.Iter == RHS.Result.Iter;
  }

  // User-provided copy-assignment, mirroring result_pair's member-wise
  // operator=.
  enumerator_iter<R> &operator=(const enumerator_iter<R> &Other) {
    Result = Other.Result;
    return *this;
  }

private:
  result_type Result;
};
1084 | |
1085 | template <typename R> class enumerator { |
1086 | public: |
1087 | explicit enumerator(R &&Range) : TheRange(std::forward<R>(Range)) {} |
1088 | |
1089 | enumerator_iter<R> begin() { |
1090 | return enumerator_iter<R>(0, std::begin(TheRange)); |
1091 | } |
1092 | |
1093 | enumerator_iter<R> end() { |
1094 | return enumerator_iter<R>(std::end(TheRange)); |
1095 | } |
1096 | |
1097 | private: |
1098 | R TheRange; |
1099 | }; |
1100 | |
1101 | } // end namespace detail |
1102 | |
/// Given an input range, returns a new range whose values are pairs (A, B)
/// such that A is the 0-based index of the item in the sequence, and B is
/// the value from the original sequence. Example:
///
/// std::vector<char> Items = {'A', 'B', 'C', 'D'};
/// for (auto X : enumerate(Items)) {
///   printf("Item %d - %c\n", X.index(), X.value());
/// }
///
/// Output:
///   Item 0 - A
///   Item 1 - B
///   Item 2 - C
///   Item 3 - D
///
template <typename R> detail::enumerator<R> enumerate(R &&TheRange) {
  return detail::enumerator<R>(std::forward<R>(TheRange));
}
1121 | |
1122 | namespace detail { |
1123 | |
// Helper for apply_tuple: expands the index pack I... to extract every tuple
// element with std::get and invoke f on them in order.
template <typename F, typename Tuple, std::size_t... I>
auto apply_tuple_impl(F &&f, Tuple &&t, index_sequence<I...>)
    -> decltype(std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...)) {
  return std::forward<F>(f)(std::get<I>(std::forward<Tuple>(t))...);
}
1129 | |
1130 | } // end namespace detail |
1131 | |
1132 | /// Given an input tuple (a1, a2, ..., an), pass the arguments of the |
1133 | /// tuple variadically to f as if by calling f(a1, a2, ..., an) and |
1134 | /// return the result. |
1135 | template <typename F, typename Tuple> |
1136 | auto apply_tuple(F &&f, Tuple &&t) -> decltype(detail::apply_tuple_impl( |
1137 | std::forward<F>(f), std::forward<Tuple>(t), |
1138 | build_index_impl< |
1139 | std::tuple_size<typename std::decay<Tuple>::type>::value>{})) { |
1140 | using Indices = build_index_impl< |
1141 | std::tuple_size<typename std::decay<Tuple>::type>::value>; |
1142 | |
1143 | return detail::apply_tuple_impl(std::forward<F>(f), std::forward<Tuple>(t), |
1144 | Indices{}); |
1145 | } |
1146 | |
1147 | } // end namespace llvm |
1148 | |
1149 | #endif // LLVM_ADT_STLEXTRAS_H |