Bug Summary

File: tools/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugLine.cpp
Warning: line 67, column 29
Use of memory after it is freed
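
For orientation, the bug class here is a read through a raw pointer whose
pointee was released when the shared_ptr that owned it was reassigned. A
minimal sketch of the pattern the checker models (illustrative code, not
taken from this report):

    #include <memory>

    int main() {
      std::shared_ptr<int> a(new int(1));
      int *raw = a.get();  // raw alias; holds no reference count
      std::shared_ptr<int> b(new int(2));
      a = b;               // defaulted copy assignment releases the old int
      return *raw;         // use of memory after it is freed
    }

In the annotated path below, the copy assignment in question is
"m_lineTableMap[debug_line_offset] = line_table_sp;" (steps 13 and 37).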

Annotated Source Code


clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name DWARFDebugLine.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-eagerly-assume -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -mrelocation-model pic -pic-level 2 -mthread-model posix -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -momit-leaf-frame-pointer -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-7/lib/clang/7.0.0 -D HAVE_ROUND -D LLDB_CONFIGURATION_RELEASE -D LLDB_USE_BUILTIN_DEMANGLER -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-7~svn326246/build-llvm/tools/lldb/source/Plugins/SymbolFile/DWARF -I /build/llvm-toolchain-snapshot-7~svn326246/tools/lldb/source/Plugins/SymbolFile/DWARF -I /build/llvm-toolchain-snapshot-7~svn326246/build-llvm/tools/lldb/include -I /build/llvm-toolchain-snapshot-7~svn326246/tools/lldb/include -I /build/llvm-toolchain-snapshot-7~svn326246/build-llvm/include -I /build/llvm-toolchain-snapshot-7~svn326246/include -I /usr/include/python2.7 -I /build/llvm-toolchain-snapshot-7~svn326246/tools/clang/include -I /build/llvm-toolchain-snapshot-7~svn326246/build-llvm/tools/lldb/../clang/include -I /build/llvm-toolchain-snapshot-7~svn326246/tools/lldb/source/. -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/x86_64-linux-gnu/c++/7.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/backward -internal-isystem /usr/include/clang/7.0.0/include/ -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-7/lib/clang/7.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -Wno-deprecated-declarations -Wno-unknown-pragmas -Wno-strict-aliasing -Wno-deprecated-register -Wno-vla-extension -std=c++11 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-7~svn326246/build-llvm/tools/lldb/source/Plugins/SymbolFile/DWARF -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-checker optin.performance.Padding -analyzer-output=html -analyzer-config stable-report-filename=true -o /tmp/scan-build-2018-02-28-041547-14988-1 -x c++ /build/llvm-toolchain-snapshot-7~svn326246/tools/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugLine.cpp

/build/llvm-toolchain-snapshot-7~svn326246/tools/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugLine.cpp

1//===-- DWARFDebugLine.cpp --------------------------------------*- C++ -*-===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "DWARFDebugLine.h"
11
12//#define ENABLE_DEBUG_PRINTF // DO NOT LEAVE THIS DEFINED: DEBUG ONLY!!!
13#include <assert.h>
14
15#include "lldb/Core/FileSpecList.h"
16#include "lldb/Core/Module.h"
17#include "lldb/Host/Host.h"
18#include "lldb/Utility/Log.h"
19#include "lldb/Utility/Timer.h"
20
21#include "LogChannelDWARF.h"
22#include "SymbolFileDWARF.h"
23
24using namespace lldb;
25using namespace lldb_private;
26using namespace std;
27
28//----------------------------------------------------------------------
29// Parse
30//
31// Parse all information in the debug_line_data into an internal
32// representation.
33//----------------------------------------------------------------------
34void DWARFDebugLine::Parse(const DWARFDataExtractor &debug_line_data) {
35 m_lineTableMap.clear();
36 lldb::offset_t offset = 0;
37 LineTable::shared_ptr line_table_sp(new LineTable);
38 while (debug_line_data.ValidOffset(offset)) {
Step 4: Loop condition is true. Entering loop body
Step 8: Loop condition is true. Entering loop body
Step 31: Loop condition is true. Entering loop body
39 const lldb::offset_t debug_line_offset = offset;
40
41  if (line_table_sp.get() == NULL)
Step 5: Taking false branch
Step 9: Taking false branch
Step 32: Assuming the condition is false
Step 33: Taking false branch
42 break;
43
44 if (ParseStatementTable(debug_line_data, &offset, line_table_sp.get())) {
Step 6: Assuming the condition is false
Step 7: Taking false branch
Step 10: Assuming the condition is true
Step 11: Taking true branch
Step 34: Assuming the condition is true
Step 35: Taking true branch
45      // Make sure we don't loop infinitely
46 if (offset <= debug_line_offset)
Step 12: Taking false branch
Step 36: Taking false branch
47 break;
48 // DEBUG_PRINTF("m_lineTableMap[0x%8.8x] = line_table_sp\n",
49 // debug_line_offset);
50 m_lineTableMap[debug_line_offset] = line_table_sp;
Step 13: Calling defaulted copy assignment operator for 'shared_ptr'
Step 30: Returning; memory was released
Step 37: Calling defaulted copy assignment operator for 'shared_ptr'
51 line_table_sp.reset(new LineTable);
52 } else
53 ++offset; // Try next byte in line table
54 }
55}
56
57void DWARFDebugLine::ParseIfNeeded(const DWARFDataExtractor &debug_line_data) {
58 if (m_lineTableMap.empty())
Step 1: Assuming the condition is true
Step 2: Taking true branch
59 Parse(debug_line_data);
Step 3: Calling 'DWARFDebugLine::Parse'
60}
61
62//----------------------------------------------------------------------
63// DWARFDebugLine::GetLineTable
64//----------------------------------------------------------------------
65DWARFDebugLine::LineTable::shared_ptr
66DWARFDebugLine::GetLineTable(const dw_offset_t offset) const {
67 DWARFDebugLine::LineTable::shared_ptr line_table_shared_ptr;
68 LineTableConstIter pos = m_lineTableMap.find(offset);
69 if (pos != m_lineTableMap.end())
70 line_table_shared_ptr = pos->second;
71 return line_table_shared_ptr;
72}
73
74//----------------------------------------------------------------------
75// DumpStateToFile
76//----------------------------------------------------------------------
77static void DumpStateToFile(dw_offset_t offset,
78 const DWARFDebugLine::State &state,
79 void *userData) {
80 Log *log = (Log *)userData;
81 if (state.row == DWARFDebugLine::State::StartParsingLineTable) {
82 // If the row is zero we are being called with the prologue only
83 state.prologue->Dump(log);
84 log->PutCString("Address Line Column File");
85 log->PutCString("------------------ ------ ------ ------");
86 } else if (state.row == DWARFDebugLine::State::DoneParsingLineTable) {
87 // Done parsing line table
88 } else {
89    log->Printf("0x%16.16" PRIx64 " %6u %6u %6u%s\n", state.address, state.line,
90 state.column, state.file, state.end_sequence ? " END" : "");
91 }
92}
93
94//----------------------------------------------------------------------
95// DWARFDebugLine::DumpLineTableRows
96//----------------------------------------------------------------------
97bool DWARFDebugLine::DumpLineTableRows(Log *log, SymbolFileDWARF *dwarf2Data,
98 dw_offset_t debug_line_offset) {
99 const DWARFDataExtractor &debug_line_data = dwarf2Data->get_debug_line_data();
100
101  if (debug_line_offset == DW_INVALID_OFFSET) {
102    // Dump all line tables in the section
103 debug_line_offset = 0;
104 while (debug_line_data.ValidOffset(debug_line_offset))
105 debug_line_offset =
106 DumpStatementTable(log, debug_line_data, debug_line_offset);
107 } else {
108 // Dump line table to a single file only
109 DumpStatementTable(log, debug_line_data, debug_line_offset);
110 }
111 return false;
112}
113
114//----------------------------------------------------------------------
115// DWARFDebugLine::DumpStatementTable
116//----------------------------------------------------------------------
117dw_offset_t
118DWARFDebugLine::DumpStatementTable(Log *log,
119 const DWARFDataExtractor &debug_line_data,
120 const dw_offset_t debug_line_offset) {
121 if (debug_line_data.ValidOffset(debug_line_offset)) {
122 lldb::offset_t offset = debug_line_offset;
123 log->Printf("--------------------------------------------------------------"
124 "--------\n"
125 "debug_line[0x%8.8x]\n"
126 "--------------------------------------------------------------"
127 "--------\n",
128 debug_line_offset);
129
130 if (ParseStatementTable(debug_line_data, &offset, DumpStateToFile, log))
131 return offset;
132 else
133 return debug_line_offset + 1; // Skip to next byte in .debug_line section
134 }
135
136  return DW_INVALID_OFFSET;
137}
138
139//----------------------------------------------------------------------
140// DumpOpcodes
141//----------------------------------------------------------------------
142bool DWARFDebugLine::DumpOpcodes(Log *log, SymbolFileDWARF *dwarf2Data,
143 dw_offset_t debug_line_offset,
144 uint32_t dump_flags) {
145 const DWARFDataExtractor &debug_line_data = dwarf2Data->get_debug_line_data();
146
147 if (debug_line_data.GetByteSize() == 0) {
148 log->Printf("< EMPTY >\n");
149 return false;
150 }
151
152  if (debug_line_offset == DW_INVALID_OFFSET) {
153    // Dump all line tables in the section
154 debug_line_offset = 0;
155 while (debug_line_data.ValidOffset(debug_line_offset))
156 debug_line_offset = DumpStatementOpcodes(log, debug_line_data,
157 debug_line_offset, dump_flags);
158 } else {
159 // Dump line table to a single file only
160 DumpStatementOpcodes(log, debug_line_data, debug_line_offset, dump_flags);
161 }
162 return false;
163}
164
165//----------------------------------------------------------------------
166// DumpStatementOpcodes
167//----------------------------------------------------------------------
168dw_offset_t DWARFDebugLine::DumpStatementOpcodes(
169 Log *log, const DWARFDataExtractor &debug_line_data,
170 const dw_offset_t debug_line_offset, uint32_t flags) {
171 lldb::offset_t offset = debug_line_offset;
172 if (debug_line_data.ValidOffset(offset)) {
173 Prologue prologue;
174
175 if (ParsePrologue(debug_line_data, &offset, &prologue)) {
176 log->PutCString("--------------------------------------------------------"
177 "--------------");
178 log->Printf("debug_line[0x%8.8x]", debug_line_offset);
179 log->PutCString("--------------------------------------------------------"
180 "--------------\n");
181 prologue.Dump(log);
182 } else {
183 offset = debug_line_offset;
184      log->Printf("0x%8.8" PRIx64 ": skipping pad byte %2.2x", offset,
185 debug_line_data.GetU8(&offset));
186 return offset;
187 }
188
189 Row row(prologue.default_is_stmt);
190 const dw_offset_t end_offset = debug_line_offset + prologue.total_length +
191 sizeof(prologue.total_length);
192
193    assert(debug_line_data.ValidOffset(end_offset - 1));
194
195 while (offset < end_offset) {
196 const uint32_t op_offset = offset;
197 uint8_t opcode = debug_line_data.GetU8(&offset);
198 switch (opcode) {
199 case 0: // Extended Opcodes always start with a zero opcode followed by
200 { // a uleb128 length so you can skip ones you don't know about
201
202 dw_offset_t ext_offset = offset;
203 dw_uleb128_t len = debug_line_data.GetULEB128(&offset);
204 dw_offset_t arg_size = len - (offset - ext_offset);
205 uint8_t sub_opcode = debug_line_data.GetU8(&offset);
206 // if (verbose)
207 // log->Printf( "Extended: <%u> %2.2x ", len,
208 // sub_opcode);
209
210 switch (sub_opcode) {
211 case DW_LNE_end_sequence:
212 log->Printf("0x%8.8x: DW_LNE_end_sequence", op_offset);
213 row.Dump(log);
214 row.Reset(prologue.default_is_stmt);
215 break;
216
217 case DW_LNE_set_address: {
218 row.address = debug_line_data.GetMaxU64(&offset, arg_size);
219          log->Printf("0x%8.8x: DW_LNE_set_address (0x%" PRIx64 ")", op_offset,
220 row.address);
221 } break;
222
223 case DW_LNE_define_file: {
224 FileNameEntry fileEntry;
225 fileEntry.name = debug_line_data.GetCStr(&offset);
226 fileEntry.dir_idx = debug_line_data.GetULEB128(&offset);
227 fileEntry.mod_time = debug_line_data.GetULEB128(&offset);
228 fileEntry.length = debug_line_data.GetULEB128(&offset);
229 log->Printf("0x%8.8x: DW_LNE_define_file('%s', dir=%i, "
230 "mod_time=0x%8.8x, length=%i )",
231 op_offset, fileEntry.name, fileEntry.dir_idx,
232 fileEntry.mod_time, fileEntry.length);
233 prologue.file_names.push_back(fileEntry);
234 } break;
235
236 case DW_LNE_set_discriminator: {
237 uint64_t discriminator = debug_line_data.GetULEB128(&offset);
238          log->Printf("0x%8.8x: DW_LNE_set_discriminator (0x%" PRIx64 ")",
239 op_offset, discriminator);
240 } break;
241 default:
242          log->Printf("0x%8.8x: DW_LNE_??? (%2.2x) - Skipping unknown opcode",
243 op_offset, opcode);
244 // Length doesn't include the zero opcode byte or the length itself,
245 // but
246 // it does include the sub_opcode, so we have to adjust for that below
247 offset += arg_size;
248 break;
249 }
250 } break;
251
252 // Standard Opcodes
253 case DW_LNS_copy:
254 log->Printf("0x%8.8x: DW_LNS_copy", op_offset);
255 row.Dump(log);
256 break;
257
258 case DW_LNS_advance_pc: {
259 dw_uleb128_t addr_offset_n = debug_line_data.GetULEB128(&offset);
260 dw_uleb128_t addr_offset = addr_offset_n * prologue.min_inst_length;
261 log->Printf("0x%8.8x: DW_LNS_advance_pc (0x%x)", op_offset,
262 addr_offset);
263 row.address += addr_offset;
264 } break;
265
266 case DW_LNS_advance_line: {
267 dw_sleb128_t line_offset = debug_line_data.GetSLEB128(&offset);
268 log->Printf("0x%8.8x: DW_LNS_advance_line (%i)", op_offset,
269 line_offset);
270 row.line += line_offset;
271 } break;
272
273 case DW_LNS_set_file:
274 row.file = debug_line_data.GetULEB128(&offset);
275 log->Printf("0x%8.8x: DW_LNS_set_file (%u)", op_offset, row.file);
276 break;
277
278 case DW_LNS_set_column:
279 row.column = debug_line_data.GetULEB128(&offset);
280 log->Printf("0x%8.8x: DW_LNS_set_column (%u)", op_offset, row.column);
281 break;
282
283 case DW_LNS_negate_stmt:
284 row.is_stmt = !row.is_stmt;
285 log->Printf("0x%8.8x: DW_LNS_negate_stmt", op_offset);
286 break;
287
288 case DW_LNS_set_basic_block:
289 row.basic_block = true;
290 log->Printf("0x%8.8x: DW_LNS_set_basic_block", op_offset);
291 break;
292
293 case DW_LNS_const_add_pc: {
294 uint8_t adjust_opcode = 255 - prologue.opcode_base;
295 dw_addr_t addr_offset =
296 (adjust_opcode / prologue.line_range) * prologue.min_inst_length;
297        log->Printf("0x%8.8x: DW_LNS_const_add_pc (0x%8.8" PRIx64 ")",
298 op_offset, addr_offset);
299 row.address += addr_offset;
300 } break;
301
302 case DW_LNS_fixed_advance_pc: {
303 uint16_t pc_offset = debug_line_data.GetU16(&offset);
304 log->Printf("0x%8.8x: DW_LNS_fixed_advance_pc (0x%4.4x)", op_offset,
305 pc_offset);
306 row.address += pc_offset;
307 } break;
308
309 case DW_LNS_set_prologue_end:
310 row.prologue_end = true;
311 log->Printf("0x%8.8x: DW_LNS_set_prologue_end", op_offset);
312 break;
313
314 case DW_LNS_set_epilogue_begin:
315 row.epilogue_begin = true;
316 log->Printf("0x%8.8x: DW_LNS_set_epilogue_begin", op_offset);
317 break;
318
319 case DW_LNS_set_isa:
320 row.isa = debug_line_data.GetULEB128(&offset);
321 log->Printf("0x%8.8x: DW_LNS_set_isa (%u)", op_offset, row.isa);
322 break;
323
324 // Special Opcodes
325 default:
326 if (opcode < prologue.opcode_base) {
327 // We have an opcode that this parser doesn't know about, skip
328          // the number of ULEB128 numbers that it says to skip in the
329 // prologue's standard_opcode_lengths array
330 uint8_t n = prologue.standard_opcode_lengths[opcode - 1];
331 log->Printf("0x%8.8x: Special : Unknown skipping %u ULEB128 values.",
332 op_offset, n);
333 while (n > 0) {
334 debug_line_data.GetULEB128(&offset);
335 --n;
336 }
337 } else {
338 uint8_t adjust_opcode = opcode - prologue.opcode_base;
339 dw_addr_t addr_offset =
340 (adjust_opcode / prologue.line_range) * prologue.min_inst_length;
341 int32_t line_offset =
342 prologue.line_base + (adjust_opcode % prologue.line_range);
343          log->Printf("0x%8.8x: address += 0x%" PRIx64 ", line += %i\n",
344 op_offset, (uint64_t)addr_offset, line_offset);
345 row.address += addr_offset;
346 row.line += line_offset;
347 row.Dump(log);
348 }
349 break;
350 }
351 }
352 return end_offset;
353 }
354  return DW_INVALID_OFFSET;
355}
356
357//----------------------------------------------------------------------
358// Parse
359//
360// Parse the entire line table contents calling callback each time a
361// new prologue is parsed and every time a new row is to be added to
362// the line table.
363//----------------------------------------------------------------------
364void DWARFDebugLine::Parse(const DWARFDataExtractor &debug_line_data,
365 DWARFDebugLine::State::Callback callback,
366 void *userData) {
367 lldb::offset_t offset = 0;
368 if (debug_line_data.ValidOffset(offset)) {
369 if (!ParseStatementTable(debug_line_data, &offset, callback, userData))
370 ++offset; // Skip to next byte in .debug_line section
371 }
372}
373
374//----------------------------------------------------------------------
375// DWARFDebugLine::ParsePrologue
376//----------------------------------------------------------------------
377bool DWARFDebugLine::ParsePrologue(const DWARFDataExtractor &debug_line_data,
378 lldb::offset_t *offset_ptr,
379 Prologue *prologue) {
380 const lldb::offset_t prologue_offset = *offset_ptr;
381
382 // DEBUG_PRINTF("0x%8.8x: ParsePrologue()\n", *offset_ptr);
383
384 prologue->Clear();
385 uint32_t i;
386 const char *s;
387 prologue->total_length = debug_line_data.GetDWARFInitialLength(offset_ptr);
388 prologue->version = debug_line_data.GetU16(offset_ptr);
389 if (prologue->version < 2 || prologue->version > 4)
390 return false;
391
392 prologue->prologue_length = debug_line_data.GetDWARFOffset(offset_ptr);
393 const lldb::offset_t end_prologue_offset =
394 prologue->prologue_length + *offset_ptr;
395 prologue->min_inst_length = debug_line_data.GetU8(offset_ptr);
396 if (prologue->version >= 4)
397 prologue->maximum_operations_per_instruction =
398 debug_line_data.GetU8(offset_ptr);
399 else
400 prologue->maximum_operations_per_instruction = 1;
401 prologue->default_is_stmt = debug_line_data.GetU8(offset_ptr);
402 prologue->line_base = debug_line_data.GetU8(offset_ptr);
403 prologue->line_range = debug_line_data.GetU8(offset_ptr);
404 prologue->opcode_base = debug_line_data.GetU8(offset_ptr);
405
406 prologue->standard_opcode_lengths.reserve(prologue->opcode_base - 1);
407
408 for (i = 1; i < prologue->opcode_base; ++i) {
409 uint8_t op_len = debug_line_data.GetU8(offset_ptr);
410 prologue->standard_opcode_lengths.push_back(op_len);
411 }
412
413 while (*offset_ptr < end_prologue_offset) {
414 s = debug_line_data.GetCStr(offset_ptr);
415 if (s && s[0])
416 prologue->include_directories.push_back(s);
417 else
418 break;
419 }
420
421 while (*offset_ptr < end_prologue_offset) {
422 const char *name = debug_line_data.GetCStr(offset_ptr);
423 if (name && name[0]) {
424 FileNameEntry fileEntry;
425 fileEntry.name = name;
426 fileEntry.dir_idx = debug_line_data.GetULEB128(offset_ptr);
427 fileEntry.mod_time = debug_line_data.GetULEB128(offset_ptr);
428 fileEntry.length = debug_line_data.GetULEB128(offset_ptr);
429 prologue->file_names.push_back(fileEntry);
430 } else
431 break;
432 }
433
434 // XXX GNU as is broken for 64-Bit DWARF
435 if (*offset_ptr != end_prologue_offset) {
436 Host::SystemLog(Host::eSystemLogWarning,
437                    "warning: parsing line table prologue at 0x%8.8" PRIx64
438                    " should have ended at 0x%8.8" PRIx64
439                    " but it ended at 0x%8.8" PRIx64 "\n",
440 prologue_offset, end_prologue_offset, *offset_ptr);
441 }
442 return end_prologue_offset;
443}
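
As a cross-check on the offset arithmetic in ParsePrologue: for 32-bit
DWARF, total_length counts the bytes that follow its own 4-byte field, and
prologue_length counts the bytes that follow its own field, which sits after
the 2-byte version. A sketch under that assumption (read_u16/read_u32 are
hypothetical helpers, not LLDB API):

    lldb::offset_t unit_start = 0;                        // start of one line table
    uint32_t total_length    = read_u32(unit_start);      // bytes after this field
    uint16_t version         = read_u16(unit_start + 4);
    uint32_t prologue_length = read_u32(unit_start + 6);  // bytes after this field
    lldb::offset_t end_prologue_offset = (unit_start + 10) + prologue_length;
    lldb::offset_t end_unit_offset     = (unit_start + 4)  + total_length;

end_unit_offset matches the end_offset computed in ParseStatementTable below
(debug_line_offset + total_length + GetDWARFSizeofInitialLength()).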
444
445bool DWARFDebugLine::ParseSupportFiles(
446 const lldb::ModuleSP &module_sp, const DWARFDataExtractor &debug_line_data,
447 const char *cu_comp_dir, dw_offset_t stmt_list,
448 FileSpecList &support_files) {
449 lldb::offset_t offset = stmt_list;
450
451 Prologue prologue;
452 if (!ParsePrologue(debug_line_data, &offset, &prologue)) {
453 Host::SystemLog(Host::eSystemLogError, "error: parsing line table prologue "
454 "at 0x%8.8x (parsing ended around "
455                    "0x%8.8" PRIx64 "\n",
456 stmt_list, offset);
457 return false;
458 }
459
460 FileSpec file_spec;
461 std::string remapped_file;
462
463 for (uint32_t file_idx = 1;
464 prologue.GetFile(file_idx, cu_comp_dir, file_spec); ++file_idx) {
465 if (module_sp->RemapSourceFile(file_spec.GetPath(), remapped_file))
466 file_spec.SetFile(remapped_file, false);
467 support_files.Append(file_spec);
468 }
469 return true;
470}
471
472//----------------------------------------------------------------------
473// ParseStatementTable
474//
475// Parse a single line table (prologue and all rows) and call the
476// callback function once for the prologue (row in state will be zero)
477// and each time a row is to be added to the line table.
478//----------------------------------------------------------------------
479bool DWARFDebugLine::ParseStatementTable(
480 const DWARFDataExtractor &debug_line_data, lldb::offset_t *offset_ptr,
481 DWARFDebugLine::State::Callback callback, void *userData) {
482  Log *log(LogChannelDWARF::GetLogIfAll(DWARF_LOG_DEBUG_LINE));
483 Prologue::shared_ptr prologue(new Prologue());
484
485 const dw_offset_t debug_line_offset = *offset_ptr;
486
487  static Timer::Category func_cat(LLVM_PRETTY_FUNCTION);
488 Timer scoped_timer(
489 func_cat, "DWARFDebugLine::ParseStatementTable (.debug_line[0x%8.8x])",
490 debug_line_offset);
491
492 if (!ParsePrologue(debug_line_data, offset_ptr, prologue.get())) {
493 if (log)
494 log->Error("failed to parse DWARF line table prologue");
495 // Restore our offset and return false to indicate failure!
496 *offset_ptr = debug_line_offset;
497 return false;
498 }
499
500 if (log)
501 prologue->Dump(log);
502
503 const dw_offset_t end_offset =
504 debug_line_offset + prologue->total_length +
505 (debug_line_data.GetDWARFSizeofInitialLength());
506
507 State state(prologue, log, callback, userData);
508
509 while (*offset_ptr < end_offset) {
510 // DEBUG_PRINTF("0x%8.8x: ", *offset_ptr);
511 uint8_t opcode = debug_line_data.GetU8(offset_ptr);
512
513 if (opcode == 0) {
514 // Extended Opcodes always start with a zero opcode followed by
515 // a uleb128 length so you can skip ones you don't know about
516 lldb::offset_t ext_offset = *offset_ptr;
517 dw_uleb128_t len = debug_line_data.GetULEB128(offset_ptr);
518 dw_offset_t arg_size = len - (*offset_ptr - ext_offset);
519
520 // DEBUG_PRINTF("Extended: <%2u> ", len);
521 uint8_t sub_opcode = debug_line_data.GetU8(offset_ptr);
522 switch (sub_opcode) {
523 case DW_LNE_end_sequence:
524 // Set the end_sequence register of the state machine to true and
525 // append a row to the matrix using the current values of the
526 // state-machine registers. Then reset the registers to the initial
527 // values specified above. Every statement program sequence must end
528 // with a DW_LNE_end_sequence instruction which creates a row whose
529 // address is that of the byte after the last target machine instruction
530 // of the sequence.
531 state.end_sequence = true;
532 state.AppendRowToMatrix(*offset_ptr);
533 state.Reset();
534 break;
535
536 case DW_LNE_set_address:
537 // Takes a single relocatable address as an operand. The size of the
538 // operand is the size appropriate to hold an address on the target
539 // machine. Set the address register to the value given by the
540 // relocatable address. All of the other statement program opcodes
541 // that affect the address register add a delta to it. This instruction
542 // stores a relocatable value into it instead.
543 if (arg_size == 4)
544 state.address = debug_line_data.GetU32(offset_ptr);
545 else // arg_size == 8
546 state.address = debug_line_data.GetU64(offset_ptr);
547 break;
548
549 case DW_LNE_define_file:
550 // Takes 4 arguments. The first is a null terminated string containing
551 // a source file name. The second is an unsigned LEB128 number
552 // representing
553 // the directory index of the directory in which the file was found. The
554 // third is an unsigned LEB128 number representing the time of last
555 // modification of the file. The fourth is an unsigned LEB128 number
556 // representing the length in bytes of the file. The time and length
557 // fields may contain LEB128(0) if the information is not available.
558 //
559 // The directory index represents an entry in the include_directories
560 // section of the statement program prologue. The index is LEB128(0)
561 // if the file was found in the current directory of the compilation,
562 // LEB128(1) if it was found in the first directory in the
563 // include_directories section, and so on. The directory index is
564 // ignored for file names that represent full path names.
565 //
566 // The files are numbered, starting at 1, in the order in which they
567 // appear; the names in the prologue come before names defined by
568 // the DW_LNE_define_file instruction. These numbers are used in the
569 // file register of the state machine.
570 {
571 FileNameEntry fileEntry;
572 fileEntry.name = debug_line_data.GetCStr(offset_ptr);
573 fileEntry.dir_idx = debug_line_data.GetULEB128(offset_ptr);
574 fileEntry.mod_time = debug_line_data.GetULEB128(offset_ptr);
575 fileEntry.length = debug_line_data.GetULEB128(offset_ptr);
576 state.prologue->file_names.push_back(fileEntry);
577 }
578 break;
579
580 default:
581 // Length doesn't include the zero opcode byte or the length itself, but
582 // it does include the sub_opcode, so we have to adjust for that below
583 (*offset_ptr) += arg_size;
584 break;
585 }
586 } else if (opcode < prologue->opcode_base) {
587 switch (opcode) {
588 // Standard Opcodes
589 case DW_LNS_copy:
590 // Takes no arguments. Append a row to the matrix using the
591 // current values of the state-machine registers. Then set
592 // the basic_block register to false.
593 state.AppendRowToMatrix(*offset_ptr);
594 break;
595
596 case DW_LNS_advance_pc:
597 // Takes a single unsigned LEB128 operand, multiplies it by the
598 // min_inst_length field of the prologue, and adds the
599 // result to the address register of the state machine.
600 state.address +=
601 debug_line_data.GetULEB128(offset_ptr) * prologue->min_inst_length;
602 break;
603
604 case DW_LNS_advance_line:
605 // Takes a single signed LEB128 operand and adds that value to
606 // the line register of the state machine.
607 state.line += debug_line_data.GetSLEB128(offset_ptr);
608 break;
609
610 case DW_LNS_set_file:
611 // Takes a single unsigned LEB128 operand and stores it in the file
612 // register of the state machine.
613 state.file = debug_line_data.GetULEB128(offset_ptr);
614 break;
615
616 case DW_LNS_set_column:
617 // Takes a single unsigned LEB128 operand and stores it in the
618 // column register of the state machine.
619 state.column = debug_line_data.GetULEB128(offset_ptr);
620 break;
621
622 case DW_LNS_negate_stmt:
623 // Takes no arguments. Set the is_stmt register of the state
624 // machine to the logical negation of its current value.
625 state.is_stmt = !state.is_stmt;
626 break;
627
628 case DW_LNS_set_basic_block:
629 // Takes no arguments. Set the basic_block register of the
630 // state machine to true
631 state.basic_block = true;
632 break;
633
634 case DW_LNS_const_add_pc:
635 // Takes no arguments. Add to the address register of the state
636 // machine the address increment value corresponding to special
637 // opcode 255. The motivation for DW_LNS_const_add_pc is this:
638 // when the statement program needs to advance the address by a
639 // small amount, it can use a single special opcode, which occupies
640 // a single byte. When it needs to advance the address by up to
641 // twice the range of the last special opcode, it can use
642 // DW_LNS_const_add_pc followed by a special opcode, for a total
643 // of two bytes. Only if it needs to advance the address by more
644 // than twice that range will it need to use both DW_LNS_advance_pc
645 // and a special opcode, requiring three or more bytes.
646 {
647 uint8_t adjust_opcode = 255 - prologue->opcode_base;
648 dw_addr_t addr_offset = (adjust_opcode / prologue->line_range) *
649 prologue->min_inst_length;
650 state.address += addr_offset;
651 }
652 break;
653
654 case DW_LNS_fixed_advance_pc:
655 // Takes a single uhalf operand. Add to the address register of
656 // the state machine the value of the (unencoded) operand. This
657 // is the only extended opcode that takes an argument that is not
658 // a variable length number. The motivation for DW_LNS_fixed_advance_pc
659 // is this: existing assemblers cannot emit DW_LNS_advance_pc or
660 // special opcodes because they cannot encode LEB128 numbers or
661 // judge when the computation of a special opcode overflows and
662 // requires the use of DW_LNS_advance_pc. Such assemblers, however,
663 // can use DW_LNS_fixed_advance_pc instead, sacrificing compression.
664 state.address += debug_line_data.GetU16(offset_ptr);
665 break;
666
667 case DW_LNS_set_prologue_end:
668 // Takes no arguments. Set the prologue_end register of the
669 // state machine to true
670 state.prologue_end = true;
671 break;
672
673 case DW_LNS_set_epilogue_begin:
674 // Takes no arguments. Set the basic_block register of the
675 // state machine to true
676 state.epilogue_begin = true;
677 break;
678
679 case DW_LNS_set_isa:
680 // Takes a single unsigned LEB128 operand and stores it in the
681 // column register of the state machine.
682 state.isa = debug_line_data.GetULEB128(offset_ptr);
683 break;
684
685 default:
686 // Handle any unknown standard opcodes here. We know the lengths
687 // of such opcodes because they are specified in the prologue
688 // as a multiple of LEB128 operands for each opcode.
689 {
690 uint8_t i;
691          assert(static_cast<size_t>(opcode - 1) <
692                 prologue->standard_opcode_lengths.size());
693 const uint8_t opcode_length =
694 prologue->standard_opcode_lengths[opcode - 1];
695 for (i = 0; i < opcode_length; ++i)
696 debug_line_data.Skip_LEB128(offset_ptr);
697 }
698 break;
699 }
700 } else {
701 // Special Opcodes
702
703 // A special opcode value is chosen based on the amount that needs
704 // to be added to the line and address registers. The maximum line
705 // increment for a special opcode is the value of the line_base
706 // field in the header, plus the value of the line_range field,
707 // minus 1 (line base + line range - 1). If the desired line
708 // increment is greater than the maximum line increment, a standard
709 // opcode must be used instead of a special opcode. The "address
710 // advance" is calculated by dividing the desired address increment
711 // by the minimum_instruction_length field from the header. The
712 // special opcode is then calculated using the following formula:
713 //
714 // opcode = (desired line increment - line_base) + (line_range * address
715 // advance) + opcode_base
716 //
717 // If the resulting opcode is greater than 255, a standard opcode
718 // must be used instead.
719 //
720 // To decode a special opcode, subtract the opcode_base from the
721 // opcode itself to give the adjusted opcode. The amount to
722 // increment the address register is the result of the adjusted
723 // opcode divided by the line_range multiplied by the
724 // minimum_instruction_length field from the header. That is:
725 //
726 // address increment = (adjusted opcode / line_range) *
727 // minimum_instruction_length
728 //
729 // The amount to increment the line register is the line_base plus
730 // the result of the adjusted opcode modulo the line_range. That is:
731 //
732 // line increment = line_base + (adjusted opcode % line_range)
733
734 uint8_t adjust_opcode = opcode - prologue->opcode_base;
735 dw_addr_t addr_offset =
736 (adjust_opcode / prologue->line_range) * prologue->min_inst_length;
737 int32_t line_offset =
738 prologue->line_base + (adjust_opcode % prologue->line_range);
739 state.line += line_offset;
740 state.address += addr_offset;
741 state.AppendRowToMatrix(*offset_ptr);
742 }
743 }
744
745 state.Finalize(*offset_ptr);
746
747 return end_offset;
748}
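
A worked instance of the special-opcode formulas in the comment block near
the end of ParseStatementTable, using typical prologue values (line_base =
-5, line_range = 14, opcode_base = 13, min_inst_length = 1; the numbers are
illustrative):

    uint8_t opcode        = 0x4B;            // 75, >= opcode_base
    uint8_t adjust_opcode = opcode - 13;     // 62
    dw_addr_t addr_offset = (62 / 14) * 1;   // 4  -> state.address += 4
    int32_t line_offset   = -5 + (62 % 14);  // 1  -> state.line += 1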
749
750//----------------------------------------------------------------------
751// ParseStatementTableCallback
752//----------------------------------------------------------------------
753static void ParseStatementTableCallback(dw_offset_t offset,
754 const DWARFDebugLine::State &state,
755 void *userData) {
756 DWARFDebugLine::LineTable *line_table = (DWARFDebugLine::LineTable *)userData;
757 if (state.row == DWARFDebugLine::State::StartParsingLineTable) {
758 // Just started parsing the line table, so lets keep a reference to
759 // the prologue using the supplied shared pointer
760 line_table->prologue = state.prologue;
761 } else if (state.row == DWARFDebugLine::State::DoneParsingLineTable) {
762 // Done parsing line table, nothing to do for the cleanup
763 } else {
764 // We have a new row, lets append it
765 line_table->AppendRow(state);
766 }
767}
768
769//----------------------------------------------------------------------
770// ParseStatementTable
771//
772// Parse a line table at offset and populate the LineTable class with
773// the prologue and all rows.
774//----------------------------------------------------------------------
775bool DWARFDebugLine::ParseStatementTable(
776 const DWARFDataExtractor &debug_line_data, lldb::offset_t *offset_ptr,
777 LineTable *line_table) {
778 return ParseStatementTable(debug_line_data, offset_ptr,
779 ParseStatementTableCallback, line_table);
780}
781
782inline bool DWARFDebugLine::Prologue::IsValid() const {
783 return SymbolFileDWARF::SupportedVersion(version);
784}
785
786//----------------------------------------------------------------------
787// DWARFDebugLine::Prologue::Dump
788//----------------------------------------------------------------------
789void DWARFDebugLine::Prologue::Dump(Log *log) {
790 uint32_t i;
791
792 log->Printf("Line table prologue:");
793 log->Printf(" total_length: 0x%8.8x", total_length);
794 log->Printf(" version: %u", version);
795 log->Printf("prologue_length: 0x%8.8x", prologue_length);
796 log->Printf("min_inst_length: %u", min_inst_length);
797 log->Printf("default_is_stmt: %u", default_is_stmt);
798 log->Printf(" line_base: %i", line_base);
799 log->Printf(" line_range: %u", line_range);
800 log->Printf(" opcode_base: %u", opcode_base);
801
802 for (i = 0; i < standard_opcode_lengths.size(); ++i) {
803 log->Printf("standard_opcode_lengths[%s] = %u", DW_LNS_value_to_name(i + 1),
804 standard_opcode_lengths[i]);
805 }
806
807 if (!include_directories.empty()) {
808 for (i = 0; i < include_directories.size(); ++i) {
809 log->Printf("include_directories[%3u] = '%s'", i + 1,
810 include_directories[i]);
811 }
812 }
813
814 if (!file_names.empty()) {
815 log->PutCString(" Dir Mod Time File Len File Name");
816 log->PutCString(" ---- ---------- ---------- "
817 "---------------------------");
818 for (i = 0; i < file_names.size(); ++i) {
819 const FileNameEntry &fileEntry = file_names[i];
820 log->Printf("file_names[%3u] %4u 0x%8.8x 0x%8.8x %s", i + 1,
821 fileEntry.dir_idx, fileEntry.mod_time, fileEntry.length,
822 fileEntry.name);
823 }
824 }
825}
826
827//----------------------------------------------------------------------
828// DWARFDebugLine::ParsePrologue::Append
829//
830// Append the contents of the prologue to the binary stream buffer
831//----------------------------------------------------------------------
832// void
833// DWARFDebugLine::Prologue::Append(BinaryStreamBuf& buff) const
834//{
835// uint32_t i;
836//
837// buff.Append32(total_length);
838// buff.Append16(version);
839// buff.Append32(prologue_length);
840// buff.Append8(min_inst_length);
841// buff.Append8(default_is_stmt);
842// buff.Append8(line_base);
843// buff.Append8(line_range);
844// buff.Append8(opcode_base);
845//
846// for (i=0; i<standard_opcode_lengths.size(); ++i)
847// buff.Append8(standard_opcode_lengths[i]);
848//
849// for (i=0; i<include_directories.size(); ++i)
850// buff.AppendCStr(include_directories[i].c_str());
851// buff.Append8(0); // Terminate the include directory section with empty
852// string
853//
854// for (i=0; i<file_names.size(); ++i)
855// {
856// buff.AppendCStr(file_names[i].name.c_str());
857// buff.Append32_as_ULEB128(file_names[i].dir_idx);
858// buff.Append32_as_ULEB128(file_names[i].mod_time);
859// buff.Append32_as_ULEB128(file_names[i].length);
860// }
861// buff.Append8(0); // Terminate the file names section with empty string
862//}
863
864bool DWARFDebugLine::Prologue::GetFile(uint32_t file_idx, const char *comp_dir,
865 FileSpec &file) const {
866 uint32_t idx = file_idx - 1; // File indexes are 1 based...
867 if (idx < file_names.size()) {
868 file.SetFile(file_names[idx].name, false);
869 if (file.IsRelative()) {
870 if (file_names[idx].dir_idx > 0) {
871 const uint32_t dir_idx = file_names[idx].dir_idx - 1;
872 if (dir_idx < include_directories.size()) {
873 file.PrependPathComponent(include_directories[dir_idx]);
874 if (!file.IsRelative())
875 return true;
876 }
877 }
878
879 if (comp_dir && comp_dir[0])
880 file.PrependPathComponent(comp_dir);
881 }
882 return true;
883 }
884 return false;
885}
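
To make the 1-based indexing in GetFile concrete, consider a hypothetical
v2-v4 prologue (paths invented for illustration):

    include_directories = { "/usr/include" }   // directory index 1
    file_names = { { "a.c", dir_idx = 0 },     // file index 1
                   { "b.h", dir_idx = 1 } }    // file index 2

GetFile(2, comp_dir, file) maps dir_idx 1 to vector slot 0 and yields
"/usr/include/b.h"; GetFile(1, "/tmp/build", file) has dir_idx 0, so only
comp_dir is prepended, yielding "/tmp/build/a.c".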
886
887//----------------------------------------------------------------------
888// DWARFDebugLine::LineTable::Dump
889//----------------------------------------------------------------------
890void DWARFDebugLine::LineTable::Dump(Log *log) const {
891 if (prologue.get())
892 prologue->Dump(log);
893
894 if (!rows.empty()) {
895 log->PutCString("Address Line Column File ISA Flags");
896 log->PutCString(
897 "------------------ ------ ------ ------ --- -------------");
898 Row::const_iterator pos = rows.begin();
899 Row::const_iterator end = rows.end();
900 while (pos != end) {
901 (*pos).Dump(log);
902 ++pos;
903 }
904 }
905}
906
907void DWARFDebugLine::LineTable::AppendRow(const DWARFDebugLine::Row &state) {
908 rows.push_back(state);
909}
910
911//----------------------------------------------------------------------
912// Compare function for the binary search in
913// DWARFDebugLine::LineTable::LookupAddress()
914//----------------------------------------------------------------------
915static bool FindMatchingAddress(const DWARFDebugLine::Row &row1,
916 const DWARFDebugLine::Row &row2) {
917 return row1.address < row2.address;
918}
919
920//----------------------------------------------------------------------
921// DWARFDebugLine::LineTable::LookupAddress
922//----------------------------------------------------------------------
923uint32_t DWARFDebugLine::LineTable::LookupAddress(dw_addr_t address,
924 dw_addr_t cu_high_pc) const {
925  uint32_t index = UINT32_MAX;
926 if (!rows.empty()) {
927 // Use the lower_bound algorithm to perform a binary search since we know
928 // that our line table data is ordered by address.
929 DWARFDebugLine::Row row;
930 row.address = address;
931 Row::const_iterator begin_pos = rows.begin();
932 Row::const_iterator end_pos = rows.end();
933 Row::const_iterator pos =
934 lower_bound(begin_pos, end_pos, row, FindMatchingAddress);
935 if (pos == end_pos) {
936 if (address < cu_high_pc)
937 return rows.size() - 1;
938 } else {
939 // Rely on fact that we are using a std::vector and we can do
940 // pointer arithmetic to find the row index (which will be one less
941 // that what we found since it will find the first position after
942 // the current address) since std::vector iterators are just
943 // pointers to the container type.
944 index = pos - begin_pos;
945 if (pos->address > address) {
946 if (index > 0)
947 --index;
948 else
949        index = UINT32_MAX;
950 }
951 }
952 }
953 return index; // Failed to find address
954}
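
A quick trace of the index adjustment in LookupAddress, with illustrative
row addresses:

    rows = { 0x100, 0x200, 0x300 }
    LookupAddress(0x250): lower_bound -> index 2 (0x300);
                          0x300 > 0x250, so --index -> 1 (the 0x200 row)
    LookupAddress(0x050): lower_bound -> index 0 (0x100);
                          0x100 > 0x050 and index == 0 -> UINT32_MAX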
955
956//----------------------------------------------------------------------
957// DWARFDebugLine::Row::Row
958//----------------------------------------------------------------------
959DWARFDebugLine::Row::Row(bool default_is_stmt)
960 : address(0), line(1), column(0), file(1), is_stmt(default_is_stmt),
961 basic_block(false), end_sequence(false), prologue_end(false),
962 epilogue_begin(false), isa(0) {}
963
964//----------------------------------------------------------------------
965// Called after a row is appended to the matrix
966//----------------------------------------------------------------------
967void DWARFDebugLine::Row::PostAppend() {
968 basic_block = false;
969 prologue_end = false;
970 epilogue_begin = false;
971}
972
973//----------------------------------------------------------------------
974// DWARFDebugLine::Row::Reset
975//----------------------------------------------------------------------
976void DWARFDebugLine::Row::Reset(bool default_is_stmt) {
977 address = 0;
978 line = 1;
979 column = 0;
980 file = 1;
981 is_stmt = default_is_stmt;
982 basic_block = false;
983 end_sequence = false;
984 prologue_end = false;
985 epilogue_begin = false;
986 isa = 0;
987}
988//----------------------------------------------------------------------
989// DWARFDebugLine::Row::Dump
990//----------------------------------------------------------------------
991void DWARFDebugLine::Row::Dump(Log *log) const {
992  log->Printf("0x%16.16" PRIx64 " %6u %6u %6u %3u %s%s%s%s%s", address, line,
993 column, file, isa, is_stmt ? " is_stmt" : "",
994 basic_block ? " basic_block" : "",
995 prologue_end ? " prologue_end" : "",
996 epilogue_begin ? " epilogue_begin" : "",
997 end_sequence ? " end_sequence" : "");
998}
999
1000//----------------------------------------------------------------------
1001// Compare function LineTable structures
1002//----------------------------------------------------------------------
1003static bool AddressLessThan(const DWARFDebugLine::Row &a,
1004 const DWARFDebugLine::Row &b) {
1005 return a.address < b.address;
1006}
1007
1008// Insert a row at the correct address if the addresses can be out of
1009// order which can only happen when we are linking a line table that
1010// may have had its contents rearranged.
1011void DWARFDebugLine::Row::Insert(Row::collection &state_coll,
1012 const Row &state) {
1013 // If we don't have anything yet, or if the address of the last state in our
1014 // line table is less than the current one, just append the current state
1015 if (state_coll.empty() || AddressLessThan(state_coll.back(), state)) {
1016 state_coll.push_back(state);
1017 } else {
1018 // Do a binary search for the correct entry
1019 pair<Row::iterator, Row::iterator> range(equal_range(
1020 state_coll.begin(), state_coll.end(), state, AddressLessThan));
1021
1022 // If the addresses are equal, we can safely replace the previous entry
1023 // with the current one if the one it is replacing is an end_sequence entry.
1024 // We currently always place an extra end sequence when ever we exit a valid
1025 // address range for a function in case the functions get rearranged by
1026 // optimizations or by order specifications. These extra end sequences will
1027 // disappear by getting replaced with valid consecutive entries within a
1028 // compile unit if there are no gaps.
1029 if (range.first == range.second) {
1030 state_coll.insert(range.first, state);
1031 } else {
1032 if ((distance(range.first, range.second) == 1) &&
1033 range.first->end_sequence == true) {
1034 *range.first = state;
1035 } else {
1036 state_coll.insert(range.second, state);
1037 }
1038 }
1039 }
1040}
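
A short trace of the insertion rules above (addresses illustrative): with
state_coll holding rows at 0x100, 0x200 (an end_sequence), and 0x300, an
incoming row at 0x400 takes the fast append path; an incoming row at 0x250
finds an empty equal_range and is inserted before 0x300; and an incoming row
at 0x200 finds exactly the end_sequence entry, which is replaced in place.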
1041
1042void DWARFDebugLine::Row::Dump(Log *log, const Row::collection &state_coll) {
1043 std::for_each(state_coll.begin(), state_coll.end(),
1044 bind2nd(std::mem_fun_ref(&Row::Dump), log));
1045}
1046
1047//----------------------------------------------------------------------
1048// DWARFDebugLine::State::State
1049//----------------------------------------------------------------------
1050DWARFDebugLine::State::State(Prologue::shared_ptr &p, Log *l,
1051 DWARFDebugLine::State::Callback cb, void *userData)
1052 : Row(p->default_is_stmt), prologue(p), log(l), callback(cb),
1053 callbackUserData(userData), row(StartParsingLineTable) {
1054 // Call the callback with the initial row state of zero for the prologue
1055 if (callback)
1056 callback(0, *this, callbackUserData);
1057}
1058
1059//----------------------------------------------------------------------
1060// DWARFDebugLine::State::Reset
1061//----------------------------------------------------------------------
1062void DWARFDebugLine::State::Reset() { Row::Reset(prologue->default_is_stmt); }
1063
1064//----------------------------------------------------------------------
1065// DWARFDebugLine::State::AppendRowToMatrix
1066//----------------------------------------------------------------------
1067void DWARFDebugLine::State::AppendRowToMatrix(dw_offset_t offset) {
1068 // Each time we are to add an entry into the line table matrix
1069 // call the callback function so that someone can do something with
1070 // the current state of the state machine (like build a line table
1071 // or dump the line table!)
1072 if (log) {
1073 if (row == 0) {
1074 log->PutCString("Address Line Column File ISA Flags");
1075 log->PutCString(
1076 "------------------ ------ ------ ------ --- -------------");
1077 }
1078 Dump(log);
1079 }
1080
1081 ++row; // Increase the row number before we call our callback for a real row
1082 if (callback)
1083 callback(offset, *this, callbackUserData);
1084 PostAppend();
1085}
1086
1087//----------------------------------------------------------------------
1088// DWARFDebugLine::State::Finalize
1089//----------------------------------------------------------------------
1090void DWARFDebugLine::State::Finalize(dw_offset_t offset) {
1091 // Call the callback with a special row state when we are done parsing a
1092 // line table
1093 row = DoneParsingLineTable;
1094 if (callback)
1095 callback(offset, *this, callbackUserData);
1096}
1097
1098// void
1099// DWARFDebugLine::AppendLineTableData
1100//(
1101// const DWARFDebugLine::Prologue* prologue,
1102// const DWARFDebugLine::Row::collection& state_coll,
1103// const uint32_t addr_size,
1104// BinaryStreamBuf &debug_line_data
1105//)
1106//{
1107// if (state_coll.empty())
1108// {
1109// // We have no entries, just make an empty line table
1110// debug_line_data.Append8(0);
1111// debug_line_data.Append8(1);
1112// debug_line_data.Append8(DW_LNE_end_sequence);
1113// }
1114// else
1115// {
1116// DWARFDebugLine::Row::const_iterator pos;
1117// Row::const_iterator end = state_coll.end();
1118// bool default_is_stmt = prologue->default_is_stmt;
1119// const DWARFDebugLine::Row reset_state(default_is_stmt);
1120// const DWARFDebugLine::Row* prev_state = &reset_state;
1121// const int32_t max_line_increment_for_special_opcode =
1122// prologue->MaxLineIncrementForSpecialOpcode();
1123// for (pos = state_coll.begin(); pos != end; ++pos)
1124// {
1125// const DWARFDebugLine::Row& curr_state = *pos;
1126// int32_t line_increment = 0;
1127// dw_addr_t addr_offset = curr_state.address - prev_state->address;
1128// dw_addr_t addr_advance = (addr_offset) / prologue->min_inst_length;
1129// line_increment = (int32_t)(curr_state.line - prev_state->line);
1130//
1131// // If our previous state was the reset state, then let's emit the
1132// // address to keep GDB's DWARF parser happy. If we don't start each
1133// // sequence with a DW_LNE_set_address opcode, the line table won't
1134// // get slid properly in GDB.
1135//
1136// if (prev_state == &reset_state)
1137// {
1138// debug_line_data.Append8(0); // Extended opcode
1139// debug_line_data.Append32_as_ULEB128(addr_size + 1); // Length of
1140// opcode bytes
1141// debug_line_data.Append8(DW_LNE_set_address);
1142// debug_line_data.AppendMax64(curr_state.address, addr_size);
1143// addr_advance = 0;
1144// }
1145//
1146// if (prev_state->file != curr_state.file)
1147// {
1148// debug_line_data.Append8(DW_LNS_set_file);
1149// debug_line_data.Append32_as_ULEB128(curr_state.file);
1150// }
1151//
1152// if (prev_state->column != curr_state.column)
1153// {
1154// debug_line_data.Append8(DW_LNS_set_column);
1155// debug_line_data.Append32_as_ULEB128(curr_state.column);
1156// }
1157//
1158// // Don't do anything fancy if we are at the end of a sequence
1159// // as we don't want to push any extra rows since the
1160// DW_LNE_end_sequence
1161// // will push a row itself!
1162// if (curr_state.end_sequence)
1163// {
1164// if (line_increment != 0)
1165// {
1166// debug_line_data.Append8(DW_LNS_advance_line);
1167// debug_line_data.Append32_as_SLEB128(line_increment);
1168// }
1169//
1170// if (addr_advance > 0)
1171// {
1172// debug_line_data.Append8(DW_LNS_advance_pc);
1173// debug_line_data.Append32_as_ULEB128(addr_advance);
1174// }
1175//
1176// // Now push the end sequence on!
1177// debug_line_data.Append8(0);
1178// debug_line_data.Append8(1);
1179// debug_line_data.Append8(DW_LNE_end_sequence);
1180//
1181// prev_state = &reset_state;
1182// }
1183// else
1184// {
1185// if (line_increment || addr_advance)
1186// {
1187// if (line_increment > max_line_increment_for_special_opcode)
1188// {
1189// debug_line_data.Append8(DW_LNS_advance_line);
1190// debug_line_data.Append32_as_SLEB128(line_increment);
1191// line_increment = 0;
1192// }
1193//
1194// uint32_t special_opcode = (line_increment >=
1195// prologue->line_base) ? ((line_increment -
1196// prologue->line_base) + (prologue->line_range * addr_advance)
1197// + prologue->opcode_base) : 256;
1198// if (special_opcode > 255)
1199// {
1200// // Both the address and line won't fit in one special
1201// opcode
1202// // check to see if just the line advance will?
1203// uint32_t special_opcode_line = ((line_increment >=
1204// prologue->line_base) && (line_increment != 0)) ?
1205// ((line_increment - prologue->line_base) +
1206// prologue->opcode_base) : 256;
1207//
1208//
1209// if (special_opcode_line > 255)
1210// {
1211// // Nope, the line advance won't fit by itself, check
1212// the address increment by itself
1213// uint32_t special_opcode_addr = addr_advance ?
1214// ((0 - prologue->line_base) +
1215// (prologue->line_range * addr_advance) +
1216// prologue->opcode_base) : 256;
1217//
1218// if (special_opcode_addr > 255)
1219// {
1220// // Neither the address nor the line will fit in
1221// a
1222// // special opcode, we must manually enter both
1223// then
1224// // do a DW_LNS_copy to push a row (special
1225// opcode
1226// // automatically imply a new row is pushed)
1227// if (line_increment != 0)
1228// {
1229// debug_line_data.Append8(DW_LNS_advance_line);
1230// debug_line_data.Append32_as_SLEB128(line_increment);
1231// }
1232//
1233// if (addr_advance > 0)
1234// {
1235// debug_line_data.Append8(DW_LNS_advance_pc);
1236// debug_line_data.Append32_as_ULEB128(addr_advance);
1237// }
1238//
1239// // Now push a row onto the line table manually
1240// debug_line_data.Append8(DW_LNS_copy);
1241//
1242// }
1243// else
1244// {
1245// // The address increment alone will fit into a
1246// special opcode
1247// // so modify our line change, then issue a
1248// special opcode
1249// // for the address increment and it will push a
1250// row into the
1251// // line table
1252// if (line_increment != 0)
1253// {
1254// debug_line_data.Append8(DW_LNS_advance_line);
1255// debug_line_data.Append32_as_SLEB128(line_increment);
1256// }
1257//
1258// // Advance of line and address will fit into a
1259// single byte special opcode
1260// // and this will also push a row onto the line
1261// table
1262// debug_line_data.Append8(special_opcode_addr);
1263// }
1264// }
1265// else
1266// {
1267// // The line change alone will fit into a special
1268// opcode
1269// // so modify our address increment first, then issue
1270// a
1271// // special opcode for the line change and it will
1272// push
1273// // a row into the line table
1274// if (addr_advance > 0)
1275// {
1276// debug_line_data.Append8(DW_LNS_advance_pc);
1277// debug_line_data.Append32_as_ULEB128(addr_advance);
1278// }
1279//
1280// // Advance of line and address will fit into a
1281// single byte special opcode
1282// // and this will also push a row onto the line table
1283// debug_line_data.Append8(special_opcode_line);
1284// }
1285// }
1286// else
1287// {
1288// // Advance of line and address will fit into a single
1289// byte special opcode
1290// // and this will also push a row onto the line table
1291// debug_line_data.Append8(special_opcode);
1292// }
1293// }
1294// prev_state = &curr_state;
1295// }
1296// }
1297// }
1298//}
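
The libstdc++ header excerpted below appears in the report because the
analyzer's path (steps 13 and 30 above) inlines the defaulted shared_ptr
copy assignment performed by "m_lineTableMap[debug_line_offset] =
line_table_sp;" and places the release inside it. A conceptual sketch of why
a copy assignment can free memory (illustrative, not libstdc++'s actual
implementation):

    // Reference-counted copy assignment, reduced to its essentials.
    template <typename T>
    struct RefPtr {
      T *ptr = nullptr;
      long *count = nullptr;           // shared use count
      RefPtr &operator=(const RefPtr &r) {
        if (r.count) ++*r.count;       // acquire the source's reference first
        if (count && --*count == 0) {  // drop our old reference
          delete ptr;                  // old object freed here: this is the
          delete count;                // "memory was released" event
        }
        ptr = r.ptr;
        count = r.count;
        return *this;
      }
    };

Whether this particular report is a true positive is worth scrutinizing:
operator[] default-constructs an empty map slot and line_table_sp still
holds its own reference, so the assignment in Parse should not release a
live LineTable; analyzer reports of this shape against inlined shared_ptr
internals are sometimes false positives.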

/usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/bits/shared_ptr.h

1// shared_ptr and weak_ptr implementation -*- C++ -*-
2
3// Copyright (C) 2007-2017 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25// GCC Note: Based on files from version 1.32.0 of the Boost library.
26
27// shared_count.hpp
28// Copyright (c) 2001, 2002, 2003 Peter Dimov and Multi Media Ltd.
29
30// shared_ptr.hpp
31// Copyright (C) 1998, 1999 Greg Colvin and Beman Dawes.
32// Copyright (C) 2001, 2002, 2003 Peter Dimov
33
34// weak_ptr.hpp
35// Copyright (C) 2001, 2002, 2003 Peter Dimov
36
37// enable_shared_from_this.hpp
38// Copyright (C) 2002 Peter Dimov
39
40// Distributed under the Boost Software License, Version 1.0. (See
41// accompanying file LICENSE_1_0.txt or copy at
42// http://www.boost.org/LICENSE_1_0.txt)
43
44/** @file
45 * This is an internal header file, included by other library headers.
46 * Do not attempt to use it directly. @headername{memory}
47 */
48
49#ifndef _SHARED_PTR_H
50#define _SHARED_PTR_H 1
51
52#include <bits/shared_ptr_base.h>
53
54namespace std _GLIBCXX_VISIBILITY(default)
55{
56_GLIBCXX_BEGIN_NAMESPACE_VERSION
57
58 /**
59 * @addtogroup pointer_abstractions
60 * @{
61 */
62
63 /// 20.7.2.2.11 shared_ptr I/O
64 template<typename _Ch, typename _Tr, typename _Tp, _Lock_policy _Lp>
65 inline std::basic_ostream<_Ch, _Tr>&
66 operator<<(std::basic_ostream<_Ch, _Tr>& __os,
67 const __shared_ptr<_Tp, _Lp>& __p)
68 {
69 __os << __p.get();
70 return __os;
71 }
72
73 /// 20.7.2.2.10 shared_ptr get_deleter
74 template<typename _Del, typename _Tp, _Lock_policy _Lp>
75 inline _Del*
76 get_deleter(const __shared_ptr<_Tp, _Lp>& __p) noexcept
77 {
78#if __cpp_rtti
79 return static_cast<_Del*>(__p._M_get_deleter(typeid(_Del)));
80#else
81 return 0;
82#endif
83 }
84
85
86 /**
87 * @brief A smart pointer with reference-counted copy semantics.
88 *
89 * The object pointed to is deleted when the last shared_ptr pointing to
90 * it is destroyed or reset.
91 */
92 template<typename _Tp>
93 class shared_ptr : public __shared_ptr<_Tp>
94 {
95 template<typename... _Args>
96 using _Constructible = typename enable_if<
97 is_constructible<__shared_ptr<_Tp>, _Args...>::value
98 >::type;
99
100 template<typename _Arg>
101 using _Assignable = typename enable_if<
102 is_assignable<__shared_ptr<_Tp>&, _Arg>::value, shared_ptr&
103 >::type;
104
105 public:
106
107 using element_type = typename __shared_ptr<_Tp>::element_type;
108
109#if __cplusplus > 201402L
110# define __cpp_lib_shared_ptr_weak_type 201606
111 using weak_type = weak_ptr<_Tp>;
112#endif
113 /**
114 * @brief Construct an empty %shared_ptr.
115 * @post use_count()==0 && get()==0
116 */
117 constexpr shared_ptr() noexcept : __shared_ptr<_Tp>() { }
118
119 shared_ptr(const shared_ptr&) noexcept = default;
120
121 /**
122 * @brief Construct a %shared_ptr that owns the pointer @a __p.
123 * @param __p A pointer that is convertible to element_type*.
124 * @post use_count() == 1 && get() == __p
125 * @throw std::bad_alloc, in which case @c delete @a __p is called.
126 */
127 template<typename _Yp, typename = _Constructible<_Yp*>>
128 explicit
129 shared_ptr(_Yp* __p) : __shared_ptr<_Tp>(__p) { }
130
131 /**
132 * @brief Construct a %shared_ptr that owns the pointer @a __p
133 * and the deleter @a __d.
134 * @param __p A pointer.
135 * @param __d A deleter.
136 * @post use_count() == 1 && get() == __p
137 * @throw std::bad_alloc, in which case @a __d(__p) is called.
138 *
139 * Requirements: _Deleter's copy constructor and destructor must
140 * not throw
141 *
142 * __shared_ptr will release __p by calling __d(__p)
143 */
144 template<typename _Yp, typename _Deleter,
145 typename = _Constructible<_Yp*, _Deleter>>
146 shared_ptr(_Yp* __p, _Deleter __d)
147 : __shared_ptr<_Tp>(__p, std::move(__d)) { }
148
149 /**
150 * @brief Construct a %shared_ptr that owns a null pointer
151 * and the deleter @a __d.
152 * @param __p A null pointer constant.
153 * @param __d A deleter.
154 * @post use_count() == 1 && get() == __p
155 * @throw std::bad_alloc, in which case @a __d(__p) is called.
156 *
157 * Requirements: _Deleter's copy constructor and destructor must
158 * not throw
159 *
160 * The last owner will call __d(__p)
161 */
162 template<typename _Deleter>
163 shared_ptr(nullptr_t __p, _Deleter __d)
164 : __shared_ptr<_Tp>(__p, std::move(__d)) { }
165
166 /**
167 * @brief Construct a %shared_ptr that owns the pointer @a __p
168 * and the deleter @a __d.
169 * @param __p A pointer.
170 * @param __d A deleter.
171 * @param __a An allocator.
172 * @post use_count() == 1 && get() == __p
173 * @throw std::bad_alloc, in which case @a __d(__p) is called.
174 *
175 * Requirements: _Deleter's copy constructor and destructor must
176 * not throw _Alloc's copy constructor and destructor must not
177 * throw.
178 *
179 * __shared_ptr will release __p by calling __d(__p)
180 */
181 template<typename _Yp, typename _Deleter, typename _Alloc,
182 typename = _Constructible<_Yp*, _Deleter, _Alloc>>
183 shared_ptr(_Yp* __p, _Deleter __d, _Alloc __a)
184 : __shared_ptr<_Tp>(__p, std::move(__d), std::move(__a)) { }
185
186 /**
187 * @brief Construct a %shared_ptr that owns a null pointer
188 * and the deleter @a __d.
189 * @param __p A null pointer constant.
190 * @param __d A deleter.
191 * @param __a An allocator.
192 * @post use_count() == 1 && get() == __p
193 * @throw std::bad_alloc, in which case @a __d(__p) is called.
194 *
195 * Requirements: _Deleter's copy constructor and destructor must
196 * not throw _Alloc's copy constructor and destructor must not
197 * throw.
198 *
199 * The last owner will call __d(__p)
200 */
201 template<typename _Deleter, typename _Alloc>
202 shared_ptr(nullptr_t __p, _Deleter __d, _Alloc __a)
203 : __shared_ptr<_Tp>(__p, std::move(__d), std::move(__a)) { }
204
205 // Aliasing constructor
206
207 /**
208 * @brief Constructs a %shared_ptr instance that stores @a __p
209 * and shares ownership with @a __r.
210 * @param __r A %shared_ptr.
211 * @param __p A pointer that will remain valid while @a *__r is valid.
212 * @post get() == __p && use_count() == __r.use_count()
213 *
214 * This can be used to construct a @c shared_ptr to a sub-object
215 * of an object managed by an existing @c shared_ptr.
216 *
217 * @code
218 * shared_ptr< pair<int,int> > pii(new pair<int,int>());
219 * shared_ptr<int> pi(pii, &pii->first);
220 * assert(pii.use_count() == 2);
221 * @endcode
222 */
223 template<typename _Yp>
224 shared_ptr(const shared_ptr<_Yp>& __r, element_type* __p) noexcept
225 : __shared_ptr<_Tp>(__r, __p) { }
226
227 /**
228 * @brief If @a __r is empty, constructs an empty %shared_ptr;
229 * otherwise construct a %shared_ptr that shares ownership
230 * with @a __r.
231 * @param __r A %shared_ptr.
232 * @post get() == __r.get() && use_count() == __r.use_count()
233 */
234 template<typename _Yp,
235 typename = _Constructible<const shared_ptr<_Yp>&>>
236 shared_ptr(const shared_ptr<_Yp>& __r) noexcept
237 : __shared_ptr<_Tp>(__r) { }
238
239 /**
240 * @brief Move-constructs a %shared_ptr instance from @a __r.
241 * @param __r A %shared_ptr rvalue.
242 * @post *this contains the old value of @a __r, @a __r is empty.
243 */
244 shared_ptr(shared_ptr&& __r) noexcept
245 : __shared_ptr<_Tp>(std::move(__r)) { }
246
247 /**
248 * @brief Move-constructs a %shared_ptr instance from @a __r.
249 * @param __r A %shared_ptr rvalue.
250 * @post *this contains the old value of @a __r, @a __r is empty.
251 */
252 template<typename _Yp, typename = _Constructible<shared_ptr<_Yp>>>
253 shared_ptr(shared_ptr<_Yp>&& __r) noexcept
254 : __shared_ptr<_Tp>(std::move(__r)) { }
255
256 /**
257 * @brief Constructs a %shared_ptr that shares ownership with @a __r
258 * and stores a copy of the pointer stored in @a __r.
259 * @param __r A weak_ptr.
260 * @post use_count() == __r.use_count()
261 * @throw bad_weak_ptr when __r.expired(),
262 * in which case the constructor has no effect.
263 */
264 template<typename _Yp, typename = _Constructible<const weak_ptr<_Yp>&>>
265 explicit shared_ptr(const weak_ptr<_Yp>& __r)
266 : __shared_ptr<_Tp>(__r) { }
267
268#if _GLIBCXX_USE_DEPRECATED
269 template<typename _Yp, typename = _Constructible<auto_ptr<_Yp>>>
270 shared_ptr(auto_ptr<_Yp>&& __r);
271#endif
272
273 // _GLIBCXX_RESOLVE_LIB_DEFECTS
274 // 2399. shared_ptr's constructor from unique_ptr should be constrained
275 template<typename _Yp, typename _Del,
276 typename = _Constructible<unique_ptr<_Yp, _Del>>>
277 shared_ptr(unique_ptr<_Yp, _Del>&& __r)
278 : __shared_ptr<_Tp>(std::move(__r)) { }
279
280#if __cplusplus <= 201402L && _GLIBCXX_USE_DEPRECATED
281 // This non-standard constructor exists to support conversions that
282 // were possible in C++11 and C++14 but are ill-formed in C++17.
283 // If an exception is thrown this constructor has no effect.
284 template<typename _Yp, typename _Del,
285 _Constructible<unique_ptr<_Yp, _Del>, __sp_array_delete>* = 0>
286 shared_ptr(unique_ptr<_Yp, _Del>&& __r)
287 : __shared_ptr<_Tp>(std::move(__r), __sp_array_delete()) { }
288#endif
289
290 /**
291 * @brief Construct an empty %shared_ptr.
292 * @post use_count() == 0 && get() == nullptr
293 */
294 constexpr shared_ptr(nullptr_t) noexcept : shared_ptr() { }
295
296 shared_ptr& operator=(const shared_ptr&) noexcept = default;
14
Calling defaulted copy assignment operator for '__shared_ptr'
29
Returning; memory was released
38
Calling defaulted copy assignment operator for '__shared_ptr'
297
298 template<typename _Yp>
299 _Assignable<const shared_ptr<_Yp>&>
300 operator=(const shared_ptr<_Yp>& __r) noexcept
301 {
302 this->__shared_ptr<_Tp>::operator=(__r);
303 return *this;
304 }
305
306#if _GLIBCXX_USE_DEPRECATED
307 template<typename _Yp>
308 _Assignable<auto_ptr<_Yp>>
309 operator=(auto_ptr<_Yp>&& __r)
310 {
311 this->__shared_ptr<_Tp>::operator=(std::move(__r));
312 return *this;
313 }
314#endif
315
316 shared_ptr&
317 operator=(shared_ptr&& __r) noexcept
318 {
319 this->__shared_ptr<_Tp>::operator=(std::move(__r));
320 return *this;
321 }
322
323 template<class _Yp>
324 _Assignable<shared_ptr<_Yp>>
325 operator=(shared_ptr<_Yp>&& __r) noexcept
326 {
327 this->__shared_ptr<_Tp>::operator=(std::move(__r));
328 return *this;
329 }
330
331 template<typename _Yp, typename _Del>
332 _Assignable<unique_ptr<_Yp, _Del>>
333 operator=(unique_ptr<_Yp, _Del>&& __r)
334 {
335 this->__shared_ptr<_Tp>::operator=(std::move(__r));
336 return *this;
337 }
338
339 private:
340 // This constructor is non-standard, it is used by allocate_shared.
341 template<typename _Alloc, typename... _Args>
342 shared_ptr(_Sp_make_shared_tag __tag, const _Alloc& __a,
343 _Args&&... __args)
344 : __shared_ptr<_Tp>(__tag, __a, std::forward<_Args>(__args)...)
345 { }
346
347 template<typename _Yp, typename _Alloc, typename... _Args>
348 friend shared_ptr<_Yp>
349 allocate_shared(const _Alloc& __a, _Args&&... __args);
350
351 // This constructor is non-standard, it is used by weak_ptr::lock().
352 shared_ptr(const weak_ptr<_Tp>& __r, std::nothrow_t)
353 : __shared_ptr<_Tp>(__r, std::nothrow) { }
354
355 friend class weak_ptr<_Tp>;
356 };
357
358#if __cpp_deduction_guides >= 201606
359 template<typename _Tp>
360 shared_ptr(weak_ptr<_Tp>) -> shared_ptr<_Tp>;
361 template<typename _Tp, typename _Del>
362 shared_ptr(unique_ptr<_Tp, _Del>) -> shared_ptr<_Tp>;
363#endif
364
365 // 20.7.2.2.7 shared_ptr comparisons
366 template<typename _Tp, typename _Up>
367 inline bool
368 operator==(const shared_ptr<_Tp>& __a, const shared_ptr<_Up>& __b) noexcept
369 { return __a.get() == __b.get(); }
370
371 template<typename _Tp>
372 inline bool
373 operator==(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
374 { return !__a; }
375
376 template<typename _Tp>
377 inline bool
378 operator==(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
379 { return !__a; }
380
381 template<typename _Tp, typename _Up>
382 inline bool
383 operator!=(const shared_ptr<_Tp>& __a, const shared_ptr<_Up>& __b) noexcept
384 { return __a.get() != __b.get(); }
385
386 template<typename _Tp>
387 inline bool
388 operator!=(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
389 { return (bool)__a; }
390
391 template<typename _Tp>
392 inline bool
393 operator!=(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
394 { return (bool)__a; }
395
396 template<typename _Tp, typename _Up>
397 inline bool
398 operator<(const shared_ptr<_Tp>& __a, const shared_ptr<_Up>& __b) noexcept
399 {
400 using _Tp_elt = typename shared_ptr<_Tp>::element_type;
401 using _Up_elt = typename shared_ptr<_Up>::element_type;
402 using _Vp = typename common_type<_Tp_elt*, _Up_elt*>::type;
403 return less<_Vp>()(__a.get(), __b.get());
404 }
405
406 template<typename _Tp>
407 inline bool
408 operator<(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
409 {
410 using _Tp_elt = typename shared_ptr<_Tp>::element_type;
411 return less<_Tp_elt*>()(__a.get(), nullptr);
412 }
413
414 template<typename _Tp>
415 inline bool
416 operator<(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
417 {
418 using _Tp_elt = typename shared_ptr<_Tp>::element_type;
419 return less<_Tp_elt*>()(nullptr, __a.get());
420 }
421
422 template<typename _Tp, typename _Up>
423 inline bool
424 operator<=(const shared_ptr<_Tp>& __a, const shared_ptr<_Up>& __b) noexcept
425 { return !(__b < __a); }
426
427 template<typename _Tp>
428 inline bool
429 operator<=(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
430 { return !(nullptr < __a); }
431
432 template<typename _Tp>
433 inline bool
434 operator<=(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
435 { return !(__a < nullptr); }
436
437 template<typename _Tp, typename _Up>
438 inline bool
439 operator>(const shared_ptr<_Tp>& __a, const shared_ptr<_Up>& __b) noexcept
440 { return (__b < __a); }
441
442 template<typename _Tp>
443 inline bool
444 operator>(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
445 { return nullptr < __a; }
446
447 template<typename _Tp>
448 inline bool
449 operator>(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
450 { return __a < nullptr; }
451
452 template<typename _Tp, typename _Up>
453 inline bool
454 operator>=(const shared_ptr<_Tp>& __a, const shared_ptr<_Up>& __b) noexcept
455 { return !(__a < __b); }
456
457 template<typename _Tp>
458 inline bool
459 operator>=(const shared_ptr<_Tp>& __a, nullptr_t) noexcept
460 { return !(__a < nullptr); }
461
462 template<typename _Tp>
463 inline bool
464 operator>=(nullptr_t, const shared_ptr<_Tp>& __a) noexcept
465 { return !(nullptr < __a); }
466
467 template<typename _Tp>
468 struct less<shared_ptr<_Tp>> : public _Sp_less<shared_ptr<_Tp>>
469 { };
470
471 // 20.7.2.2.8 shared_ptr specialized algorithms.
472 template<typename _Tp>
473 inline void
474 swap(shared_ptr<_Tp>& __a, shared_ptr<_Tp>& __b) noexcept
475 { __a.swap(__b); }
476
477 // 20.7.2.2.9 shared_ptr casts.
478 template<typename _Tp, typename _Up>
479 inline shared_ptr<_Tp>
480 static_pointer_cast(const shared_ptr<_Up>& __r) noexcept
481 {
482 using _Sp = shared_ptr<_Tp>;
483 return _Sp(__r, static_cast<typename _Sp::element_type*>(__r.get()));
484 }
485
486 template<typename _Tp, typename _Up>
487 inline shared_ptr<_Tp>
488 const_pointer_cast(const shared_ptr<_Up>& __r) noexcept
489 {
490 using _Sp = shared_ptr<_Tp>;
491 return _Sp(__r, const_cast<typename _Sp::element_type*>(__r.get()));
492 }
493
494 template<typename _Tp, typename _Up>
495 inline shared_ptr<_Tp>
496 dynamic_pointer_cast(const shared_ptr<_Up>& __r) noexcept
497 {
498 using _Sp = shared_ptr<_Tp>;
499 if (auto* __p = dynamic_cast<typename _Sp::element_type*>(__r.get()))
500 return _Sp(__r, __p);
501 return _Sp();
502 }
503
504#if __cplusplus > 201402L
505 template<typename _Tp, typename _Up>
506 inline shared_ptr<_Tp>
507 reinterpret_pointer_cast(const shared_ptr<_Up>& __r) noexcept
508 {
509 using _Sp = shared_ptr<_Tp>;
510 return _Sp(__r, reinterpret_cast<typename _Sp::element_type*>(__r.get()));
511 }
512#endif
513
514 /**
515 * @brief A smart pointer with weak semantics.
516 *
517 * With forwarding constructors and assignment operators.
518 */
519 template<typename _Tp>
520 class weak_ptr : public __weak_ptr<_Tp>
521 {
522 template<typename _Arg>
523 using _Constructible = typename enable_if<
524 is_constructible<__weak_ptr<_Tp>, _Arg>::value
525 >::type;
526
527 template<typename _Arg>
528 using _Assignable = typename enable_if<
529 is_assignable<__weak_ptr<_Tp>&, _Arg>::value, weak_ptr&
530 >::type;
531
532 public:
533 constexpr weak_ptr() noexcept = default;
534
535 template<typename _Yp,
536 typename = _Constructible<const shared_ptr<_Yp>&>>
537 weak_ptr(const shared_ptr<_Yp>& __r) noexcept
538 : __weak_ptr<_Tp>(__r) { }
539
540 weak_ptr(const weak_ptr&) noexcept = default;
541
542 template<typename _Yp, typename = _Constructible<const weak_ptr<_Yp>&>>
543 weak_ptr(const weak_ptr<_Yp>& __r) noexcept
544 : __weak_ptr<_Tp>(__r) { }
545
546 weak_ptr(weak_ptr&&) noexcept = default;
547
548 template<typename _Yp, typename = _Constructible<weak_ptr<_Yp>>>
549 weak_ptr(weak_ptr<_Yp>&& __r) noexcept
550 : __weak_ptr<_Tp>(std::move(__r)) { }
551
552 weak_ptr&
553 operator=(const weak_ptr& __r) noexcept = default;
554
555 template<typename _Yp>
556 _Assignable<const weak_ptr<_Yp>&>
557 operator=(const weak_ptr<_Yp>& __r) noexcept
558 {
559 this->__weak_ptr<_Tp>::operator=(__r);
560 return *this;
561 }
562
563 template<typename _Yp>
564 _Assignable<const shared_ptr<_Yp>&>
565 operator=(const shared_ptr<_Yp>& __r) noexcept
566 {
567 this->__weak_ptr<_Tp>::operator=(__r);
568 return *this;
569 }
570
571 weak_ptr&
572 operator=(weak_ptr&& __r) noexcept = default;
573
574 template<typename _Yp>
575 _Assignable<weak_ptr<_Yp>>
576 operator=(weak_ptr<_Yp>&& __r) noexcept
577 {
578 this->__weak_ptr<_Tp>::operator=(std::move(__r));
579 return *this;
580 }
581
582 shared_ptr<_Tp>
583 lock() const noexcept
584 { return shared_ptr<_Tp>(*this, std::nothrow); }
585 };
586
587#if __cpp_deduction_guides >= 201606
588 template<typename _Tp>
589 weak_ptr(shared_ptr<_Tp>) -> weak_ptr<_Tp>;
590#endif
591
592 // 20.7.2.3.6 weak_ptr specialized algorithms.
593 template<typename _Tp>
594 inline void
595 swap(weak_ptr<_Tp>& __a, weak_ptr<_Tp>& __b) noexcept
596 { __a.swap(__b); }
597
598
599 /// Primary template owner_less
600 template<typename _Tp = void>
601 struct owner_less;
602
603 /// Void specialization of owner_less
604 template<>
605 struct owner_less<void> : _Sp_owner_less<void, void>
606 { };
607
608 /// Partial specialization of owner_less for shared_ptr.
609 template<typename _Tp>
610 struct owner_less<shared_ptr<_Tp>>
611 : public _Sp_owner_less<shared_ptr<_Tp>, weak_ptr<_Tp>>
612 { };
613
614 /// Partial specialization of owner_less for weak_ptr.
615 template<typename _Tp>
616 struct owner_less<weak_ptr<_Tp>>
617 : public _Sp_owner_less<weak_ptr<_Tp>, shared_ptr<_Tp>>
618 { };
619
620 /**
621 * @brief Base class allowing use of member function shared_from_this.
622 */
623 template<typename _Tp>
624 class enable_shared_from_this
625 {
626 protected:
627 constexpr enable_shared_from_this() noexcept { }
628
629 enable_shared_from_this(const enable_shared_from_this&) noexcept { }
630
631 enable_shared_from_this&
632 operator=(const enable_shared_from_this&) noexcept
633 { return *this; }
634
635 ~enable_shared_from_this() { }
636
637 public:
638 shared_ptr<_Tp>
639 shared_from_this()
640 { return shared_ptr<_Tp>(this->_M_weak_this); }
641
642 shared_ptr<const _Tp>
643 shared_from_this() const
644 { return shared_ptr<const _Tp>(this->_M_weak_this); }
645
646#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
647#define __cpp_lib_enable_shared_from_this 201603
648 weak_ptr<_Tp>
649 weak_from_this() noexcept
650 { return this->_M_weak_this; }
651
652 weak_ptr<const _Tp>
653 weak_from_this() const noexcept
654 { return this->_M_weak_this; }
655#endif
656
657 private:
658 template<typename _Tp1>
659 void
660 _M_weak_assign(_Tp1* __p, const __shared_count<>& __n) const noexcept
661 { _M_weak_this._M_assign(__p, __n); }
662
663 // Found by ADL when this is an associated class.
664 friend const enable_shared_from_this*
665 __enable_shared_from_this_base(const __shared_count<>&,
666 const enable_shared_from_this* __p)
667 { return __p; }
668
669 template<typename, _Lock_policy>
670 friend class __shared_ptr;
671
672 mutable weak_ptr<_Tp> _M_weak_this;
673 };
674
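
// [Editor's sketch -- usage of the class above, not part of shared_ptr.h.]
// _M_weak_this is seeded by __shared_ptr's constructor, so shared_from_this()
// is only valid on objects already owned by a shared_ptr:
#include <cassert>
#include <memory>

struct Widget : std::enable_shared_from_this<Widget> {
  std::shared_ptr<Widget> self() { return shared_from_this(); }
};

void demo() {
  auto w = std::make_shared<Widget>();
  assert(w->self().use_count() == 2); // the temporary shares w's control block
  Widget on_stack;
  // on_stack.self() would throw bad_weak_ptr: no owning shared_ptr exists.
}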
675 /**
676 * @brief Create an object that is owned by a shared_ptr.
677 * @param __a An allocator.
678 * @param __args Arguments for the @a _Tp object's constructor.
679 * @return A shared_ptr that owns the newly created object.
680 * @throw An exception thrown from @a _Alloc::allocate or from the
681 * constructor of @a _Tp.
682 *
683 * A copy of @a __a will be used to allocate memory for the shared_ptr
684 * and the new object.
685 */
686 template<typename _Tp, typename _Alloc, typename... _Args>
687 inline shared_ptr<_Tp>
688 allocate_shared(const _Alloc& __a, _Args&&... __args)
689 {
690 return shared_ptr<_Tp>(_Sp_make_shared_tag(), __a,
691 std::forward<_Args>(__args)...);
692 }
693
694 /**
695 * @brief Create an object that is owned by a shared_ptr.
696 * @param __args Arguments for the @a _Tp object's constructor.
697 * @return A shared_ptr that owns the newly created object.
698 * @throw std::bad_alloc, or an exception thrown from the
699 * constructor of @a _Tp.
700 */
701 template<typename _Tp, typename... _Args>
702 inline shared_ptr<_Tp>
703 make_shared(_Args&&... __args)
704 {
705 typedef typename std::remove_const<_Tp>::type _Tp_nc;
706 return std::allocate_shared<_Tp>(std::allocator<_Tp_nc>(),
707 std::forward<_Args>(__args)...);
708 }
709
710 /// std::hash specialization for shared_ptr.
711 template<typename _Tp>
712 struct hash<shared_ptr<_Tp>>
713 : public __hash_base<size_t, shared_ptr<_Tp>>
714 {
715 size_t
716 operator()(const shared_ptr<_Tp>& __s) const noexcept
717 {
718 return std::hash<typename shared_ptr<_Tp>::element_type*>()(__s.get());
719 }
720 };
721
722 // @} group pointer_abstractions
723
724_GLIBCXX_END_NAMESPACE_VERSION
725} // namespace
726
727#endif // _SHARED_PTR_H
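
The path steps interleaved above (14, 29, 38: "Calling defaulted copy assignment operator for '__shared_ptr'") enter through shared_ptr's defaulted operator= and continue into __shared_count::operator= in shared_ptr_base.h below, where releasing the destination's old control block can free it. A minimal sketch, assuming nothing about the lldb caller, of the shape of assignment that drives this path:

#include <memory>

void reassign_last_owner() {
  std::shared_ptr<int> dst = std::make_shared<int>(1); // dst: use_count == 1
  std::shared_ptr<int> src = std::make_shared<int>(2);
  // The defaulted shared_ptr::operator= copies the __shared_count member;
  // __shared_count::operator= add-refs src's control block, then calls
  // _M_release() on dst's old block, which frees it here (last reference).
  dst = src;
}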

/usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/bits/shared_ptr_base.h

1// shared_ptr and weak_ptr implementation details -*- C++ -*-
2
3// Copyright (C) 2007-2017 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25// GCC Note: Based on files from version 1.32.0 of the Boost library.
26
27// shared_count.hpp
28// Copyright (c) 2001, 2002, 2003 Peter Dimov and Multi Media Ltd.
29
30// shared_ptr.hpp
31// Copyright (C) 1998, 1999 Greg Colvin and Beman Dawes.
32// Copyright (C) 2001, 2002, 2003 Peter Dimov
33
34// weak_ptr.hpp
35// Copyright (C) 2001, 2002, 2003 Peter Dimov
36
37// enable_shared_from_this.hpp
38// Copyright (C) 2002 Peter Dimov
39
40// Distributed under the Boost Software License, Version 1.0. (See
41// accompanying file LICENSE_1_0.txt or copy at
42// http://www.boost.org/LICENSE_1_0.txt)
43
44/** @file bits/shared_ptr_base.h
45 * This is an internal header file, included by other library headers.
46 * Do not attempt to use it directly. @headername{memory}
47 */
48
49#ifndef _SHARED_PTR_BASE_H
50#define _SHARED_PTR_BASE_H 1
51
52#if __cpp_rtti
53# include <typeinfo>
54#endif
55#include <bits/allocated_ptr.h>
56#include <bits/refwrap.h>
57#include <bits/stl_function.h>
58#include <ext/aligned_buffer.h>
59
60namespace std _GLIBCXX_VISIBILITY(default)
61{
62_GLIBCXX_BEGIN_NAMESPACE_VERSION
63
64#if _GLIBCXX_USE_DEPRECATED
65 template<typename> class auto_ptr;
66#endif
67
68 /**
69 * @brief Exception possibly thrown by @c shared_ptr.
70 * @ingroup exceptions
71 */
72 class bad_weak_ptr : public std::exception
73 {
74 public:
75 virtual char const* what() const noexcept;
76
77 virtual ~bad_weak_ptr() noexcept;
78 };
79
80 // Substitute for bad_weak_ptr object in the case of -fno-exceptions.
81 inline void
82 __throw_bad_weak_ptr()
83 { _GLIBCXX_THROW_OR_ABORT(bad_weak_ptr()); }
84
85 using __gnu_cxx::_Lock_policy;
86 using __gnu_cxx::__default_lock_policy;
87 using __gnu_cxx::_S_single;
88 using __gnu_cxx::_S_mutex;
89 using __gnu_cxx::_S_atomic;
90
91 // Empty helper class except when the template argument is _S_mutex.
92 template<_Lock_policy _Lp>
93 class _Mutex_base
94 {
95 protected:
96 // The atomic policy uses fully-fenced builtins, single doesn't care.
97 enum { _S_need_barriers = 0 };
98 };
99
100 template<>
101 class _Mutex_base<_S_mutex>
102 : public __gnu_cxx::__mutex
103 {
104 protected:
105 // This policy is used when atomic builtins are not available.
106 // The replacement atomic operations might not have the necessary
107 // memory barriers.
108 enum { _S_need_barriers = 1 };
109 };
110
111 template<_Lock_policy _Lp = __default_lock_policy>
112 class _Sp_counted_base
113 : public _Mutex_base<_Lp>
114 {
115 public:
116 _Sp_counted_base() noexcept
117 : _M_use_count(1), _M_weak_count(1) { }
118
119 virtual
120 ~_Sp_counted_base() noexcept
121 { }
122
123 // Called when _M_use_count drops to zero, to release the resources
124 // managed by *this.
125 virtual void
126 _M_dispose() noexcept = 0;
127
128 // Called when _M_weak_count drops to zero.
129 virtual void
130 _M_destroy() noexcept
131 { delete this; }
25
Memory is released
132
133 virtual void*
134 _M_get_deleter(const std::type_info&) noexcept = 0;
135
136 void
137 _M_add_ref_copy()
138 { __gnu_cxx::__atomic_add_dispatch(&_M_use_count, 1); }
139
140 void
141 _M_add_ref_lock();
142
143 bool
144 _M_add_ref_lock_nothrow();
145
146 void
147 _M_release() noexcept
148 {
149 // Be race-detector-friendly. For more info see bits/c++config.
150 _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_use_count);
151 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_use_count, -1) == 1)
20
Assuming the condition is true
21
Taking true branch
45
Calling '__exchange_and_add_dispatch'
152 {
153 _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_use_count);
154 _M_dispose();
155 // There must be a memory barrier between dispose() and destroy()
156 // to ensure that the effects of dispose() are observed in the
157 // thread that runs destroy().
158 // See http://gcc.gnu.org/ml/libstdc++/2005-11/msg00136.html
159 if (_Mutex_base<_Lp>::_S_need_barriers)
22
Taking false branch
160 {
161 __atomic_thread_fence (__ATOMIC_ACQ_REL);
162 }
163
164 // Be race-detector-friendly. For more info see bits/c++config.
165 _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_weak_count);
166 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_weak_count,
23
Taking true branch
167 -1) == 1)
168 {
169 _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_weak_count);
170 _M_destroy();
24
Calling '_Sp_counted_base::_M_destroy'
26
Returning; memory was released
171 }
172 }
173 }
174
175 void
176 _M_weak_add_ref() noexcept
177 { __gnu_cxx::__atomic_add_dispatch(&_M_weak_count, 1); }
178
179 void
180 _M_weak_release() noexcept
181 {
182 // Be race-detector-friendly. For more info see bits/c++config.
183 _GLIBCXX_SYNCHRONIZATION_HAPPENS_BEFORE(&_M_weak_count);
184 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_weak_count, -1) == 1)
185 {
186 _GLIBCXX_SYNCHRONIZATION_HAPPENS_AFTER(&_M_weak_count);
187 if (_Mutex_base<_Lp>::_S_need_barriers)
188 {
189 // See _M_release(),
190 // destroy() must observe results of dispose()
191 __atomic_thread_fence (__ATOMIC_ACQ_REL);
192 }
193 _M_destroy();
194 }
195 }
196
197 long
198 _M_get_use_count() const noexcept
199 {
200 // No memory barrier is used here so there is no synchronization
201 // with other threads.
202 return __atomic_load_n(&_M_use_count, __ATOMIC_RELAXED);
203 }
204
205 private:
206 _Sp_counted_base(_Sp_counted_base const&) = delete;
207 _Sp_counted_base& operator=(_Sp_counted_base const&) = delete;
208
209 _Atomic_word _M_use_count; // #shared
210 _Atomic_word _M_weak_count; // #weak + (#shared != 0)
211 };
212
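
// [Editor's sketch -- not part of shared_ptr_base.h.] The two counters above
// are observable through the public API: _M_weak_count counts weak_ptrs plus
// one while any shared owner remains, so the control block outlives the
// managed object for as long as weak_ptrs do.
#include <cassert>
#include <memory>

void counter_invariants() {
  auto sp = std::make_shared<int>(42); // use_count 1, weak_count 1
  std::weak_ptr<int> wp = sp;          // use_count 1, weak_count 2
  sp.reset();              // _M_dispose(): the int is freed; weak_count 1
  assert(wp.expired());    // the control block itself is still readable
} // ~weak_ptr: weak_count reaches 0 and _M_destroy() frees the control block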
213 template<>
214 inline void
215 _Sp_counted_base<_S_single>::
216 _M_add_ref_lock()
217 {
218 if (_M_use_count == 0)
219 __throw_bad_weak_ptr();
220 ++_M_use_count;
221 }
222
223 template<>
224 inline void
225 _Sp_counted_base<_S_mutex>::
226 _M_add_ref_lock()
227 {
228 __gnu_cxx::__scoped_lock sentry(*this);
229 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_use_count, 1) == 0)
230 {
231 _M_use_count = 0;
232 __throw_bad_weak_ptr();
233 }
234 }
235
236 template<>
237 inline void
238 _Sp_counted_base<_S_atomic>::
239 _M_add_ref_lock()
240 {
241 // Perform lock-free add-if-not-zero operation.
242 _Atomic_word __count = _M_get_use_count();
243 do
244 {
245 if (__count == 0)
246 __throw_bad_weak_ptr();
247 // Replace the current counter value with the old value + 1, as
248 // long as it's not changed meanwhile.
249 }
250 while (!__atomic_compare_exchange_n(&_M_use_count, &__count, __count + 1,
251 true, __ATOMIC_ACQ_REL,
252 __ATOMIC_RELAXED));
253 }
254
255 template<>
256 inline bool
257 _Sp_counted_base<_S_single>::
258 _M_add_ref_lock_nothrow()
259 {
260 if (_M_use_count == 0)
261 return false;
262 ++_M_use_count;
263 return true;
264 }
265
266 template<>
267 inline bool
268 _Sp_counted_base<_S_mutex>::
269 _M_add_ref_lock_nothrow()
270 {
271 __gnu_cxx::__scoped_lock sentry(*this);
272 if (__gnu_cxx::__exchange_and_add_dispatch(&_M_use_count, 1) == 0)
273 {
274 _M_use_count = 0;
275 return false;
276 }
277 return true;
278 }
279
280 template<>
281 inline bool
282 _Sp_counted_base<_S_atomic>::
283 _M_add_ref_lock_nothrow()
284 {
285 // Perform lock-free add-if-not-zero operation.
286 _Atomic_word __count = _M_get_use_count();
287 do
288 {
289 if (__count == 0)
290 return false;
291 // Replace the current counter value with the old value + 1, as
292 // long as it's not changed meanwhile.
293 }
294 while (!__atomic_compare_exchange_n(&_M_use_count, &__count, __count + 1,
295 true, __ATOMIC_ACQ_REL,
296 __ATOMIC_RELAXED));
297 return true;
298 }
299
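
// [Editor's sketch -- not part of shared_ptr_base.h.] The same add-if-not-zero
// CAS loop as above, in portable C++11 atomics; this is what lets
// weak_ptr::lock() take a reference only while the object is still alive.
// Names are illustrative, not libstdc++ API.
#include <atomic>

bool try_add_ref(std::atomic<long> &use_count) {
  long n = use_count.load(std::memory_order_relaxed);
  do {
    if (n == 0)
      return false; // already expired; the count must never be resurrected
  } while (!use_count.compare_exchange_weak(n, n + 1,
                                            std::memory_order_acq_rel,
                                            std::memory_order_relaxed));
  return true;
}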
300 template<>
301 inline void
302 _Sp_counted_base<_S_single>::_M_add_ref_copy()
303 { ++_M_use_count; }
304
305 template<>
306 inline void
307 _Sp_counted_base<_S_single>::_M_release() noexcept
308 {
309 if (--_M_use_count == 0)
310 {
311 _M_dispose();
312 if (--_M_weak_count == 0)
313 _M_destroy();
314 }
315 }
316
317 template<>
318 inline void
319 _Sp_counted_base<_S_single>::_M_weak_add_ref() noexcept
320 { ++_M_weak_count; }
321
322 template<>
323 inline void
324 _Sp_counted_base<_S_single>::_M_weak_release() noexcept
325 {
326 if (--_M_weak_count == 0)
327 _M_destroy();
328 }
329
330 template<>
331 inline long
332 _Sp_counted_base<_S_single>::_M_get_use_count() const noexcept
333 { return _M_use_count; }
334
335
336 // Forward declarations.
337 template<typename _Tp, _Lock_policy _Lp = __default_lock_policy>
338 class __shared_ptr;
339
340 template<typename _Tp, _Lock_policy _Lp = __default_lock_policy>
341 class __weak_ptr;
342
343 template<typename _Tp, _Lock_policy _Lp = __default_lock_policy>
344 class __enable_shared_from_this;
345
346 template<typename _Tp>
347 class shared_ptr;
348
349 template<typename _Tp>
350 class weak_ptr;
351
352 template<typename _Tp>
353 struct owner_less;
354
355 template<typename _Tp>
356 class enable_shared_from_this;
357
358 template<_Lock_policy _Lp = __default_lock_policy>
359 class __weak_count;
360
361 template<_Lock_policy _Lp = __default_lock_policy>
362 class __shared_count;
363
364
365 // Counted ptr with no deleter or allocator support
366 template<typename _Ptr, _Lock_policy _Lp>
367 class _Sp_counted_ptr final : public _Sp_counted_base<_Lp>
368 {
369 public:
370 explicit
371 _Sp_counted_ptr(_Ptr __p) noexcept
372 : _M_ptr(__p) { }
373
374 virtual void
375 _M_dispose() noexcept
376 { delete _M_ptr; }
377
378 virtual void
379 _M_destroy() noexcept
380 { delete this; }
381
382 virtual void*
383 _M_get_deleter(const std::type_info&) noexcept
384 { return nullptr; }
385
386 _Sp_counted_ptr(const _Sp_counted_ptr&) = delete;
387 _Sp_counted_ptr& operator=(const _Sp_counted_ptr&) = delete;
388
389 private:
390 _Ptr _M_ptr;
391 };
392
393 template<>
394 inline void
395 _Sp_counted_ptr<nullptr_t, _S_single>::_M_dispose() noexcept { }
396
397 template<>
398 inline void
399 _Sp_counted_ptr<nullptr_t, _S_mutex>::_M_dispose() noexcept { }
400
401 template<>
402 inline void
403 _Sp_counted_ptr<nullptr_t, _S_atomic>::_M_dispose() noexcept { }
404
405 template<int _Nm, typename _Tp,
406 bool __use_ebo = !__is_final(_Tp) && __is_empty(_Tp)>
407 struct _Sp_ebo_helper;
408
409 /// Specialization using EBO.
410 template<int _Nm, typename _Tp>
411 struct _Sp_ebo_helper<_Nm, _Tp, true> : private _Tp
412 {
413 explicit _Sp_ebo_helper(const _Tp& __tp) : _Tp(__tp) { }
414 explicit _Sp_ebo_helper(_Tp&& __tp) : _Tp(std::move(__tp)) { }
415
416 static _Tp&
417 _S_get(_Sp_ebo_helper& __eboh) { return static_cast<_Tp&>(__eboh); }
418 };
419
420 /// Specialization not using EBO.
421 template<int _Nm, typename _Tp>
422 struct _Sp_ebo_helper<_Nm, _Tp, false>
423 {
424 explicit _Sp_ebo_helper(const _Tp& __tp) : _M_tp(__tp) { }
425 explicit _Sp_ebo_helper(_Tp&& __tp) : _M_tp(std::move(__tp)) { }
426
427 static _Tp&
428 _S_get(_Sp_ebo_helper& __eboh)
429 { return __eboh._M_tp; }
430
431 private:
432 _Tp _M_tp;
433 };
434
435 // Support for custom deleter and/or allocator
436 template<typename _Ptr, typename _Deleter, typename _Alloc, _Lock_policy _Lp>
437 class _Sp_counted_deleter final : public _Sp_counted_base<_Lp>
438 {
439 class _Impl : _Sp_ebo_helper<0, _Deleter>, _Sp_ebo_helper<1, _Alloc>
440 {
441 typedef _Sp_ebo_helper<0, _Deleter> _Del_base;
442 typedef _Sp_ebo_helper<1, _Alloc> _Alloc_base;
443
444 public:
445 _Impl(_Ptr __p, _Deleter __d, const _Alloc& __a) noexcept
446 : _M_ptr(__p), _Del_base(std::move(__d)), _Alloc_base(__a)
447 { }
448
449 _Deleter& _M_del() noexcept { return _Del_base::_S_get(*this); }
450 _Alloc& _M_alloc() noexcept { return _Alloc_base::_S_get(*this); }
451
452 _Ptr _M_ptr;
453 };
454
455 public:
456 using __allocator_type = __alloc_rebind<_Alloc, _Sp_counted_deleter>;
457
458 // __d(__p) must not throw.
459 _Sp_counted_deleter(_Ptr __p, _Deleter __d) noexcept
460 : _M_impl(__p, std::move(__d), _Alloc()) { }
461
462 // __d(__p) must not throw.
463 _Sp_counted_deleter(_Ptr __p, _Deleter __d, const _Alloc& __a) noexcept
464 : _M_impl(__p, std::move(__d), __a) { }
465
466 ~_Sp_counted_deleter() noexcept { }
467
468 virtual void
469 _M_dispose() noexcept
470 { _M_impl._M_del()(_M_impl._M_ptr); }
471
472 virtual void
473 _M_destroy() noexcept
474 {
475 __allocator_type __a(_M_impl._M_alloc());
476 __allocated_ptr<__allocator_type> __guard_ptr{ __a, this };
477 this->~_Sp_counted_deleter();
478 }
479
480 virtual void*
481 _M_get_deleter(const std::type_info& __ti) noexcept
482 {
483#if __cpp_rtti
484 // _GLIBCXX_RESOLVE_LIB_DEFECTS
485 // 2400. shared_ptr's get_deleter() should use addressof()
486 return __ti == typeid(_Deleter)
487 ? std::__addressof(_M_impl._M_del())
488 : nullptr;
489#else
490 return nullptr;
491#endif
492 }
493
494 private:
495 _Impl _M_impl;
496 };
497
498 // helpers for make_shared / allocate_shared
499
500 struct _Sp_make_shared_tag { };
501
502 template<typename _Tp, typename _Alloc, _Lock_policy _Lp>
503 class _Sp_counted_ptr_inplace final : public _Sp_counted_base<_Lp>
504 {
505 class _Impl : _Sp_ebo_helper<0, _Alloc>
506 {
507 typedef _Sp_ebo_helper<0, _Alloc> _A_base;
508
509 public:
510 explicit _Impl(_Alloc __a) noexcept : _A_base(__a) { }
511
512 _Alloc& _M_alloc() noexcept { return _A_base::_S_get(*this); }
513
514 __gnu_cxx::__aligned_buffer<_Tp> _M_storage;
515 };
516
517 public:
518 using __allocator_type = __alloc_rebind<_Alloc, _Sp_counted_ptr_inplace>;
519
520 template<typename... _Args>
521 _Sp_counted_ptr_inplace(_Alloc __a, _Args&&... __args)
522 : _M_impl(__a)
523 {
524 // _GLIBCXX_RESOLVE_LIB_DEFECTS
525 // 2070. allocate_shared should use allocator_traits<A>::construct
526 allocator_traits<_Alloc>::construct(__a, _M_ptr(),
527 std::forward<_Args>(__args)...); // might throw
528 }
529
530 ~_Sp_counted_ptr_inplace() noexcept { }
531
532 virtual void
533 _M_dispose() noexcept
534 {
535 allocator_traits<_Alloc>::destroy(_M_impl._M_alloc(), _M_ptr());
536 }
537
538 // Override because the allocator needs to know the dynamic type
539 virtual void
540 _M_destroy() noexcept
541 {
542 __allocator_type __a(_M_impl._M_alloc());
543 __allocated_ptr<__allocator_type> __guard_ptr{ __a, this };
544 this->~_Sp_counted_ptr_inplace();
545 }
546
547 // Sneaky trick so __shared_ptr can get the managed pointer
548 virtual void*
549 _M_get_deleter(const std::type_info& __ti) noexcept
550 {
551#if __cpp_rtti
552 if (__ti == typeid(_Sp_make_shared_tag))
553 return const_cast<typename remove_cv<_Tp>::type*>(_M_ptr());
554#endif
555 return nullptr;
556 }
557
558 private:
559 _Tp* _M_ptr() noexcept { return _M_impl._M_storage._M_ptr(); }
560
561 _Impl _M_impl;
562 };
563
564 // The default deleter for shared_ptr<T[]> and shared_ptr<T[N]>.
565 struct __sp_array_delete
566 {
567 template<typename _Yp>
568 void operator()(_Yp* __p) const { delete[] __p; }
569 };
570
571 template<_Lock_policy _Lp>
572 class __shared_count
573 {
574 public:
575 constexpr __shared_count() noexcept : _M_pi(0)
576 { }
577
578 template<typename _Ptr>
579 explicit
580 __shared_count(_Ptr __p) : _M_pi(0)
581 {
582 __try
583 {
584 _M_pi = new _Sp_counted_ptr<_Ptr, _Lp>(__p);
585 }
586 __catch(...)
587 {
588 delete __p;
589 __throw_exception_again;
590 }
591 }
592
593 template<typename _Ptr>
594 __shared_count(_Ptr __p, /* is_array = */ false_type)
595 : __shared_count(__p)
596 { }
597
598 template<typename _Ptr>
599 __shared_count(_Ptr __p, /* is_array = */ true_type)
600 : __shared_count(__p, __sp_array_delete{}, allocator<void>())
601 { }
602
603 template<typename _Ptr, typename _Deleter>
604 __shared_count(_Ptr __p, _Deleter __d)
605 : __shared_count(__p, std::move(__d), allocator<void>())
606 { }
607
608 template<typename _Ptr, typename _Deleter, typename _Alloc>
609 __shared_count(_Ptr __p, _Deleter __d, _Alloc __a) : _M_pi(0)
610 {
611 typedef _Sp_counted_deleter<_Ptr, _Deleter, _Alloc, _Lp> _Sp_cd_type;
612 __try
613 {
614 typename _Sp_cd_type::__allocator_type __a2(__a);
615 auto __guard = std::__allocate_guarded(__a2);
616 _Sp_cd_type* __mem = __guard.get();
617 ::new (__mem) _Sp_cd_type(__p, std::move(__d), std::move(__a));
618 _M_pi = __mem;
619 __guard = nullptr;
620 }
621 __catch(...)
622 {
623 __d(__p); // Call _Deleter on __p.
624 __throw_exception_again;
625 }
626 }
627
628 template<typename _Tp, typename _Alloc, typename... _Args>
629 __shared_count(_Sp_make_shared_tag, _Tp*, const _Alloc& __a,
630 _Args&&... __args)
631 : _M_pi(0)
632 {
633 typedef _Sp_counted_ptr_inplace<_Tp, _Alloc, _Lp> _Sp_cp_type;
634 typename _Sp_cp_type::__allocator_type __a2(__a);
635 auto __guard = std::__allocate_guarded(__a2);
636 _Sp_cp_type* __mem = __guard.get();
637 ::new (__mem) _Sp_cp_type(std::move(__a),
638 std::forward<_Args>(__args)...);
639 _M_pi = __mem;
640 __guard = nullptr;
641 }
642
643#if _GLIBCXX_USE_DEPRECATED
644 // Special case for auto_ptr<_Tp> to provide the strong guarantee.
645 template<typename _Tp>
646 explicit
647 __shared_count(std::auto_ptr<_Tp>&& __r);
648#endif
649
650 // Special case for unique_ptr<_Tp,_Del> to provide the strong guarantee.
651 template<typename _Tp, typename _Del>
652 explicit
653 __shared_count(std::unique_ptr<_Tp, _Del>&& __r) : _M_pi(0)
654 {
655 // _GLIBCXX_RESOLVE_LIB_DEFECTS
656 // 2415. Inconsistency between unique_ptr and shared_ptr
657 if (__r.get() == nullptr)
658 return;
659
660 using _Ptr = typename unique_ptr<_Tp, _Del>::pointer;
661 using _Del2 = typename conditional<is_reference<_Del>::value,
662 reference_wrapper<typename remove_reference<_Del>::type>,
663 _Del>::type;
664 using _Sp_cd_type
665 = _Sp_counted_deleter<_Ptr, _Del2, allocator<void>, _Lp>;
666 using _Alloc = allocator<_Sp_cd_type>;
667 using _Alloc_traits = allocator_traits<_Alloc>;
668 _Alloc __a;
669 _Sp_cd_type* __mem = _Alloc_traits::allocate(__a, 1);
670 _Alloc_traits::construct(__a, __mem, __r.release(),
671 __r.get_deleter()); // non-throwing
672 _M_pi = __mem;
673 }
674
675 // Throw bad_weak_ptr when __r._M_get_use_count() == 0.
676 explicit __shared_count(const __weak_count<_Lp>& __r);
677
678 // Does not throw if __r._M_get_use_count() == 0, caller must check.
679 explicit __shared_count(const __weak_count<_Lp>& __r, std::nothrow_t);
680
681 ~__shared_count() noexcept
682 {
683 if (_M_pi != nullptr)
684 _M_pi->_M_release();
685 }
686
687 __shared_count(const __shared_count& __r) noexcept
688 : _M_pi(__r._M_pi)
689 {
690 if (_M_pi != 0)
691 _M_pi->_M_add_ref_copy();
692 }
693
694 __shared_count&
695 operator=(const __shared_count& __r) noexcept
696 {
697 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
698 if (__tmp != _M_pi)
16
Taking true branch
40
Taking true branch
699 {
700 if (__tmp != 0)
17
Taking true branch
41
Assuming '__tmp' is equal to null
42
Taking false branch
701 __tmp->_M_add_ref_copy();
702 if (_M_pi != 0)
18
Taking true branch
43
Taking true branch
703 _M_pi->_M_release();
19
Calling '_Sp_counted_base::_M_release'
27
Returning; memory was released
44
Calling '_Sp_counted_base::_M_release'
704 _M_pi = __tmp;
705 }
706 return *this;
707 }
708
709 void
710 _M_swap(__shared_count& __r) noexcept
711 {
712 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
713 __r._M_pi = _M_pi;
714 _M_pi = __tmp;
715 }
716
717 long
718 _M_get_use_count() const noexcept
719 { return _M_pi != 0 ? _M_pi->_M_get_use_count() : 0; }
720
721 bool
722 _M_unique() const noexcept
723 { return this->_M_get_use_count() == 1; }
724
725 void*
726 _M_get_deleter(const std::type_info& __ti) const noexcept
727 { return _M_pi ? _M_pi->_M_get_deleter(__ti) : nullptr; }
728
729 bool
730 _M_less(const __shared_count& __rhs) const noexcept
731 { return std::less<_Sp_counted_base<_Lp>*>()(this->_M_pi, __rhs._M_pi); }
732
733 bool
734 _M_less(const __weak_count<_Lp>& __rhs) const noexcept
735 { return std::less<_Sp_counted_base<_Lp>*>()(this->_M_pi, __rhs._M_pi); }
736
737 // Friend function injected into enclosing namespace and found by ADL
738 friend inline bool
739 operator==(const __shared_count& __a, const __shared_count& __b) noexcept
740 { return __a._M_pi == __b._M_pi; }
741
742 private:
743 friend class __weak_count<_Lp>;
744
745 _Sp_counted_base<_Lp>* _M_pi;
746 };
747
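
// [Editor's sketch -- not part of shared_ptr_base.h.] operator= above add-refs
// the incoming control block before releasing the old one, and skips both when
// the wrappers already share a block; reversing that order could free a block
// both sides still need. Illustrated with std::shared_ptr:
#include <memory>

void alias_safe_assignment() {
  std::shared_ptr<int> a = std::make_shared<int>(7);
  std::shared_ptr<int> b = a; // same control block, use_count == 2
  a = b;                      // __tmp == _M_pi: the guard short-circuits
  a = a;                      // self-assignment is equally harmless
}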
748
749 template<_Lock_policy _Lp>
750 class __weak_count
751 {
752 public:
753 constexpr __weak_count() noexcept : _M_pi(nullptr)
754 { }
755
756 __weak_count(const __shared_count<_Lp>& __r) noexcept
757 : _M_pi(__r._M_pi)
758 {
759 if (_M_pi != nullptr)
760 _M_pi->_M_weak_add_ref();
761 }
762
763 __weak_count(const __weak_count& __r) noexcept
764 : _M_pi(__r._M_pi)
765 {
766 if (_M_pi != nullptr)
767 _M_pi->_M_weak_add_ref();
768 }
769
770 __weak_count(__weak_count&& __r) noexcept
771 : _M_pi(__r._M_pi)
772 { __r._M_pi = nullptr; }
773
774 ~__weak_count() noexcept
775 {
776 if (_M_pi != nullptr)
777 _M_pi->_M_weak_release();
778 }
779
780 __weak_count&
781 operator=(const __shared_count<_Lp>& __r) noexcept
782 {
783 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
784 if (__tmp != nullptr)
785 __tmp->_M_weak_add_ref();
786 if (_M_pi != nullptr)
787 _M_pi->_M_weak_release();
788 _M_pi = __tmp;
789 return *this;
790 }
791
792 __weak_count&
793 operator=(const __weak_count& __r) noexcept
794 {
795 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
796 if (__tmp != nullptr)
797 __tmp->_M_weak_add_ref();
798 if (_M_pi != nullptr)
799 _M_pi->_M_weak_release();
800 _M_pi = __tmp;
801 return *this;
802 }
803
804 __weak_count&
805 operator=(__weak_count&& __r) noexcept
806 {
807 if (_M_pi != nullptr)
808 _M_pi->_M_weak_release();
809 _M_pi = __r._M_pi;
810 __r._M_pi = nullptr;
811 return *this;
812 }
813
814 void
815 _M_swap(__weak_count& __r) noexcept
816 {
817 _Sp_counted_base<_Lp>* __tmp = __r._M_pi;
818 __r._M_pi = _M_pi;
819 _M_pi = __tmp;
820 }
821
822 long
823 _M_get_use_count() const noexcept
824 { return _M_pi != nullptr ? _M_pi->_M_get_use_count() : 0; }
825
826 bool
827 _M_less(const __weak_count& __rhs) const noexcept
828 { return std::less<_Sp_counted_base<_Lp>*>()(this->_M_pi, __rhs._M_pi); }
829
830 bool
831 _M_less(const __shared_count<_Lp>& __rhs) const noexcept
832 { return std::less<_Sp_counted_base<_Lp>*>()(this->_M_pi, __rhs._M_pi); }
833
834 // Friend function injected into enclosing namespace and found by ADL
835 friend inline bool
836 operator==(const __weak_count& __a, const __weak_count& __b) noexcept
837 { return __a._M_pi == __b._M_pi; }
838
839 private:
840 friend class __shared_count<_Lp>;
841
842 _Sp_counted_base<_Lp>* _M_pi;
843 };
844
845 // Now that __weak_count is defined we can define this constructor:
846 template<_Lock_policy _Lp>
847 inline
848 __shared_count<_Lp>::__shared_count(const __weak_count<_Lp>& __r)
849 : _M_pi(__r._M_pi)
850 {
851 if (_M_pi != nullptr)
852 _M_pi->_M_add_ref_lock();
853 else
854 __throw_bad_weak_ptr();
855 }
856
857 // Now that __weak_count is defined we can define this constructor:
858 template<_Lock_policy _Lp>
859 inline
860 __shared_count<_Lp>::
861 __shared_count(const __weak_count<_Lp>& __r, std::nothrow_t)
862 : _M_pi(__r._M_pi)
863 {
864 if (_M_pi != nullptr)
865 if (!_M_pi->_M_add_ref_lock_nothrow())
866 _M_pi = nullptr;
867 }
868
869#define __cpp_lib_shared_ptr_arrays 201603
870
871 // Helper traits for shared_ptr of array:
872
873 // A pointer type Y* is said to be compatible with a pointer type T* when
874 // either Y* is convertible to T* or Y is U[N] and T is U cv [].
875 template<typename _Yp_ptr, typename _Tp_ptr>
876 struct __sp_compatible_with
877 : false_type
878 { };
879
880 template<typename _Yp, typename _Tp>
881 struct __sp_compatible_with<_Yp*, _Tp*>
882 : is_convertible<_Yp*, _Tp*>::type
883 { };
884
885 template<typename _Up, size_t _Nm>
886 struct __sp_compatible_with<_Up(*)[_Nm], _Up(*)[]>
887 : true_type
888 { };
889
890 template<typename _Up, size_t _Nm>
891 struct __sp_compatible_with<_Up(*)[_Nm], const _Up(*)[]>
892 : true_type
893 { };
894
895 template<typename _Up, size_t _Nm>
896 struct __sp_compatible_with<_Up(*)[_Nm], volatile _Up(*)[]>
897 : true_type
898 { };
899
900 template<typename _Up, size_t _Nm>
901 struct __sp_compatible_with<_Up(*)[_Nm], const volatile _Up(*)[]>
902 : true_type
903 { };
904
905 // Test conversion from Y(*)[N] to U(*)[N] without forming invalid type Y[N].
906 template<typename _Up, size_t _Nm, typename _Yp, typename = void>
907 struct __sp_is_constructible_arrN
908 : false_type
909 { };
910
911 template<typename _Up, size_t _Nm, typename _Yp>
912 struct __sp_is_constructible_arrN<_Up, _Nm, _Yp, __void_t<_Yp[_Nm]>>
913 : is_convertible<_Yp(*)[_Nm], _Up(*)[_Nm]>::type
914 { };
915
916 // Test conversion from Y(*)[] to U(*)[] without forming invalid type Y[].
917 template<typename _Up, typename _Yp, typename = void>
918 struct __sp_is_constructible_arr
919 : false_type
920 { };
921
922 template<typename _Up, typename _Yp>
923 struct __sp_is_constructible_arr<_Up, _Yp, __void_t<_Yp[]>>
924 : is_convertible<_Yp(*)[], _Up(*)[]>::type
925 { };
926
927 // Trait to check if shared_ptr<T> can be constructed from Y*.
928 template<typename _Tp, typename _Yp>
929 struct __sp_is_constructible;
930
931 // When T is U[N], Y(*)[N] shall be convertible to T*;
932 template<typename _Up, size_t _Nm, typename _Yp>
933 struct __sp_is_constructible<_Up[_Nm], _Yp>
934 : __sp_is_constructible_arrN<_Up, _Nm, _Yp>::type
935 { };
936
937 // when T is U[], Y(*)[] shall be convertible to T*;
938 template<typename _Up, typename _Yp>
939 struct __sp_is_constructible<_Up[], _Yp>
940 : __sp_is_constructible_arr<_Up, _Yp>::type
941 { };
942
943 // otherwise, Y* shall be convertible to T*.
944 template<typename _Tp, typename _Yp>
945 struct __sp_is_constructible
946 : is_convertible<_Yp*, _Tp*>::type
947 { };
948
949
950 // Define operator* and operator-> for shared_ptr<T>.
951 template<typename _Tp, _Lock_policy _Lp,
952 bool = is_array<_Tp>::value, bool = is_void<_Tp>::value>
953 class __shared_ptr_access
954 {
955 public:
956 using element_type = _Tp;
957
958 element_type&
959 operator*() const noexcept
960 {
961 __glibcxx_assert(_M_get() != nullptr);
962 return *_M_get();
963 }
964
965 element_type*
966 operator->() const noexcept
967 {
968 _GLIBCXX_DEBUG_PEDASSERT(_M_get() != nullptr);
969 return _M_get();
970 }
971
972 private:
973 element_type*
974 _M_get() const noexcept
975 { return static_cast<const __shared_ptr<_Tp, _Lp>*>(this)->get(); }
976 };
977
978 // Define operator-> for shared_ptr<cv void>.
979 template<typename _Tp, _Lock_policy _Lp>
980 class __shared_ptr_access<_Tp, _Lp, false, true>
981 {
982 public:
983 using element_type = _Tp;
984
985 element_type*
986 operator->() const noexcept
987 {
988 auto __ptr = static_cast<const __shared_ptr<_Tp, _Lp>*>(this)->get();
989 _GLIBCXX_DEBUG_PEDASSERT(__ptr != nullptr);
990 return __ptr;
991 }
992 };
993
994 // Define operator[] for shared_ptr<T[]> and shared_ptr<T[N]>.
995 template<typename _Tp, _Lock_policy _Lp>
996 class __shared_ptr_access<_Tp, _Lp, true, false>
997 {
998 public:
999 using element_type = typename remove_extent<_Tp>::type;
1000
1001#if __cplusplus <= 201402L
1002 [[__deprecated__("shared_ptr<T[]>::operator* is absent from C++17")]]
1003 element_type&
1004 operator*() const noexcept
1005 {
1006 __glibcxx_assert(_M_get() != nullptr);
1007 return *_M_get();
1008 }
1009
1010 [[__deprecated__("shared_ptr<T[]>::operator-> is absent from C++17")]]
1011 element_type*
1012 operator->() const noexcept
1013 {
1014 _GLIBCXX_DEBUG_PEDASSERT(_M_get() != nullptr);
1015 return _M_get();
1016 }
1017#endif
1018
1019 element_type&
1020 operator[](ptrdiff_t __i) const
1021 {
1022 __glibcxx_assert(_M_get() != nullptr);
1023 __glibcxx_assert(!extent<_Tp>::value || __i < extent<_Tp>::value);
1024 return _M_get()[__i];
1025 }
1026
1027 private:
1028 element_type*
1029 _M_get() const noexcept
1030 { return static_cast<const __shared_ptr<_Tp, _Lp>*>(this)->get(); }
1031 };
1032
1033 template<typename _Tp, _Lock_policy _Lp>
1034 class __shared_ptr
1035 : public __shared_ptr_access<_Tp, _Lp>
1036 {
1037 public:
1038 using element_type = typename remove_extent<_Tp>::type;
1039
1040 private:
1041 // Constraint for taking ownership of a pointer of type _Yp*:
1042 template<typename _Yp>
1043 using _SafeConv
1044 = typename enable_if<__sp_is_constructible<_Tp, _Yp>::value>::type;
1045
1046 // Constraint for construction from shared_ptr and weak_ptr:
1047 template<typename _Yp, typename _Res = void>
1048 using _Compatible = typename
1049 enable_if<__sp_compatible_with<_Yp*, _Tp*>::value, _Res>::type;
1050
1051 // Constraint for assignment from shared_ptr and weak_ptr:
1052 template<typename _Yp>
1053 using _Assignable = _Compatible<_Yp, __shared_ptr&>;
1054
1055 // Constraint for construction from unique_ptr:
1056 template<typename _Yp, typename _Del, typename _Res = void,
1057 typename _Ptr = typename unique_ptr<_Yp, _Del>::pointer>
1058 using _UniqCompatible = typename enable_if<__and_<
1059 __sp_compatible_with<_Yp*, _Tp*>, is_convertible<_Ptr, element_type*>
1060 >::value, _Res>::type;
1061
1062 // Constraint for assignment from unique_ptr:
1063 template<typename _Yp, typename _Del>
1064 using _UniqAssignable = _UniqCompatible<_Yp, _Del, __shared_ptr&>;
1065
1066 public:
1067
1068#if __cplusplus > 201402L
1069 using weak_type = __weak_ptr<_Tp, _Lp>;
1070#endif
1071
1072 constexpr __shared_ptr() noexcept
1073 : _M_ptr(0), _M_refcount()
1074 { }
1075
1076 template<typename _Yp, typename = _SafeConv<_Yp>>
1077 explicit
1078 __shared_ptr(_Yp* __p)
1079 : _M_ptr(__p), _M_refcount(__p, typename is_array<_Tp>::type())
1080 {
1081 static_assert( !is_void<_Yp>::value, "incomplete type" );
1082 static_assert( sizeof(_Yp) > 0, "incomplete type" );
1083 _M_enable_shared_from_this_with(__p);
1084 }
1085
1086 template<typename _Yp, typename _Deleter, typename = _SafeConv<_Yp>>
1087 __shared_ptr(_Yp* __p, _Deleter __d)
1088 : _M_ptr(__p), _M_refcount(__p, std::move(__d))
1089 {
1090 static_assert(__is_invocable<_Deleter&, _Yp*&>::value,
1091 "deleter expression d(p) is well-formed");
1092 _M_enable_shared_from_this_with(__p);
1093 }
1094
1095 template<typename _Yp, typename _Deleter, typename _Alloc,
1096 typename = _SafeConv<_Yp>>
1097 __shared_ptr(_Yp* __p, _Deleter __d, _Alloc __a)
1098 : _M_ptr(__p), _M_refcount(__p, std::move(__d), std::move(__a))
1099 {
1100 static_assert(__is_invocable<_Deleter&, _Yp*&>::value,
1101 "deleter expression d(p) is well-formed");
1102 _M_enable_shared_from_this_with(__p);
1103 }
1104
1105 template<typename _Deleter>
1106 __shared_ptr(nullptr_t __p, _Deleter __d)
1107 : _M_ptr(0), _M_refcount(__p, std::move(__d))
1108 { }
1109
1110 template<typename _Deleter, typename _Alloc>
1111 __shared_ptr(nullptr_t __p, _Deleter __d, _Alloc __a)
1112 : _M_ptr(0), _M_refcount(__p, std::move(__d), std::move(__a))
1113 { }
1114
1115 template<typename _Yp>
1116 __shared_ptr(const __shared_ptr<_Yp, _Lp>& __r,
1117 element_type* __p) noexcept
1118 : _M_ptr(__p), _M_refcount(__r._M_refcount) // never throws
1119 { }
1120
1121 __shared_ptr(const __shared_ptr&) noexcept = default;
1122 __shared_ptr& operator=(const __shared_ptr&) noexcept = default;
15
Calling copy assignment operator for '__shared_count'
28
Returning; memory was released
39
Calling copy assignment operator for '__shared_count'
1123 ~__shared_ptr() = default;
1124
1125 template<typename _Yp, typename = _Compatible<_Yp>>
1126 __shared_ptr(const __shared_ptr<_Yp, _Lp>& __r) noexcept
1127 : _M_ptr(__r._M_ptr), _M_refcount(__r._M_refcount)
1128 { }
1129
1130 __shared_ptr(__shared_ptr&& __r) noexcept
1131 : _M_ptr(__r._M_ptr), _M_refcount()
1132 {
1133 _M_refcount._M_swap(__r._M_refcount);
1134 __r._M_ptr = 0;
1135 }
1136
1137 template<typename _Yp, typename = _Compatible<_Yp>>
1138 __shared_ptr(__shared_ptr<_Yp, _Lp>&& __r) noexcept
1139 : _M_ptr(__r._M_ptr), _M_refcount()
1140 {
1141 _M_refcount._M_swap(__r._M_refcount);
1142 __r._M_ptr = 0;
1143 }
1144
1145 template<typename _Yp, typename = _Compatible<_Yp>>
1146 explicit __shared_ptr(const __weak_ptr<_Yp, _Lp>& __r)
1147 : _M_refcount(__r._M_refcount) // may throw
1148 {
1149 // It is now safe to copy __r._M_ptr, as
1150 // _M_refcount(__r._M_refcount) did not throw.
1151 _M_ptr = __r._M_ptr;
1152 }
1153
1154 // If an exception is thrown this constructor has no effect.
1155 template<typename _Yp, typename _Del,
1156 typename = _UniqCompatible<_Yp, _Del>>
1157 __shared_ptr(unique_ptr<_Yp, _Del>&& __r)
1158 : _M_ptr(__r.get()), _M_refcount()
1159 {
1160 auto __raw = _S_raw_ptr(__r.get());
1161 _M_refcount = __shared_count<_Lp>(std::move(__r));
1162 _M_enable_shared_from_this_with(__raw);
1163 }
1164
1165#if __cplusplus <= 201402L && _GLIBCXX_USE_DEPRECATED
1166 protected:
1167 // If an exception is thrown this constructor has no effect.
1168 template<typename _Tp1, typename _Del,
1169 typename enable_if<__and_<
1170 __not_<is_array<_Tp>>, is_array<_Tp1>,
1171 is_convertible<typename unique_ptr<_Tp1, _Del>::pointer, _Tp*>
1172 >::value, bool>::type = true>
1173 __shared_ptr(unique_ptr<_Tp1, _Del>&& __r, __sp_array_delete)
1174 : _M_ptr(__r.get()), _M_refcount()
1175 {
1176 auto __raw = _S_raw_ptr(__r.get());
1177 _M_refcount = __shared_count<_Lp>(std::move(__r));
1178 _M_enable_shared_from_this_with(__raw);
1179 }
1180 public:
1181#endif
1182
1183#if _GLIBCXX_USE_DEPRECATED
1184 // Postcondition: use_count() == 1 and __r.get() == 0
1185 template<typename _Yp, typename = _Compatible<_Yp>>
1186 __shared_ptr(auto_ptr<_Yp>&& __r);
1187#endif
1188
1189 constexpr __shared_ptr(nullptr_t) noexcept : __shared_ptr() { }
1190
1191 template<typename _Yp>
1192 _Assignable<_Yp>
1193 operator=(const __shared_ptr<_Yp, _Lp>& __r) noexcept
1194 {
1195 _M_ptr = __r._M_ptr;
1196 _M_refcount = __r._M_refcount; // __shared_count::op= doesn't throw
1197 return *this;
1198 }
1199
1200#if _GLIBCXX_USE_DEPRECATED
1201 template<typename _Yp>
1202 _Assignable<_Yp>
1203 operator=(auto_ptr<_Yp>&& __r)
1204 {
1205 __shared_ptr(std::move(__r)).swap(*this);
1206 return *this;
1207 }
1208#endif
1209
1210 __shared_ptr&
1211 operator=(__shared_ptr&& __r) noexcept
1212 {
1213 __shared_ptr(std::move(__r)).swap(*this);
1214 return *this;
1215 }
1216
1217 template<class _Yp>
1218 _Assignable<_Yp>
1219 operator=(__shared_ptr<_Yp, _Lp>&& __r) noexcept
1220 {
1221 __shared_ptr(std::move(__r)).swap(*this);
1222 return *this;
1223 }
1224
1225 template<typename _Yp, typename _Del>
1226 _UniqAssignable<_Yp, _Del>
1227 operator=(unique_ptr<_Yp, _Del>&& __r)
1228 {
1229 __shared_ptr(std::move(__r)).swap(*this);
1230 return *this;
1231 }
1232
1233 void
1234 reset() noexcept
1235 { __shared_ptr().swap(*this); }
1236
1237 template<typename _Yp>
1238 _SafeConv<_Yp>
1239 reset(_Yp* __p) // _Yp must be complete.
1240 {
1241 // Catch self-reset errors.
1242 __glibcxx_assert(__p == 0 || __p != _M_ptr);
1243 __shared_ptr(__p).swap(*this);
1244 }
1245
1246 template<typename _Yp, typename _Deleter>
1247 _SafeConv<_Yp>
1248 reset(_Yp* __p, _Deleter __d)
1249 { __shared_ptr(__p, std::move(__d)).swap(*this); }
1250
1251 template<typename _Yp, typename _Deleter, typename _Alloc>
1252 _SafeConv<_Yp>
1253 reset(_Yp* __p, _Deleter __d, _Alloc __a)
1254 { __shared_ptr(__p, std::move(__d), std::move(__a)).swap(*this); }
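All three reset() overloads are implemented as swap-with-temporary, so a throwing allocation leaves *this untouched; only the raw-pointer overload needs the self-reset assertion. A short usage sketch (hypothetical values):

    #include <memory>

    int main()
    {
        std::shared_ptr<int> p(new int(1));
        p.reset(new int(2));  // releases the old object, owns the new one
        // p.reset(p.get());  // self-reset: caught by __glibcxx_assert
        return *p == 2 ? 0 : 1;
    }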
1255
1256 element_type*
1257 get() const noexcept
1258 { return _M_ptr; }
1259
1260 explicit operator bool() const // never throws
1261 { return _M_ptr == 0 ? false : true; }
1262
1263 bool
1264 unique() const noexcept
1265 { return _M_refcount._M_unique(); }
1266
1267 long
1268 use_count() const noexcept
1269 { return _M_refcount._M_get_use_count(); }
1270
1271 void
1272 swap(__shared_ptr<_Tp, _Lp>& __other) noexcept
1273 {
1274 std::swap(_M_ptr, __other._M_ptr);
1275 _M_refcount._M_swap(__other._M_refcount);
1276 }
1277
1278 template<typename _Tp1>
1279 bool
1280 owner_before(__shared_ptr<_Tp1, _Lp> const& __rhs) const noexcept
1281 { return _M_refcount._M_less(__rhs._M_refcount); }
1282
1283 template<typename _Tp1>
1284 bool
1285 owner_before(__weak_ptr<_Tp1, _Lp> const& __rhs) const noexcept
1286 { return _M_refcount._M_less(__rhs._M_refcount); }
1287
1288#if __cpp_rtti
1289 protected:
1290 // This constructor is non-standard, it is used by allocate_shared.
1291 template<typename _Alloc, typename... _Args>
1292 __shared_ptr(_Sp_make_shared_tag __tag, const _Alloc& __a,
1293 _Args&&... __args)
1294 : _M_ptr(), _M_refcount(__tag, (_Tp*)0, __a,
1295 std::forward<_Args>(__args)...)
1296 {
1297 // _M_ptr needs to point to the newly constructed object.
1298 // This relies on _Sp_counted_ptr_inplace::_M_get_deleter.
1299 void* __p = _M_refcount._M_get_deleter(typeid(__tag));
1300 _M_ptr = static_cast<_Tp*>(__p);
1301 _M_enable_shared_from_this_with(_M_ptr);
1302 }
1303#else
1304 template<typename _Alloc>
1305 struct _Deleter
1306 {
1307 void operator()(typename _Alloc::value_type* __ptr)
1308 {
1309 __allocated_ptr<_Alloc> __guard{ _M_alloc, __ptr };
1310 allocator_traits<_Alloc>::destroy(_M_alloc, __guard.get());
1311 }
1312 _Alloc _M_alloc;
1313 };
1314
1315 template<typename _Alloc, typename... _Args>
1316 __shared_ptr(_Sp_make_shared_tag __tag, const _Alloc& __a,
1317 _Args&&... __args)
1318 : _M_ptr(), _M_refcount()
1319 {
1320 typedef typename allocator_traits<_Alloc>::template
1321 rebind_traits<typename std::remove_cv<_Tp>::type> __traits;
1322 _Deleter<typename __traits::allocator_type> __del = { __a };
1323 auto __guard = std::__allocate_guarded(__del._M_alloc);
1324 auto __ptr = __guard.get();
1325 // _GLIBCXX_RESOLVE_LIB_DEFECTS
1326 // 2070. allocate_shared should use allocator_traits<A>::construct
1327 __traits::construct(__del._M_alloc, __ptr,
1328 std::forward<_Args>(__args)...);
1329 __guard = nullptr;
1330 __shared_count<_Lp> __count(__ptr, __del, __del._M_alloc);
1331 _M_refcount._M_swap(__count);
1332 _M_ptr = __ptr;
1333 _M_enable_shared_from_this_with(_M_ptr);
1334 }
1335#endif
1336
1337 template<typename _Tp1, _Lock_policy _Lp1, typename _Alloc,
1338 typename... _Args>
1339 friend __shared_ptr<_Tp1, _Lp1>
1340 __allocate_shared(const _Alloc& __a, _Args&&... __args);
1341
1342 // This constructor is used by __weak_ptr::lock() and
1343 // shared_ptr::shared_ptr(const weak_ptr&, std::nothrow_t).
1344 __shared_ptr(const __weak_ptr<_Tp, _Lp>& __r, std::nothrow_t)
1345 : _M_refcount(__r._M_refcount, std::nothrow)
1346 {
1347 _M_ptr = _M_refcount._M_get_use_count() ? __r._M_ptr : nullptr;
1348 }
1349
1350 friend class __weak_ptr<_Tp, _Lp>;
1351
1352 private:
1353
1354 template<typename _Yp>
1355 using __esft_base_t = decltype(__enable_shared_from_this_base(
1356 std::declval<const __shared_count<_Lp>&>(),
1357 std::declval<_Yp*>()));
1358
1359 // Detect an accessible and unambiguous enable_shared_from_this base.
1360 template<typename _Yp, typename = void>
1361 struct __has_esft_base
1362 : false_type { };
1363
1364 template<typename _Yp>
1365 struct __has_esft_base<_Yp, __void_t<__esft_base_t<_Yp>>>
1366 : __not_<is_array<_Tp>> { }; // No enable shared_from_this for arrays
1367
1368 template<typename _Yp, typename _Yp2 = typename remove_cv<_Yp>::type>
1369 typename enable_if<__has_esft_base<_Yp2>::value>::type
1370 _M_enable_shared_from_this_with(_Yp* __p) noexcept
1371 {
1372 if (auto __base = __enable_shared_from_this_base(_M_refcount, __p))
1373 __base->_M_weak_assign(const_cast<_Yp2*>(__p), _M_refcount);
1374 }
1375
1376 template<typename _Yp, typename _Yp2 = typename remove_cv<_Yp>::type>
1377 typename enable_if<!__has_esft_base<_Yp2>::value>::type
1378 _M_enable_shared_from_this_with(_Yp*) noexcept
1379 { }
1380
1381 void*
1382 _M_get_deleter(const std::type_info& __ti) const noexcept
1383 { return _M_refcount._M_get_deleter(__ti); }
1384
1385 template<typename _Tp1>
1386 static _Tp1*
1387 _S_raw_ptr(_Tp1* __ptr)
1388 { return __ptr; }
1389
1390 template<typename _Tp1>
1391 static auto
1392 _S_raw_ptr(_Tp1 __ptr) -> decltype(std::__addressof(*__ptr))
1393 { return std::__addressof(*__ptr); }
1394
1395 template<typename _Tp1, _Lock_policy _Lp1> friend class __shared_ptr;
1396 template<typename _Tp1, _Lock_policy _Lp1> friend class __weak_ptr;
1397
1398 template<typename _Del, typename _Tp1, _Lock_policy _Lp1>
1399 friend _Del* get_deleter(const __shared_ptr<_Tp1, _Lp1>&) noexcept;
1400
1401 element_type* _M_ptr; // Contained pointer.
1402 __shared_count<_Lp> _M_refcount; // Reference counter.
1403 };
1404
1405
1406 // 20.7.2.2.7 shared_ptr comparisons
1407 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1408 inline bool
1409 operator==(const __shared_ptr<_Tp1, _Lp>& __a,
1410 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1411 { return __a.get() == __b.get(); }
1412
1413 template<typename _Tp, _Lock_policy _Lp>
1414 inline bool
1415 operator==(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1416 { return !__a; }
1417
1418 template<typename _Tp, _Lock_policy _Lp>
1419 inline bool
1420 operator==(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1421 { return !__a; }
1422
1423 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1424 inline bool
1425 operator!=(const __shared_ptr<_Tp1, _Lp>& __a,
1426 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1427 { return __a.get() != __b.get(); }
1428
1429 template<typename _Tp, _Lock_policy _Lp>
1430 inline bool
1431 operator!=(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1432 { return (bool)__a; }
1433
1434 template<typename _Tp, _Lock_policy _Lp>
1435 inline bool
1436 operator!=(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1437 { return (bool)__a; }
1438
1439 template<typename _Tp, typename _Up, _Lock_policy _Lp>
1440 inline bool
1441 operator<(const __shared_ptr<_Tp, _Lp>& __a,
1442 const __shared_ptr<_Up, _Lp>& __b) noexcept
1443 {
1444 using _Tp_elt = typename __shared_ptr<_Tp, _Lp>::element_type;
1445 using _Up_elt = typename __shared_ptr<_Up, _Lp>::element_type;
1446 using _Vp = typename common_type<_Tp_elt*, _Up_elt*>::type;
1447 return less<_Vp>()(__a.get(), __b.get());
1448 }
1449
1450 template<typename _Tp, _Lock_policy _Lp>
1451 inline bool
1452 operator<(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1453 {
1454 using _Tp_elt = typename __shared_ptr<_Tp, _Lp>::element_type;
1455 return less<_Tp_elt*>()(__a.get(), nullptr);
1456 }
1457
1458 template<typename _Tp, _Lock_policy _Lp>
1459 inline bool
1460 operator<(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1461 {
1462 using _Tp_elt = typename __shared_ptr<_Tp, _Lp>::element_type;
1463 return less<_Tp_elt*>()(nullptr, __a.get());
1464 }
1465
1466 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1467 inline bool
1468 operator<=(const __shared_ptr<_Tp1, _Lp>& __a,
1469 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1470 { return !(__b < __a); }
1471
1472 template<typename _Tp, _Lock_policy _Lp>
1473 inline bool
1474 operator<=(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1475 { return !(nullptr < __a); }
1476
1477 template<typename _Tp, _Lock_policy _Lp>
1478 inline bool
1479 operator<=(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1480 { return !(__a < nullptr); }
1481
1482 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1483 inline bool
1484 operator>(const __shared_ptr<_Tp1, _Lp>& __a,
1485 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1486 { return (__b < __a); }
1487
1488 template<typename _Tp, _Lock_policy _Lp>
1489 inline bool
1490 operator>(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1491 { return nullptr < __a; }
1492
1493 template<typename _Tp, _Lock_policy _Lp>
1494 inline bool
1495 operator>(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1496 { return __a < nullptr; }
1497
1498 template<typename _Tp1, typename _Tp2, _Lock_policy _Lp>
1499 inline bool
1500 operator>=(const __shared_ptr<_Tp1, _Lp>& __a,
1501 const __shared_ptr<_Tp2, _Lp>& __b) noexcept
1502 { return !(__a < __b); }
1503
1504 template<typename _Tp, _Lock_policy _Lp>
1505 inline bool
1506 operator>=(const __shared_ptr<_Tp, _Lp>& __a, nullptr_t) noexcept
1507 { return !(__a < nullptr); }
1508
1509 template<typename _Tp, _Lock_policy _Lp>
1510 inline bool
1511 operator>=(nullptr_t, const __shared_ptr<_Tp, _Lp>& __a) noexcept
1512 { return !(nullptr < __a); }
1513
1514 template<typename _Sp>
1515 struct _Sp_less : public binary_function<_Sp, _Sp, bool>
1516 {
1517 bool
1518 operator()(const _Sp& __lhs, const _Sp& __rhs) const noexcept
1519 {
1520 typedef typename _Sp::element_type element_type;
1521 return std::less<element_type*>()(__lhs.get(), __rhs.get());
1522 }
1523 };
1524
1525 template<typename _Tp, _Lock_policy _Lp>
1526 struct less<__shared_ptr<_Tp, _Lp>>
1527 : public _Sp_less<__shared_ptr<_Tp, _Lp>>
1528 { };
1529
1530 // 20.7.2.2.8 shared_ptr specialized algorithms.
1531 template<typename _Tp, _Lock_policy _Lp>
1532 inline void
1533 swap(__shared_ptr<_Tp, _Lp>& __a, __shared_ptr<_Tp, _Lp>& __b) noexcept
1534 { __a.swap(__b); }
1535
1536 // 20.7.2.2.9 shared_ptr casts
1537
1538 // The seemingly equivalent code:
1539 // shared_ptr<_Tp, _Lp>(static_cast<_Tp*>(__r.get()))
1540 // will eventually result in undefined behaviour, attempting to
1541 // delete the same object twice.
1542 /// static_pointer_cast
1543 template<typename _Tp, typename _Tp1, _Lock_policy _Lp>
1544 inline __shared_ptr<_Tp, _Lp>
1545 static_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
1546 {
1547 using _Sp = __shared_ptr<_Tp, _Lp>;
1548 return _Sp(__r, static_cast<typename _Sp::element_type*>(__r.get()));
1549 }
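static_pointer_cast above (and the const/dynamic variants below) rely on the aliasing constructor _Sp(__r, __p), which shares __r's control block instead of minting a new one. A stand-alone sketch of the difference, using hypothetical Base/Derived types and the public std::static_pointer_cast:

    #include <memory>

    struct Base { virtual ~Base() = default; };
    struct Derived : Base { };

    int main()
    {
        std::shared_ptr<Base> b = std::make_shared<Derived>();

        // OK: d shares b's control block, so the object is deleted once.
        std::shared_ptr<Derived> d = std::static_pointer_cast<Derived>(b);

        // The hazard the comment above describes: a second control block
        // would own the same object and delete it twice.
        // std::shared_ptr<Derived> bad(static_cast<Derived*>(b.get()));

        return d.use_count() == 2 ? 0 : 1;
    }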
1550
1551 // The seemingly equivalent code:
1552 // shared_ptr<_Tp, _Lp>(const_cast<_Tp*>(__r.get()))
1553 // will eventually result in undefined behaviour, attempting to
1554 // delete the same object twice.
1555 /// const_pointer_cast
1556 template<typename _Tp, typename _Tp1, _Lock_policy _Lp>
1557 inline __shared_ptr<_Tp, _Lp>
1558 const_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
1559 {
1560 using _Sp = __shared_ptr<_Tp, _Lp>;
1561 return _Sp(__r, const_cast<typename _Sp::element_type*>(__r.get()));
1562 }
1563
1564 // The seemingly equivalent code:
1565 // shared_ptr<_Tp, _Lp>(dynamic_cast<_Tp*>(__r.get()))
1566 // will eventually result in undefined behaviour, attempting to
1567 // delete the same object twice.
1568 /// dynamic_pointer_cast
1569 template<typename _Tp, typename _Tp1, _Lock_policy _Lp>
1570 inline __shared_ptr<_Tp, _Lp>
1571 dynamic_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
1572 {
1573 using _Sp = __shared_ptr<_Tp, _Lp>;
1574 if (auto* __p = dynamic_cast<typename _Sp::element_type*>(__r.get()))
1575 return _Sp(__r, __p);
1576 return _Sp();
1577 }
1578
1579#if __cplusplus > 201402L
1580 template<typename _Tp, typename _Tp1, _Lock_policy _Lp>
1581 inline __shared_ptr<_Tp, _Lp>
1582 reinterpret_pointer_cast(const __shared_ptr<_Tp1, _Lp>& __r) noexcept
1583 {
1584 using _Sp = __shared_ptr<_Tp, _Lp>;
1585 return _Sp(__r, reinterpret_cast<typename _Sp::element_type*>(__r.get()));
1586 }
1587#endif
1588
1589 template<typename _Tp, _Lock_policy _Lp>
1590 class __weak_ptr
1591 {
1592 template<typename _Yp, typename _Res = void>
1593 using _Compatible = typename
1594 enable_if<__sp_compatible_with<_Yp*, _Tp*>::value, _Res>::type;
1595
1596 // Constraint for assignment from shared_ptr and weak_ptr:
1597 template<typename _Yp>
1598 using _Assignable = _Compatible<_Yp, __weak_ptr&>;
1599
1600 public:
1601 using element_type = typename remove_extent<_Tp>::type;
1602
1603 constexpr __weak_ptr() noexcept
1604 : _M_ptr(nullptr), _M_refcount()
1605 { }
1606
1607 __weak_ptr(const __weak_ptr&) noexcept = default;
1608
1609 ~__weak_ptr() = default;
1610
1611 // The "obvious" converting constructor implementation:
1612 //
1613 // template<typename _Tp1>
1614 // __weak_ptr(const __weak_ptr<_Tp1, _Lp>& __r)
1615 // : _M_ptr(__r._M_ptr), _M_refcount(__r._M_refcount) // never throws
1616 // { }
1617 //
1618 // has a serious problem.
1619 //
1620 // __r._M_ptr may already have been invalidated. The _M_ptr(__r._M_ptr)
1621 // conversion may require access to *__r._M_ptr (virtual inheritance).
1622 //
1623 // It is not possible to avoid spurious access violations since
1624 // in multithreaded programs __r._M_ptr may be invalidated at any point.
1625 template<typename _Yp, typename = _Compatible<_Yp>>
1626 __weak_ptr(const __weak_ptr<_Yp, _Lp>& __r) noexcept
1627 : _M_refcount(__r._M_refcount)
1628 { _M_ptr = __r.lock().get(); }
1629
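A concrete instance of the hazard described in the comment above: with virtual inheritance the derived-to-base adjustment is stored in the object itself, so even an implicit pointer conversion reads *__r._M_ptr. That is why this constructor derives _M_ptr from lock() instead. Hypothetical stand-alone types:

    struct Base { int b = 0; };
    struct Derived : virtual Base { int d = 0; };

    // The Derived -> Base offset lives in the object, so this implicit
    // conversion must read *p whenever p is non-null.
    Base* to_base(Derived* p) { return p; }

    int main()
    {
        Derived obj;
        return to_base(&obj) == static_cast<Base*>(&obj) ? 0 : 1;
    }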
1630 template<typename _Yp, typename = _Compatible<_Yp>>
1631 __weak_ptr(const __shared_ptr<_Yp, _Lp>& __r) noexcept
1632 : _M_ptr(__r._M_ptr), _M_refcount(__r._M_refcount)
1633 { }
1634
1635 __weak_ptr(__weak_ptr&& __r) noexcept
1636 : _M_ptr(__r._M_ptr), _M_refcount(std::move(__r._M_refcount))
1637 { __r._M_ptr = nullptr; }
1638
1639 template<typename _Yp, typename = _Compatible<_Yp>>
1640 __weak_ptr(__weak_ptr<_Yp, _Lp>&& __r) noexcept
1641 : _M_ptr(__r.lock().get()), _M_refcount(std::move(__r._M_refcount))
1642 { __r._M_ptr = nullptr; }
1643
1644 __weak_ptr&
1645 operator=(const __weak_ptr& __r) noexcept = default;
1646
1647 template<typename _Yp>
1648 _Assignable<_Yp>
1649 operator=(const __weak_ptr<_Yp, _Lp>& __r) noexcept
1650 {
1651 _M_ptr = __r.lock().get();
1652 _M_refcount = __r._M_refcount;
1653 return *this;
1654 }
1655
1656 template<typename _Yp>
1657 _Assignable<_Yp>
1658 operator=(const __shared_ptr<_Yp, _Lp>& __r) noexcept
1659 {
1660 _M_ptr = __r._M_ptr;
1661 _M_refcount = __r._M_refcount;
1662 return *this;
1663 }
1664
1665 __weak_ptr&
1666 operator=(__weak_ptr&& __r) noexcept
1667 {
1668 _M_ptr = __r._M_ptr;
1669 _M_refcount = std::move(__r._M_refcount);
1670 __r._M_ptr = nullptr;
1671 return *this;
1672 }
1673
1674 template<typename _Yp>
1675 _Assignable<_Yp>
1676 operator=(__weak_ptr<_Yp, _Lp>&& __r) noexcept
1677 {
1678 _M_ptr = __r.lock().get();
1679 _M_refcount = std::move(__r._M_refcount);
1680 __r._M_ptr = nullptr;
1681 return *this;
1682 }
1683
1684 __shared_ptr<_Tp, _Lp>
1685 lock() const noexcept
1686 { return __shared_ptr<element_type, _Lp>(*this, std::nothrow); }
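lock() forwards to the nothrow __shared_ptr constructor shown earlier, so it never throws; once the last owner is gone it simply yields an empty pointer. The canonical pattern with the public wrappers:

    #include <iostream>
    #include <memory>

    int main()
    {
        std::weak_ptr<int> w;
        {
            auto s = std::make_shared<int>(42);
            w = s;                       // observe without owning
            if (auto locked = w.lock())  // non-empty while s is alive
                std::cout << *locked << '\n';
        }
        std::cout << std::boolalpha << w.expired() << '\n'; // prints: true
    }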
1687
1688 long
1689 use_count() const noexcept
1690 { return _M_refcount._M_get_use_count(); }
1691
1692 bool
1693 expired() const noexcept
1694 { return _M_refcount._M_get_use_count() == 0; }
1695
1696 template<typename _Tp1>
1697 bool
1698 owner_before(const __shared_ptr<_Tp1, _Lp>& __rhs) const noexcept
1699 { return _M_refcount._M_less(__rhs._M_refcount); }
1700
1701 template<typename _Tp1>
1702 bool
1703 owner_before(const __weak_ptr<_Tp1, _Lp>& __rhs) const noexcept
1704 { return _M_refcount._M_less(__rhs._M_refcount); }
1705
1706 void
1707 reset() noexcept
1708 { __weak_ptr().swap(*this); }
1709
1710 void
1711 swap(__weak_ptr& __s) noexcept
1712 {
1713 std::swap(_M_ptr, __s._M_ptr);
1714 _M_refcount._M_swap(__s._M_refcount);
1715 }
1716
1717 private:
1718 // Used by __enable_shared_from_this.
1719 void
1720 _M_assign(_Tp* __ptr, const __shared_count<_Lp>& __refcount) noexcept
1721 {
1722 if (use_count() == 0)
1723 {
1724 _M_ptr = __ptr;
1725 _M_refcount = __refcount;
1726 }
1727 }
1728
1729 template<typename _Tp1, _Lock_policy _Lp1> friend class __shared_ptr;
1730 template<typename _Tp1, _Lock_policy _Lp1> friend class __weak_ptr;
1731 friend class __enable_shared_from_this<_Tp, _Lp>;
1732 friend class enable_shared_from_this<_Tp>;
1733
1734 element_type* _M_ptr; // Contained pointer.
1735 __weak_count<_Lp> _M_refcount; // Reference counter.
1736 };
1737
1738 // 20.7.2.3.6 weak_ptr specialized algorithms.
1739 template<typename _Tp, _Lock_policy _Lp>
1740 inline void
1741 swap(__weak_ptr<_Tp, _Lp>& __a, __weak_ptr<_Tp, _Lp>& __b) noexcept
1742 { __a.swap(__b); }
1743
1744 template<typename _Tp, typename _Tp1>
1745 struct _Sp_owner_less : public binary_function<_Tp, _Tp, bool>
1746 {
1747 bool
1748 operator()(const _Tp& __lhs, const _Tp& __rhs) const noexcept
1749 { return __lhs.owner_before(__rhs); }
1750
1751 bool
1752 operator()(const _Tp& __lhs, const _Tp1& __rhs) const noexcept
1753 { return __lhs.owner_before(__rhs); }
1754
1755 bool
1756 operator()(const _Tp1& __lhs, const _Tp& __rhs) const noexcept
1757 { return __lhs.owner_before(__rhs); }
1758 };
1759
1760 template<>
1761 struct _Sp_owner_less<void, void>
1762 {
1763 template<typename _Tp, typename _Up>
1764 auto
1765 operator()(const _Tp& __lhs, const _Up& __rhs) const noexcept
1766 -> decltype(__lhs.owner_before(__rhs))
1767 { return __lhs.owner_before(__rhs); }
1768
1769 using is_transparent = void;
1770 };
1771
1772 template<typename _Tp, _Lock_policy _Lp>
1773 struct owner_less<__shared_ptr<_Tp, _Lp>>
1774 : public _Sp_owner_less<__shared_ptr<_Tp, _Lp>, __weak_ptr<_Tp, _Lp>>
1775 { };
1776
1777 template<typename _Tp, _Lock_policy _Lp>
1778 struct owner_less<__weak_ptr<_Tp, _Lp>>
1779 : public _Sp_owner_less<__weak_ptr<_Tp, _Lp>, __shared_ptr<_Tp, _Lp>>
1780 { };
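owner_before() and owner_less compare control blocks, not stored pointers, so two handles that share an owner compare equivalent even when get() differs. A sketch with a hypothetical Pair type and an aliasing shared_ptr:

    #include <memory>
    #include <set>

    struct Pair { int first = 1; int second = 2; };

    int main()
    {
        auto p = std::make_shared<Pair>();
        // Aliasing shared_ptr: different get(), same control block as p.
        std::shared_ptr<int> alias(p, &p->second);

        std::set<std::weak_ptr<void>, std::owner_less<std::weak_ptr<void>>> s;
        s.insert(std::weak_ptr<void>(p));
        s.insert(std::weak_ptr<void>(alias)); // equivalent key: not inserted
        return s.size() == 1 ? 0 : 1;
    }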
1781
1782
1783 template<typename _Tp, _Lock_policy _Lp>
1784 class __enable_shared_from_this
1785 {
1786 protected:
1787 constexpr __enable_shared_from_this() noexcept { }
1788
1789 __enable_shared_from_this(const __enable_shared_from_this&) noexcept { }
1790
1791 __enable_shared_from_this&
1792 operator=(const __enable_shared_from_this&) noexcept
1793 { return *this; }
1794
1795 ~__enable_shared_from_this() { }
1796
1797 public:
1798 __shared_ptr<_Tp, _Lp>
1799 shared_from_this()
1800 { return __shared_ptr<_Tp, _Lp>(this->_M_weak_this); }
1801
1802 __shared_ptr<const _Tp, _Lp>
1803 shared_from_this() const
1804 { return __shared_ptr<const _Tp, _Lp>(this->_M_weak_this); }
1805
1806#if __cplusplus > 201402L || !defined(__STRICT_ANSI__) // c++1z or gnu++11
1807 __weak_ptr<_Tp, _Lp>
1808 weak_from_this() noexcept
1809 { return this->_M_weak_this; }
1810
1811 __weak_ptr<const _Tp, _Lp>
1812 weak_from_this() const noexcept
1813 { return this->_M_weak_this; }
1814#endif
1815
1816 private:
1817 template<typename _Tp1>
1818 void
1819 _M_weak_assign(_Tp1* __p, const __shared_count<_Lp>& __n) const noexcept
1820 { _M_weak_this._M_assign(__p, __n); }
1821
1822 friend const __enable_shared_from_this*
1823 __enable_shared_from_this_base(const __shared_count<_Lp>&,
1824 const __enable_shared_from_this* __p)
1825 { return __p; }
1826
1827 template<typename, _Lock_policy>
1828 friend class __shared_ptr;
1829
1830 mutable __weak_ptr<_Tp, _Lp> _M_weak_this;
1831 };
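_M_weak_assign above is what __shared_ptr construction invokes (through _M_enable_shared_from_this_with) to seed _M_weak_this, so the first owning shared_ptr is what makes shared_from_this() usable. Public-API sketch:

    #include <memory>

    struct Widget : std::enable_shared_from_this<Widget>
    {
        std::shared_ptr<Widget> self() { return shared_from_this(); }
    };

    int main()
    {
        auto w = std::make_shared<Widget>(); // wires up _M_weak_this
        auto w2 = w->self();                 // shares ownership with w
        return w.use_count() == 2 ? 0 : 1;
    }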
1832
1833 template<typename _Tp, _Lock_policy _Lp, typename _Alloc, typename... _Args>
1834 inline __shared_ptr<_Tp, _Lp>
1835 __allocate_shared(const _Alloc& __a, _Args&&... __args)
1836 {
1837 return __shared_ptr<_Tp, _Lp>(_Sp_make_shared_tag(), __a,
1838 std::forward<_Args>(__args)...);
1839 }
1840
1841 template<typename _Tp, _Lock_policy _Lp, typename... _Args>
1842 inline __shared_ptr<_Tp, _Lp>
1843 __make_shared(_Args&&... __args)
1844 {
1845 typedef typename std::remove_const<_Tp>::type _Tp_nc;
1846 return std::__allocate_shared<_Tp, _Lp>(std::allocator<_Tp_nc>(),
1847 std::forward<_Args>(__args)...);
1848 }
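std::make_shared and std::allocate_shared both bottom out in __allocate_shared, whose _Sp_make_shared_tag constructor places the object and its control block in a single allocation. Observable behaviour only, as a sketch:

    #include <memory>

    int main()
    {
        auto a = std::make_shared<int>(7);
        auto b = std::allocate_shared<int>(std::allocator<int>(), 7);
        return (*a == 7 && *b == 7) ? 0 : 1;
    }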
1849
1850 /// std::hash specialization for __shared_ptr.
1851 template<typename _Tp, _Lock_policy _Lp>
1852 struct hash<__shared_ptr<_Tp, _Lp>>
1853 : public __hash_base<size_t, __shared_ptr<_Tp, _Lp>>
1854 {
1855 size_t
1856 operator()(const __shared_ptr<_Tp, _Lp>& __s) const noexcept
1857 {
1858 return hash<typename __shared_ptr<_Tp, _Lp>::element_type*>()(
1859 __s.get());
1860 }
1861 };
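The hash is just the hash of get(), so unordered containers key shared_ptrs by pointer identity, not by pointee value:

    #include <memory>
    #include <unordered_set>

    int main()
    {
        auto p = std::make_shared<int>(5);
        std::unordered_set<std::shared_ptr<int>> s;
        s.insert(p);
        s.insert(p);                        // same get(): no new element
        s.insert(std::make_shared<int>(5)); // distinct pointer: inserted
        return s.size() == 2 ? 0 : 1;
    }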
1862
1863_GLIBCXX_END_NAMESPACE_VERSION
1864} // namespace
1865
1866#endif // _SHARED_PTR_BASE_H

/usr/lib/gcc/x86_64-linux-gnu/7.3.0/../../../../include/c++/7.3.0/ext/atomicity.h

1// Support for atomic operations -*- C++ -*-
2
3// Copyright (C) 2004-2017 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file ext/atomicity.h
26 * This file is a GNU extension to the Standard C++ Library.
27 */
28
29#ifndef _GLIBCXX_ATOMICITY_H
30#define _GLIBCXX_ATOMICITY_H 1
31
32#pragma GCC system_header
33
34#include <bits/c++config.h>
35#include <bits/gthr.h>
36#include <bits/atomic_word.h>
37
38namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
39{
40_GLIBCXX_BEGIN_NAMESPACE_VERSION
41
42 // Functions for portable atomic access.
43 // To abstract locking primitives across all thread policies, use:
44 // __exchange_and_add_dispatch
45 // __atomic_add_dispatch
46#ifdef _GLIBCXX_ATOMIC_BUILTINS
47 static inline _Atomic_word
48 __exchange_and_add(volatile _Atomic_word* __mem, int __val)
49 { return __atomic_fetch_add(__mem, __val, __ATOMIC_ACQ_REL); }
50
51 static inline void
52 __atomic_add(volatile _Atomic_word* __mem, int __val)
53 { __atomic_fetch_add(__mem, __val, __ATOMIC_ACQ_REL); }
54#else
55 _Atomic_word
56 __attribute__ ((__unused__))
57 __exchange_and_add(volatile _Atomic_word*, int) throw ();
58
59 void
60 __attribute__ ((__unused__))
61 __atomic_add(volatile _Atomic_word*, int) throw ();
62#endif
63
64 static inline _Atomic_word
65 __exchange_and_add_single(_Atomic_word* __mem, int __val)
66 {
67 _Atomic_word __result = *__mem;
49
Use of memory after it is freed
68 *__mem += __val;
69 return __result;
70 }
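The flagged statement is correct single-threaded refcount code in itself; the defect lies in the calling code in DWARFDebugLine.cpp, which reaches this point with __mem pointing into a control block that has already been deallocated. Since that path is not reproduced in this listing, the following is only a hypothetical reduction of the general pattern the checker reports:

    #include <memory>

    int main()
    {
        auto* owner = new std::shared_ptr<int>(std::make_shared<int>(1));
        std::shared_ptr<int>& dangling = *owner;
        delete owner; // last owner: the control block (and refcount) is freed

        // Copying 'dangling' would run __exchange_and_add_dispatch on the
        // freed refcount word, i.e. the access flagged at line 67 above.
        // std::shared_ptr<int> copy = dangling;
        (void)dangling;
        return 0;
    }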
71
72 static inline void
73 __atomic_add_single(_Atomic_word* __mem, int __val)
74 { *__mem += __val; }
75
76 static inline _Atomic_word
77 __attribute__ ((__unused__))
78 __exchange_and_add_dispatch(_Atomic_word* __mem, int __val)
79 {
80#ifdef __GTHREADS
81 if (__gthread_active_p())
46
Assuming the condition is false
47
Taking false branch
82 return __exchange_and_add(__mem, __val);
83 else
84 return __exchange_and_add_single(__mem, __val);
48
Calling '__exchange_and_add_single'
85#else
86 return __exchange_and_add_single(__mem, __val);
87#endif
88 }
89
90 static inline void
91 __attribute__ ((__unused__))
92 __atomic_add_dispatch(_Atomic_word* __mem, int __val)
93 {
94#ifdef __GTHREADS
95 if (__gthread_active_p())
96 __atomic_add(__mem, __val);
97 else
98 __atomic_add_single(__mem, __val);
99#else
100 __atomic_add_single(__mem, __val);
101#endif
102 }
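Both dispatch helpers choose the atomic builtin when __gthread_active_p() reports that threads exist, and fall back to the cheaper non-atomic *_single variants otherwise; the analyzer's path above took the single-threaded branch. A usage sketch, assuming a libstdc++ toolchain where <ext/atomicity.h> is available:

    #include <ext/atomicity.h>

    int main()
    {
        _Atomic_word counter = 0;
        __gnu_cxx::__atomic_add_dispatch(&counter, 1);
        _Atomic_word old = __gnu_cxx::__exchange_and_add_dispatch(&counter, 1);
        return (old == 1 && counter == 2) ? 0 : 1;
    }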
103
104_GLIBCXX_END_NAMESPACE_VERSION
105} // namespace
106
107// Even if the CPU doesn't need a memory barrier, we need to ensure
108// that the compiler doesn't reorder memory accesses across the
109// barriers.
110#ifndef _GLIBCXX_READ_MEM_BARRIER
111#define _GLIBCXX_READ_MEM_BARRIER __atomic_thread_fence (__ATOMIC_ACQUIRE)
112#endif
113#ifndef _GLIBCXX_WRITE_MEM_BARRIER
114#define _GLIBCXX_WRITE_MEM_BARRIER __atomic_thread_fence (__ATOMIC_RELEASE)
115#endif
116
117#endif